diff --git "a/082.jsonl" "b/082.jsonl" new file mode 100644--- /dev/null +++ "b/082.jsonl" @@ -0,0 +1,795 @@ +{"seq_id":"494236319","text":"# 750. Number Of Corner Rectangles\nclass Solution:\n def countCornerRectangles(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n yLen = len(grid[0])\n ans = 0\n count = [[0]*yLen for i in range(yLen)]\n for row in grid:\n for i in range(yLen-1):\n if row[i]:\n for j in range(i+1, yLen):\n if row[j]:\n ans += count[i][j] \n count[i][j] += 1\n return ans\n","sub_path":"750/lc750-solution2.py","file_name":"lc750-solution2.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"55234566","text":"#!/usr/bin/python3\n# This script consumes a Star War API and search characters\nfrom sys import argv\nimport requests\n\nif __name__ == \"__main__\":\n params = {\"search\": argv[1]}\n url = \"http://swapi.co/api/people/?search=\"\n res = requests.get(url, params=params)\n try:\n d = res.json()\n print(\"Number of results:\", d.get(\"count\"))\n for result in d.get(\"results\"):\n print(result.get(\"name\"))\n except ValueError:\n print(\"Not a valid JSON\")\n","sub_path":"0x11-python-network_1/101-starwars.py","file_name":"101-starwars.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"167769661","text":"import pandas as pd\nimport Helpfunctions as hf\nimport VisualizationFunctions as vf\nimport matplotlib.pyplot as plt\n\nfrom pandas.tools.plotting import scatter_matrix\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.linear_model import LinearRegression, LogisticRegression\nfrom sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import classification_report, confusion_matrix, log_loss, roc_curve\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.tree import DecisionTreeClassifier\n\nclass Titanic_data:\n labelled_data_path = \"\"\n df_labelled_original = pd.DataFrame()\n df_labelled = pd.DataFrame()\n\n unlabelled_data_path = \"\"\n df_unlabelled_original = pd.DataFrame()\n df_unlabelled = pd.DataFrame()\n\n output_path = \"\"\n\n #classifier = AdaBoostClassifier(n_estimators=3)\n classifier = LogisticRegression(C=2.75)\n\n def __init__(self, labelled_data_path, unlabelled_data_path, output_path):\n self.labelled_data_path = labelled_data_path\n self.unlabelled_data_path = unlabelled_data_path\n self.output_path = output_path\n\n self.df_labelled_original = pd.read_csv(labelled_data_path, na_values=['NaN', '?'])\n self.df_unlabelled_original = pd.read_csv(unlabelled_data_path, na_values=['NaN', '?'])\n\n def encode_features(self):\n self.df_labelled = self.df_labelled_original.copy(deep=True)\n self.df_unlabelled = self.df_unlabelled_original.copy(deep=True)\n\n for df in [self.df_labelled, self.df_unlabelled]:\n titles = []\n surnames = []\n for name in df['Name']:\n names = name.split(\",\")\n surname = names[0]\n surnames.append(surname)\n title_and_first_name = names[1].split(\".\")\n title = title_and_first_name[0].replace(\" \", \"\")\n if (title != \"Mr\")\\\n and (title != \"Mrs\")\\\n and (title != \"Miss\")\\\n and (title != \"Master\")\\\n and (title != \"Dr\"):\n title = \"Rare\"\n titles.append(title)\n\n title_series = pd.Series(titles)\n surname_series = 
pd.Series(surnames)\n\n df['Title'] = title_series\n df['Surname'] = surname_series\n\n df.drop(\"Name\", axis=1, inplace=True)\n df.drop(\"Cabin\", axis=1, inplace=True)\n df.drop(\"Ticket\", axis=1, inplace=True)\n\n df[\"Familysize\"] = df[\"Parch\"] + df[\"SibSp\"]\n df[\"Isalone\"] = (df[\"Familysize\"] == 0)*1\n\n df['Embarked'] = df['Embarked'].factorize()[0] # get rid of NaN\n le = LabelEncoder()\n\n df[\"Sex\"] = le.fit_transform(df[\"Sex\"])\n df[\"Title\"] = le.fit_transform(df[\"Title\"])\n df[\"Surname\"] = le.fit_transform(df[\"Surname\"])\n df[\"Isalone\"] = le.fit_transform(df[\"Isalone\"])\n df[\"Embarked\"] = le.fit_transform(df[\"Embarked\"])\n #hf.encode_text_dummy(df, \"Embarked\")\n\n\n\n return\n\n def impute_features(self, plot_before_and_after=True):\n f = \"\"\n ax1 = \"\"\n ax2 = \"\"\n ax3 = \"\"\n ax4 = \"\"\n if plot_before_and_after:\n f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)\n plt.suptitle(\"Data density, imputation\")\n ax1.set_title(\"Labelled data (Age)\")\n ax2.set_title(\"Unlabelled data (Age)\")\n ax3.set_title(\"Labelled data (Fare)\")\n ax4.set_title(\"Unlabelled data (Fare)\")\n\n self.df_labelled[\"Age\"].plot.density(ax=ax1, label=\"Pre (Age)\")\n self.df_unlabelled[\"Age\"].plot.density(ax=ax2, label=\"Pre (Age)\")\n self.df_labelled[\"Fare\"].plot.density(ax=ax3, label=\"Pre (Fare)\")\n self.df_unlabelled[\"Fare\"].plot.density(ax=ax4, label=\"Pre (Fare)\")\n\n self.df_labelled = self.impute_fare(self.df_labelled)\n self.df_unlabelled = self.impute_fare(self.df_unlabelled)\n\n self.df_labelled = self.impute_age(self.df_labelled)\n self.df_unlabelled = self.impute_age(self.df_unlabelled)\n\n # imputer = skmice.MiceImputer()\n # X, specs = imputer.transform(df)\n\n if plot_before_and_after:\n self.df_labelled[\"Age\"].plot.density(ax=ax1, label=\"Post (Age)\")\n self.df_unlabelled[\"Age\"].plot.density(ax=ax2, label=\"Post (Age)\")\n self.df_labelled[\"Fare\"].plot.density(ax=ax3, label=\"Post (Fare)\")\n self.df_unlabelled[\"Fare\"].plot.density(ax=ax4, label=\"Post (Fare)\")\n ax1.legend(loc=\"upper right\")\n ax2.legend(loc=\"upper right\")\n ax3.legend(loc=\"upper right\")\n ax4.legend(loc=\"upper right\")\n plt.show()\n\n self.df_labelled = self.bin_age_and_fare(self.df_labelled)\n self.df_unlabelled = self.bin_age_and_fare(self.df_unlabelled)\n return\n\n def bin_age_and_fare(self, dataset):\n dataset.loc[dataset['Fare'] <= 7.91, 'Fare'] = 0\n dataset.loc[(dataset['Fare'] > 7.91) & (dataset['Fare'] <= 14.454), 'Fare'] = 1\n dataset.loc[(dataset['Fare'] > 14.454) & (dataset['Fare'] <= 31), 'Fare'] = 2\n dataset.loc[dataset['Fare'] > 31, 'Fare'] = 3\n dataset['Fare'] = dataset['Fare'].astype(int)\n\n # Mapping Age\n dataset.loc[dataset['Age'] <= 16, 'Age'] = 0\n dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1\n dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2\n dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3\n dataset.loc[dataset['Age'] > 64, 'Age'];\n\n return dataset\n\n def impute_age(self, dataframe):\n df_is_not_nan = dataframe[dataframe[\"Age\"].notnull()]\n X, y = hf.to_xy(df_is_not_nan, \"Age\")\n lr = LinearRegression()\n lr.fit(X, y)\n\n df_is_nan = dataframe[dataframe[\"Age\"].isnull()]\n df_is_nan = df_is_nan.drop(\"Age\", axis=1)\n age_pred = lr.predict(df_is_nan)\n df_is_nan[\"Age\"] = age_pred\n dataframe = pd.concat([df_is_nan, df_is_not_nan])\n return dataframe\n #dataframe[\"Age\"] = dataframe[\"Age\"].fillna(dataframe[\"Age\"].mean())\n\n\n def 
impute_fare(self, dataframe):\n dataframe[\"Fare\"] = dataframe[\"Fare\"].fillna(dataframe[\"Fare\"].median())\n return dataframe\n\n def plot_scatter_matrix(self):\n scatter_matrix(self.df_labelled, diagonal=\"kde\")\n plt.show()\n scatter_matrix(self.df_unlabelled, diagonal=\"kde\")\n plt.show()\n\n def describe_original_data(self):\n print(\"Training data set description:\")\n print(self.df_labelled_original.info())\n print(self.df_labelled_original.describe())\n print(\"Submission data set description:\")\n print(self.df_unlabelled_original.info())\n print(self.df_unlabelled_original.describe())\n\n def describe_prepared_data(self):\n print(\"Training data set description:\")\n print(self.df_labelled.info())\n print(self.df_labelled.describe())\n print(\"Submission data set description:\")\n print(self.df_unlabelled.info())\n print(self.df_unlabelled.describe())\n\n def fit_classifier(self, print_report=True):\n X, y = hf.to_xy(self.df_labelled.drop('PassengerId', axis=1), \"Survived\")\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.75, random_state=42)\n self.classifier.fit(X_train, y_train)\n y_pred = self.classifier.predict(X_test)\n print(classification_report(y_test, y_pred, target_names=[\"Survived\", \"Diseased\"]))\n\n def predict_unlabelled_data(self):\n self.df_unlabelled[\"Survived\"] = \\\n self.classifier.predict(self.df_unlabelled.drop('PassengerId', axis=1))\n\n self.df_unlabelled[['PassengerId', 'Survived']]\\\n .to_csv(self.output_path, index=False, sep=\",\")\n\n def test_different_settings(self):\n\n X, y = hf.to_xy(self.df_labelled.drop('PassengerId', axis=1), \"Survived\")\n X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.25, random_state=42)\n\n #testparameters = [2, 2.25, 2.5, 2.75, 3, 3.25, 3.5, 3.75, 4, 4.25, 4.5, 4.75, 5, 5.25, 5.5, 5.75, 6]\n testparameters = range(1,300)\n\n train_error = list()\n test_error = list()\n log_losses = list()\n\n plt.figure()\n\n for testparameter in testparameters:\n classifier = LogisticRegression(C=2.75)\n classifier.fit(X_train, y_train)\n y_pred = classifier.predict(X_test)\n y_proba = classifier.predict_proba(X_test)\n train_error.append(classifier.score(X_train, y_train))\n test_error.append(classifier.score(X_test, y_test))\n log_losses.append(log_loss(y_test, y_proba))\n #print(\"\\nTestparameter: \" + str(testparameter))\n #print(confusion_matrix(y_test, y_pred))\n #print(log_loss(y_test, y_proba))\n\n\n plt.semilogx(testparameters, train_error, label=\"train\")\n plt.semilogx(testparameters, test_error, label=\"test\")\n plt.legend(loc='lower left')\n plt.show()\n\n plt.plot(testparameters, log_losses)\n plt.show()\n","sub_path":"Titanic_data.py","file_name":"Titanic_data.py","file_ext":"py","file_size_in_byte":9351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"282455207","text":"# -*- coding: utf-8 -*-\n# Cardboardlint is a cheap lint solution for pull requests.\n# Copyright (C) 2011-2017 The Cardboardlint Development Team\n#\n# This file is part of Cardboardlint.\n#\n# Cardboardlint is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 3\n# of the License, or (at your option) any later version.\n#\n# Cardboardlint is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see \n# --\n\"\"\"Linter using CPPLint.\n\nThis test calls the cpplint.py program, see https://github.com/google/styleguide\n\"\"\"\nfrom __future__ import print_function\n\nfrom cardboardlint.common import run_command, Message, Linter\n\n\n__all__ = ['linter_cpplint']\n\n\nDEFAULT_CONFIG = {\n # Filename filter rules\n 'filefilter': ['+ *.h', '+ *.h.in', '+ *.cpp', '+ *.c'],\n # Location of the file\n 'script': 'cpplint'\n}\n\n\ndef _has_failed(_returncode, stdout, _stderr):\n \"\"\"Determine if cpplint.py has failed.\"\"\"\n return 'FATAL' in stdout\n\n\ndef run_cpplint(config, filenames):\n \"\"\"Linter for cpplint.\n\n Parameters\n ----------\n config : dict\n Dictionary that contains the configuration for the linter\n Not supported\n filenames : list\n A list of filenames to check\n\n Returns\n -------\n messages : list\n The list of messages generated by the external linter.\n\n \"\"\"\n messages = []\n if len(filenames) > 0:\n # Call cpplint\n command = ([config['script'], '--linelength=100', '--filter=-runtime/int'] +\n filenames)\n output = run_command(command, has_failed=_has_failed)[1]\n\n # Parse the output of cpplint into standard return values\n for line in output.split('\\n')[:-1]:\n words = line.split()\n if len(words) == 0 or words[0].count(':') != 2:\n continue\n filename, lineno = words[0].split(':')[:2]\n description = ' '.join(words[1:-2])\n tag = words[-2]\n priority = words[-1]\n lineno = int(lineno)\n if lineno == 0:\n lineno = None\n messages.append(Message(filename, lineno, None, '%s %s %s' % (\n priority, tag, description)))\n return messages\n\n\n# pylint: disable=invalid-name\nlinter_cpplint = Linter('cpplint', run_cpplint, DEFAULT_CONFIG, language='cpp')\n","sub_path":"cardboardlint/linter_cpplint.py","file_name":"linter_cpplint.py","file_ext":"py","file_size_in_byte":2720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"481735526","text":"#-*- coding: UTF-8 -*-\nimport tornado.web\nimport tornado.httpserver\nfrom lib.util import Handler\nfrom lib.mysendmail import send_mail\nfrom connect.tornado.web import Route\n\nimport json\n\nclass MysqlHandler(Handler):\n importName = __name__\n\n@Route(\"/mysql/\")\nclass mysql(MysqlHandler):\n @tornado.web.authenticated\n def get(self):\n return self.render(\"mysql.html\")\n\n @tornado.web.authenticated\n def post(self):\n dbname = self.get_argument(\"dbname\",0)\n #if not dbname or not str(name).strip():\n # raise tornado.web.HTTPError(500, \"请填写dbname\")\n #comment = self.request.files['myfile']\n comment = self.get_argument(\"myfile\")\n username = self.current_user\n res = comment.lower()\n self.db.mysqlinsert(username,dbname,res)\n alert_context = \"提交成功! 
请等待审核...\\n您提交的内容如下:\\n%s\" % res\n        self.write(alert_context)\n\n@Route(\"/userinfo/(.*?)/\")\nclass userinfo(MysqlHandler):\n    @tornado.web.authenticated\n    def get(self,method):\n        if method == \"show\":\n            if self.current_user == \"admin\":\n                result = self.db.mysqlonlineadminshow()\n            else:\n                result = self.db.mysqlonlineshow(self.current_user)\n            if result:\n                return self.render(\"user_info.html\",result=result)\n\n    @tornado.web.authenticated\n    def post(self,method):\n        if method == \"updatemysql\":\n            logid = self.get_argument(\"logid\")\n            username = self.get_argument(\"username\")\n            if self.current_user == \"admin\":\n                if self.db.updateMysql(logid):\n                    self.write(\"上线成功\")\n                    temptext = \"你有新的数据库上线通知,请登录系统进行查看.\"\n                    send_mail(['%s@u17.com' % username,],'数据库上线通知','用户%s你好!%s' % (username,temptext))\n                else:\n                    self.write(\"上线失败\")\n\n    ","sub_path":"src/mysql/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"413563000","text":"class TransferToDirectlink():\n    def __init__(self, url=''):\n        if url == '请输入或粘贴正确的链接':\n            self.url = ''\n        else:\n            self.url = url\n        self.directlink = ''\n\n    def img_hosting(self):\n        split_str = self.url.split('%2F')\n        head_str = split_str[0].split('/_')\n        self.directlink += head_str[0]\n\n        for index in range(len(split_str)):\n            if index > 2:\n                self.directlink += '/' + split_str[index]\n\n        temp_link = self.directlink.split('&')\n        self.directlink = temp_link[0]\n        return self.directlink\n\n    def file_downloading(self):\n        if 'onedrive.aspx?id=' in self.url:\n            split_str = self.url.split('onedrive.aspx?id=')\n            self.directlink = split_str[0] + 'download.aspx?SourceUrl='\n            split_sub_str = split_str[1].split('&parent=')\n            self.directlink += split_sub_str[0]\n        else:\n            split_str = self.url.split('/')\n\n            token_str = split_str[-1].split('?')\n            add_str = '_layouts/52/download.aspx?share='\n\n            for index in range(len(split_str)):\n                if (index == len(split_str) - 1):\n                    self.directlink = self.directlink + add_str + token_str[0]\n                elif (index == len(split_str) - 4 or index == len(split_str) - 5):\n                    pass\n                else:\n                    self.directlink = self.directlink + split_str[index] + '/'\n        return self.directlink\n","sub_path":"directlink.py","file_name":"directlink.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"348203081","text":"# -*- coding: utf-8 -*-\n\nimport urllib.request, re, datetime, os, time\nimport threading\nfrom PyQt5 import QtCore\n# html = urllib.request.urlopen(\"http://www.baidu.com\")\n#\n# print(html.getcode())\n\n\nclass Down :\n\n    # 检查网址合法性\n    def checkUrl(urlStr):\n        if not (urlStr.startswith(\"http://\")) and not (urlStr.startswith(\"https://\")):\n            return \"网址不正确(以http或者https开头)\"\n        return \"\"\n\n    def __init__(self):\n        super().__init__()\n        # self.status = 0\n        self.showMessageFunc = 0\n        # self.text = 0\n    def getHtmlStr(self, urlStr, showMessageFunc):\n\n        self.urlStr = urlStr\n        self.showMessageFunc = showMessageFunc\n        # 模拟请求\n        headers = {\n            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 9_1 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Version/9.0 Mobile/13B143 Safari/601.1',\n        }\n        req = urllib.request.Request(urlStr, headers=headers)\n\n        # self.status.showMessage(\"解析网址中....\")\n\n        try:\n            html = urllib.request.urlopen(req)\n        except:\n            # return \"网址访问出错\"\n            showMessageFunc(\"网址访问出错\")\n\n            return\n        else:\n            # self.status.showMessage(\"解析图片地址中....\")\n            return self.getImgs(html.read())\n\n    def getImgs(self, htmlStr):\n        print(htmlStr)\n        # 普通网页图片解析\n        reg = r'((https|http):[^\\s\";]*?\\.(jpg|png|jpeg))'\n        imgArray = re.compile(reg).findall(htmlStr.decode('gbk', 'ignore'))\n        print(len(imgArray))\n\n        # 1688处理\n        if self.urlStr.find(\"1688\")>0 and htmlStr.decode('gbk', 'ignore').find(\"detailUrl\")>0:\n            reg1 = r'\"detailUrl\":\"(.*?)\"'\n            aliUrlArray = re.compile(reg1).findall(htmlStr.decode('gbk', 'ignore'))\n            if len(aliUrlArray)>0:\n                print(aliUrlArray[0])\n                # self.status.showMessage(\"读取1688图片详情网址...\")\n                imgArray1 = self.getHtmlStr(aliUrlArray[0], self.showMessageFunc)\n                if type(imgArray) == type(imgArray1) and len(imgArray1)>0:\n                    for item in imgArray1:\n                        imgArray.append(item)\n        print(len(imgArray))\n        return 
imgArray\n\n\n def saveImgs(self, imgArray, showMessageFunc):\n date = datetime.datetime.now()\n timeName = date.strftime(\"%H_%M_%S_imgs\")\n documentPath = os.path.join(os.path.expanduser(\"~\"), 'Desktop') + \"/\" + timeName\n os.makedirs(documentPath)\n # self.status.showMessage(\"创建桌面文件夹\"+timeName)\n\n threads = []\n for img in imgArray:\n try:\n # 处理img[0]\n urlStr = img[0]\n # print(urlStr)\n urlStr1 = urlStr.replace(r\"\\/\", \"/\").replace(\"small\", \"large\")\n print(urlStr1)\n conn = urllib.request.urlopen(urlStr1)\n except:\n print(\"错误\")\n else:\n print(\"成功\")\n # self.text.append(urlStr1)\n imgPath = documentPath + \"/\" + str(time.time()) + \".\" + img[2]\n f = open(imgPath, 'wb')\n f.write(conn.read())\n f.close()\n\n showMessageFunc(\"文件存储到桌面/{}下\".format(timeName))","sub_path":"DownImgs/DownImg.py","file_name":"DownImg.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"89191324","text":"# Imports\nimport sys, os\nxbmcutil = None\naddonPath = None\n\n# Constants\ncurrentPath = os.getcwd()\naddonsPath = os.path.join(currentPath, \"addons\")\n\n# Create list of Path that need to me added to path vars\npaths = [os.path.join(addonsPath, item, \"lib\") for item in os.listdir(addonsPath) if os.path.isdir(os.path.join(addonsPath, item)) and (item.startswith(\"script.module.\") or item.startswith(\"script.common.\"))]\npaths.append(os.path.join(currentPath, \"modules\"))\nsys.path.extend(paths)\ntemp = sys.argv[:]\naddonID = os.path.basename(temp[0]).split(\".\")[0].replace(\"_\",\".\")\n\ndef set_addonID(id):\n\tglobal addonID\n\taddonID = id\n\ndef setup_addon(url=\"\", id=\"\"):\n\t# Fake sys args\n\tif id: set_addonID(id)\n\tsys.argv[0] = \"plugin://%s/\" % addonID\n\tsys.argv.append(\"1\")\n\tsys.argv.append(url)\n\tglobal xbmcutil, addonPath\n\timport xbmcutil\n\taddonPath = os.path.join(currentPath, \"addons\", addonID)\n\ndef clear_cache():\n\tpath = os.path.join(currentPath, \"addon.data\", addonID, \"urlcache\")\n\tif os.path.exists(path):\n\t\tfor file in os.listdir(path):\n\t\t\tos.remove(os.path.join(path, file))","sub_path":"initializer.py","file_name":"initializer.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"240450665","text":"import requests\r\nfrom bs4 import BeautifulSoup \r\n\r\n\r\nheaders = {'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.79 Safari/537.36'} \r\n\r\n\r\n\r\ndef main():\r\n baseUrl = 'http://ip.yqie.com/proxyhttps/'\r\n html = requests.get(baseUrl,headers=headers)\r\n soup = BeautifulSoup(html.text,'lxml')\r\n\r\n tr_list = soup.findAll('tr')[1:]\r\n\r\n # print(tr_list)\r\n for item in tr_list:\r\n https = item.findAll('td')[1]\r\n ip = item.findAll('td')[2]\r\n print(str(https) + ':' + str(ip))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"10.【高效&反反爬】ip池/XXG_getIP.py","file_name":"XXG_getIP.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"12650297","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 19 17:03:15 2019\n\n@author: fubao\n\"\"\"\n\n\n\n# overcome the imbalanced dataset \n# not use oversampling or undersampling\n\n# use bagging classifier \n# it's said that the skleran bagging classifier can not solve the 
imbalanced problem\n\n# https://towardsdatascience.com/having-an-imbalanced-dataset-here-is-how-you-can-solve-it-1640568947eb\n\n\n\nimport sys\nimport os\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.tree import DecisionTreeClassifier\nfrom imblearn.ensemble import BalancedBaggingClassifier\n\nfrom common_classifier import load_data_all_features\nfrom sklearn.metrics import confusion_matrix\n\n\ncurrent_file_cur = os.path.dirname(os.path.abspath(__file__))\nsys.path.insert(0, current_file_cur + '/..')\n\nfrom profiling.common_prof import dataDir2\n\n\n\n\n\n\ndef emsembleClassifierTrainTest(X, y):\n\n # Splitting the dataset into the Training set and Test set\n \n X_Train, X_Test, Y_Train, Y_Test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n print (\"X_train X_test shape:\", X_Train.shape, X_Test.shape)\n \n # Feature Scaling\n sc_X = StandardScaler()\n X_Train = sc_X.fit_transform(X_Train)\n X_Test = sc_X.transform(X_Test)\n \n # Fitting the classifier into the Training set\n classifier = BalancedBaggingClassifier(base_estimator=DecisionTreeClassifier(),n_estimators = 200,\n sampling_strategy='auto',\n replacement=False,\n random_state=0)\n \n classifier.fit(X_Train,Y_Train)\n \n # Predicting the test set results\n \n Y_Pred = classifier.predict(X_Test)\n \n # Making the Confusion Matrix \n accuracy = classifier.score(X_Test, Y_Test) \n cm = confusion_matrix(Y_Test, Y_Pred)\n \n training_accuracy = classifier.score(X_Train, Y_Train) \n\n print (\"rftTrainTest testing acc, cm: \", accuracy, cm)\n \n print (\"rftTrainTest training acc, cm: \", training_accuracy)\n\n \ndef executeTest_feature_most_expensive_config():\n '''\n execute classification, where features are calculated from the pose esimation result derived from the most expensive config\n '''\n video_dir_lst = ['output_006-cardio_condition-20mins/', 'output_008-Marathon-20mins/'\n ] \n \n for video_dir in video_dir_lst[1:2]: #[1:2]: #[0:1]:\n \n data_examples_dir = dataDir2 + video_dir + 'data_examples_files/'\n\n xfile = 'X_data_features_config-history-frms1-sampleNum8025.pkl' # 'X_data_features_config-history-frms1-sampleNum8025.pkl'\n yfile = 'Y_data_features_config-history-frms1-sampleNum8025.pkl' #'Y_data_features_config-history-frms1-sampleNum8025.pkl'\n \n #xfile = 'X_data_features_config-weighted_interval-history-frms1-5-10-sampleNum8025.pkl' # 'X_data_features_config-history-frms1-sampleNum8025.pkl'\n #yfile = 'Y_data_features_config-weighted_interval-history-frms1-5-10-sampleNum8025.pkl' #'Y_data_features_config-history-frms1-sampleNum8025.pkl'\n X,y= load_data_all_features(data_examples_dir, xfile, yfile)\n \n emsembleClassifierTrainTest(X,y)\n \n \ndef executeTest_feature_selected_config():\n '''\n '''\n data_examples_dir = dataDir2 + 'output_006-cardio_condition-20mins/' + 'data_examples_files_feature_selected_config/'\n\n #xfile = 'X_data_features_config-history-frms25-sampleNum8025.pkl' # 'X_data_features_config-history-frms1-sampleNum8025.pkl'\n #yfile = 'Y_data_features_config-history-frms25-sampleNum8025.pkl' #'Y_data_features_config-history-frms1-sampleNum8025.pkl'\n \n xfile = 'X_data_features_config-history-frms1-sampleNum35765.pkl' # 'X_data_features_config-history-frms1-sampleNum8025.pkl'\n yfile = 'Y_data_features_config-history-frms1-sampleNum35765.pkl' #'Y_data_features_config-history-frms1-sampleNum8025.pkl'\n X,y= 
load_data_all_features(data_examples_dir, xfile, yfile)\n emsembleClassifierTrainTest(X,y)\n\n\nif __name__== \"__main__\": \n \n executeTest_feature_most_expensive_config()\n \n #executeTest_feature_selected_config()\n\n","sub_path":"backup_codes/classifierForSwitchConfig/classifier_ensembleClassifier.py","file_name":"classifier_ensembleClassifier.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"628778374","text":"# pie game\r\n# first simple game\r\n# created by zhangliangliang 2017/6/28\r\nimport sys\r\nimport pygame\r\nimport math\r\nfrom pygame.locals import *\r\npygame.init()\r\nSCREEN = pygame.display.set_mode((600,500))\r\npygame.display.set_caption(\"Pie Game\")\r\nMY_FONT = pygame.font.Font(None, 60) #set font\r\n\r\nCOLOR = 200, 80, 60 #set color\r\nWIDTH = 40\r\nX = 300\r\nY = 250\r\nRADIUS = 250 \r\nPOSTION = X - RADIUS, Y - RADIUS, RADIUS * 2, RADIUS *2\r\n\r\npiece1 = False\r\npiece2 = False\r\npiece3 = False\r\npiece4 = False\r\n\r\nwhile True:\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n sys.exit()\r\n elif event.type == KEYUP: \r\n if event.key == pygame.K_1:\r\n piece1 = True\r\n elif event.key == pygame.K_2:\r\n piece2 = True\r\n elif event.key == pygame.K_3:\r\n piece3 = True\r\n elif event.key == pygame.K_4:\r\n piece4 = True\r\n \r\n #clear the screen\r\n SCREEN.fill((0, 200, 200))\r\n\r\n #draw the four numbers\r\n textImg1 = MY_FONT.render(\"1\", True, COLOR)\r\n SCREEN.blit(textImg1, (X + RADIUS / 2 - 20, Y - RADIUS / 2))\r\n textImg2 = MY_FONT.render(\"2\", True, COLOR)\r\n SCREEN.blit(textImg2, (X - RADIUS / 2, Y - RADIUS / 2))\r\n textImg3 = MY_FONT.render(\"3\", True, COLOR)\r\n SCREEN.blit(textImg3, (X - RADIUS / 2, Y + RADIUS / 2 - 20))\r\n textImg4 = MY_FONT.render(\"4\", True, COLOR)\r\n SCREEN.blit(textImg4, (X + RADIUS / 2 - 20, Y + RADIUS / 2 - 20))\r\n\r\n #which one should be drawn\r\n if piece1 :\r\n start_angle = math.radians(0)\r\n end_angle = math.radians(90)\r\n pygame.draw.arc(SCREEN, COLOR, POSTION, start_angle, end_angle, WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X, Y - RADIUS), WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X + RADIUS, Y), WIDTH)\r\n if piece2 :\r\n start_angle = math.radians(90)\r\n end_angle = math.radians(180)\r\n pygame.draw.arc(SCREEN, COLOR, POSTION, start_angle, end_angle, WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X, Y - RADIUS), WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X - RADIUS, Y), WIDTH)\r\n if piece3 :\r\n start_angle = math.radians(180)\r\n end_angle = math.radians(270)\r\n pygame.draw.arc(SCREEN, COLOR, POSTION, start_angle, end_angle, WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X - RADIUS, Y), WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X, Y + RADIUS), WIDTH)\r\n if piece4 :\r\n start_angle = math.radians(270)\r\n end_angle = math.radians(360)\r\n pygame.draw.arc(SCREEN, COLOR, POSTION, start_angle, end_angle, WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X, Y + RADIUS), WIDTH)\r\n pygame.draw.line(SCREEN, COLOR, (X, Y), (X + RADIUS, Y), WIDTH)\r\n\r\n #is the pie finished\r\n if piece1 and piece2 and piece3 and piece4 :\r\n COLOR = 0, 250, 0\r\n textImg5 = MY_FONT.render(\"You win!\", True, COLOR)\r\n SCREEN.blit(textImg5, (0,0))\r\n 
pygame.display.update()","sub_path":"pygame_2.py","file_name":"pygame_2.py","file_ext":"py","file_size_in_byte":3050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"235521949","text":"# coding: utf8\nfrom __future__ import unicode_literals, print_function, division\nfrom unittest import TestCase\n\nfrom mock import patch, Mock\n\nfrom clldutils.testing import capture, capture_all\n\n\nclass Tests(TestCase):\n def test_ArgumentParser(self):\n from clldutils.clilib import ArgumentParser, ParserError, command\n\n def cmd(args):\n \"\"\"\n docstring\n \"\"\"\n if len(args.args) < 1:\n raise ParserError('not enough arguments')\n print(args.args[0])\n\n parser = ArgumentParser('pkg', cmd)\n\n with capture(parser.main, args=['help', 'cmd']) as out:\n self.assertIn('docstring', out)\n\n with capture(parser.main, args=['cmd', 'arg']) as out:\n self.assertIn('arg', out)\n\n self.assertEqual(parser.main(args=['cmd', 'arg']), 0)\n\n with capture(parser.main, args=['cmd']) as out:\n self.assertIn('not enough arguments', out)\n\n with capture_all(parser.main, args=['x']) as res:\n self.assertNotEqual(res[0], 0)\n self.assertTrue(res[1].startswith('invalid'))\n\n @command()\n def ls(args):\n \"\"\"\n my name is ls\n \"\"\"\n return\n\n @command(name='list')\n def f(args):\n \"\"\"\n my name is list\n \"\"\"\n return\n\n parser = ArgumentParser('pkg')\n with capture(parser.main, args=['help', 'ls']) as out:\n self.assertIn('my name is ls', out)\n\n with capture(parser.main, args=['help', 'list']) as out:\n self.assertIn('my name is list', out)\n\n self.assertEqual(parser.main(args=['ls', 'arg']), 0)\n self.assertEqual(parser.main(args=['list', 'arg']), 0)\n\n def test_cmd_error(self):\n from clldutils.clilib import ArgumentParser\n\n def cmd(args):\n raise ValueError\n\n parser = ArgumentParser('pkg', cmd)\n with self.assertRaises(ValueError):\n parser.main(args=['cmd'])\n\n self.assertEqual(parser.main(args=['cmd'], catch_all=True), 1)\n\n def test_confirm(self):\n from clldutils.clilib import confirm\n\n with patch('clldutils.clilib.input', Mock(return_value='')):\n self.assertTrue(confirm('a?'))\n self.assertFalse(confirm('a?', default=False))\n\n with patch('clldutils.clilib.input', Mock(side_effect=['x', 'y'])):\n with capture_all(confirm, 'a?') as res:\n self.assertTrue(res[0])\n self.assertIn('Please respond', res[1])\n","sub_path":"clldutils/tests/test_clilib.py","file_name":"test_clilib.py","file_ext":"py","file_size_in_byte":2569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"127649068","text":"from collections import (\n Mapping,\n)\nimport json\nimport os\n\nfrom cytoolz import (\n compose,\n)\nfrom eth_keyfile import (\n create_keyfile_json,\n decode_keyfile_json,\n)\nfrom eth_keys import (\n KeyAPI,\n keys,\n)\nfrom eth_keys.exceptions import (\n ValidationError,\n)\nfrom eth_utils import (\n is_dict,\n keccak,\n)\n\nfrom web3.utils.datastructures import (\n AttributeDict,\n HexBytes,\n)\nfrom web3.utils.decorators import (\n combomethod,\n)\nfrom web3.utils.encoding import (\n hexstr_if_str,\n text_if_str,\n to_bytes,\n to_int,\n)\nfrom web3.utils.signing import (\n LocalAccount,\n hash_of_signed_transaction,\n sign_message_hash,\n sign_transaction_dict,\n signature_wrapper,\n to_standard_signature_bytes,\n to_standard_v,\n)\nfrom web3.utils.transactions import (\n Transaction,\n vrs_from,\n)\n\n\nclass Account(object):\n _keys = keys\n\n @combomethod\n def create(self, 
extra_entropy=''):\n extra_key_bytes = text_if_str(to_bytes, extra_entropy)\n key_bytes = keccak(os.urandom(32) + extra_key_bytes)\n return self.privateKeyToAccount(key_bytes)\n\n @staticmethod\n def decrypt(keyfile_json, password):\n if isinstance(keyfile_json, str):\n keyfile = json.loads(keyfile_json)\n elif is_dict(keyfile_json):\n keyfile = keyfile_json\n else:\n raise TypeError(\"The keyfile should be supplied as a JSON string, or a dictionary.\")\n password_bytes = text_if_str(to_bytes, password)\n return decode_keyfile_json(keyfile, password_bytes)\n\n @staticmethod\n def encrypt(private_key, password):\n key_bytes = HexBytes(private_key)\n password_bytes = text_if_str(to_bytes, password)\n assert len(key_bytes) == 32\n return create_keyfile_json(key_bytes, password_bytes)\n\n @staticmethod\n def hashMessage(data=None, hexstr=None, text=None):\n message_bytes = to_bytes(data, hexstr=hexstr, text=text)\n recovery_hasher = compose(HexBytes, keccak, signature_wrapper)\n return recovery_hasher(message_bytes)\n\n @combomethod\n def privateKeyToAccount(self, private_key):\n key_bytes = HexBytes(private_key)\n try:\n key_obj = self._keys.PrivateKey(key_bytes)\n return LocalAccount(key_obj, self)\n except ValidationError as original_exception:\n raise ValueError(\n \"The private key must be exactly 32 bytes long, instead of \"\n \"%d bytes.\" % len(key_bytes)\n ) from original_exception\n\n @combomethod\n def recover(self, msghash, vrs=None, signature=None):\n hash_bytes = HexBytes(msghash)\n if vrs is not None:\n v, r, s = map(hexstr_if_str(to_int), vrs)\n v_standard = to_standard_v(v)\n signature_obj = self._keys.Signature(vrs=(v_standard, r, s))\n elif signature is not None:\n signature_bytes = HexBytes(signature)\n signature_bytes_standard = to_standard_signature_bytes(signature_bytes)\n signature_obj = self._keys.Signature(signature_bytes=signature_bytes_standard)\n else:\n raise TypeError(\"You must supply the vrs tuple or the signature bytes\")\n pubkey = signature_obj.recover_public_key_from_msg_hash(hash_bytes)\n return pubkey.to_checksum_address()\n\n @combomethod\n def recoverMessage(self, data=None, hexstr=None, text=None, vrs=None, signature=None):\n msg_hash = self.hashMessage(data, hexstr=hexstr, text=text)\n return self.recover(msg_hash, vrs=vrs, signature=signature)\n\n @combomethod\n def recoverTransaction(self, serialized_transaction):\n txn_bytes = HexBytes(serialized_transaction)\n txn = Transaction.from_bytes(txn_bytes)\n msg_hash = hash_of_signed_transaction(txn)\n return self.recover(msg_hash, vrs=vrs_from(txn))\n\n def setKeyBackend(self, backend):\n self._keys = KeyAPI(backend)\n\n @combomethod\n def sign(self, message=None, private_key=None, message_hexstr=None, message_text=None):\n '''\n @param private_key in bytes, str, or int.\n '''\n msg_bytes = to_bytes(message, hexstr=message_hexstr, text=message_text)\n msg_hash = self.hashMessage(msg_bytes)\n key_bytes = HexBytes(private_key)\n key = self._keys.PrivateKey(key_bytes)\n (v, r, s, eth_signature_bytes) = sign_message_hash(key, msg_hash)\n return AttributeDict({\n 'message': HexBytes(msg_bytes),\n 'messageHash': msg_hash,\n 'r': r,\n 's': s,\n 'v': v,\n 'signature': HexBytes(eth_signature_bytes),\n })\n\n @combomethod\n def signTransaction(self, transaction_dict, private_key):\n '''\n @param private_key in bytes, str, or int.\n '''\n assert isinstance(transaction_dict, Mapping)\n\n account = self.privateKeyToAccount(private_key)\n\n # sign transaction\n (\n v,\n r,\n s,\n rlp_encoded,\n ) = 
sign_transaction_dict(account._key_obj, transaction_dict)\n\n transaction_hash = keccak(rlp_encoded)\n\n return AttributeDict({\n 'rawTransaction': HexBytes(rlp_encoded),\n 'hash': HexBytes(transaction_hash),\n 'r': r,\n 's': s,\n 'v': v,\n })\n","sub_path":"sdk/web3.py/web3/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"596695390","text":"# Atividade - 7 \n# João Papo-de-Pescador, homem de bem, comprou um\n# microcomputador para controlar o rendimento diário de seu\n# trabalho. Toda vez que ele traz um peso de peixes maior que o\n# estabelecido pelo regulamento de pesca do estado de São Paulo (50\n# quilos8 deve pagar uma multa de R$ 4,00 por quilo excedente. João\n# precisa que você faça um programa que leia a variável peso (peso\n# de peixes8 e verifque se há excesso. Se houver, gravar na variável\n# excesso e na variável multa o valor da multa que João deverá pagar.\n# Caso contrário mostrar tais variáveis com o conteúdo ZERO.\n\n\npeso_peixe = int(input(\"digite o peso do peixe:\"))\npeso_excedente =0\nvalor_multa = 0.0\nif (peso_peixe>50):\n\tpeso_excedente = peso_peixe-50\n\tvalor_multa = peso_excedente*4.00\n\tprint (\"De acordo com o regulamento de SP foi excedido %skg a mais do estabelecido e pagará %sR$ de multa\" %(peso_excedente,valor_multa))\nelse:\n\tprint (\"Não houve peso em excesso %sKg portanto não há multa %sR$\" %(peso_excedente,valor_multa))","sub_path":"Exercicio1/atv07.py","file_name":"atv07.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"358982270","text":"import tensorflow as tf\n\nclass MLP_Model():\n def __init__(self,\n input_count,\n class_count):\n self.input_count = input_count\n self.class_count = class_count\n self.weight1 = tf.get_variable(\"weight1\",\n initializer=tf.zeros_initializer([input_count, class_count]))\n self.bias1 = tf.get_variable(\"bais1\", initializer=tf.zeros_initializer([class_count]))\n\n def build(self):\n image = tf.placeholder(tf.float32, [None, self.input_count])\n label = tf.placeholder(tf.float32, [None, self.class_count])\n output = self.classify(image)\n self.cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=output))\n predicted_class = tf.nn.softmax(output)\n return image, label, predicted_class, self.cross_entropy\n\n def train(self):\n train_step = tf.train.GradientDescentOptimizer(0.5).minimize(self.cross_entropy)\n return train_step\n\n def classify(self, image):\n output = tf.matmul(image, self.weight1) + self.bias1\n return output\n\nclass Convolution_Model():\n def __init__(self,\n input_count,\n class_count):\n self.input_count = input_count\n self.class_count = class_count\n self.weight1 = tf.get_variable(\"weight1\", [5, 5, 1, 32],\n initializer=tf.truncated_normal_initializer(stddev=0.1))\n self.bias1 = tf.get_variable(\"bais1\", [32], initializer=tf.constant_initializer(0.1))\n self.weight2 = tf.get_variable(\"weight2\", [5, 5, 32, 64],\n initializer=tf.truncated_normal_initializer(stddev=0.1))\n self.bias2 = tf.get_variable(\"bais2\", [64], initializer=tf.constant_initializer(0.1))\n self.weight3 = tf.get_variable(\"weight3\", [7 * 7 * 64, 1024],\n initializer=tf.truncated_normal_initializer(stddev=0.1))\n self.bias3 = tf.get_variable(\"bais3\", [1024], initializer=tf.constant_initializer(0.1))\n self.weight4 = tf.get_variable(\"weight4\", 
[1024, 10],\n initializer=tf.truncated_normal_initializer(stddev=0.1))\n self.bias4 = tf.get_variable(\"bais4\", [10], initializer=tf.constant_initializer(0.1))\n\n def build(self):\n image = tf.placeholder(tf.float32, [None, self.input_count])\n label = tf.placeholder(tf.float32, [None, self.class_count])\n self.keep_prob = tf.placeholder(tf.float32)\n output = self.classify(image)\n self.cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=output))\n predicted_class = tf.nn.softmax(output)\n return image, label, predicted_class, self.cross_entropy, self.keep_prob\n\n def train(self):\n train_step = tf.train.AdamOptimizer(1e-4).minimize(self.cross_entropy)\n return train_step\n\n def classify(self, image):\n input_image = tf.reshape(image, [-1, 28, 28, 1])\n\n h1 = tf.nn.relu(tf.nn.conv2d(input_image, self.weight1, strides=[1, 1, 1, 1], padding=\"SAME\") + self.bias1)\n h1 = tf.nn.max_pool(h1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n\n h2 = tf.nn.relu(tf.nn.conv2d(h1, self.weight2, strides=[1, 1, 1, 1], padding=\"SAME\") + self.bias2)\n h2 = tf.nn.max_pool(h2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n\n h3 = tf.reshape(h2, [-1, 7 * 7 * 64])\n h3 = tf.nn.relu(tf.matmul(h3, self.weight3) + self.bias3)\n\n h4 = tf.nn.dropout(h3, self.keep_prob)\n\n output = tf.matmul(h4, self.weight4) + self.bias4\n return output\n\n\n\n","sub_path":"GenerativeNetworks/mnist/classification/classification_models.py","file_name":"classification_models.py","file_ext":"py","file_size_in_byte":3703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"187326588","text":"import os\nimport tkinter\nfrom tkinter import Menu\nfrom tk_builder.panel_builder import WidgetPanel\nfrom tk_builder.widgets.axes_image_canvas import AxesImageCanvas\n\nfrom tk_builder.image_readers.geotiff_reader import GeotiffImageReader\nfrom tk_builder.widgets import widget_descriptors\nfrom tkinter import filedialog\nfrom example_apps.geotiff_viewer.panels.band_selection import BandSelection\nfrom example_apps.geotiff_viewer.panels.controls import Controls\nfrom tk_builder.image_readers.numpy_image_reader import NumpyImageReader\n\n\nclass GeotiffViewer(WidgetPanel):\n \"\"\"\n A geotiff viewer prototype.\n \"\"\"\n _widget_list = (\"band_selection_panel\", \"controls_panel\", \"geotiff_image_panel\", \"zoom_image_panel\")\n geotiff_image_panel = widget_descriptors.AxesImageCanvasDescriptor(\"geotiff_image_panel\") # type: AxesImageCanvas\n zoom_image_panel = widget_descriptors.AxesImageCanvasDescriptor(\"zoom_image_panel\") # type: AxesImageCanvas\n band_selection_panel = widget_descriptors.PanelDescriptor(\"band_selection_panel\", BandSelection) # type: BandSelection\n controls_panel = widget_descriptors.PanelDescriptor(\"controls_panel\", Controls) # type: Controls\n image_reader = None # type: GeotiffImageReader\n\n def __init__(self, primary):\n \"\"\"\n\n Parameters\n ----------\n primary\n The primary widget.\n \"\"\"\n\n self.primary = primary\n\n primary_frame = tkinter.Frame(primary)\n WidgetPanel.__init__(self, primary_frame)\n\n self.init_w_horizontal_layout()\n\n self.geotiff_image_panel.set_canvas_size(800, 1080)\n self.geotiff_image_panel.canvas.set_current_tool_to_pan()\n\n menubar = Menu()\n\n filemenu = Menu(menubar, tearoff=0)\n filemenu.add_command(label=\"Open\", command=self.select_file)\n filemenu.add_separator()\n filemenu.add_command(label=\"Exit\", command=self.exit)\n\n # create more 
pulldown menus\n popups_menu = Menu(menubar, tearoff=0)\n popups_menu.add_command(label=\"Main Controls\", command=self.exit)\n\n menubar.add_cascade(label=\"File\", menu=filemenu)\n menubar.add_cascade(label=\"Popups\", menu=popups_menu)\n\n primary.config(menu=menubar)\n\n primary_frame.pack()\n\n self.band_selection_panel.red_selection.on_selection(self.callback_update_red_band)\n self.band_selection_panel.green_selection.on_selection(self.callback_update_green_band)\n self.band_selection_panel.blue_selection.on_selection(self.callback_update_blue_band)\n self.band_selection_panel.alpha_selection.on_selection(self.callback_update_alpha_band)\n\n self.controls_panel.pan.on_left_mouse_click(self.callback_set_to_pan)\n self.controls_panel.select.on_left_mouse_click(self.callback_set_to_select)\n self.geotiff_image_panel.canvas.on_left_mouse_release(self.callback_select)\n\n def exit(self):\n \"\"\"\n Exits/destroys the widget.\n\n Returns\n -------\n None\n \"\"\"\n\n self.quit()\n\n def select_file(self, fname=None):\n \"\"\"\n File selector action. Will open a file selector dialog if `None`.\n\n Parameters\n ----------\n fname : str|None\n\n Returns\n -------\n None\n \"\"\"\n\n if fname is None:\n fname = filedialog.askopenfilename(initialdir=os.path.expanduser(\"~\"),\n title=\"Select file\",\n filetypes=((\"tiff files\", (\"*.tif\", \"*.tiff\", \"*.TIF\", \"*.TIFF\")),\n (\"all files\", \"*.*\"))\n )\n self.image_reader = GeotiffImageReader(fname)\n self.geotiff_image_panel.set_image_reader(self.image_reader)\n self.populate_band_selections()\n\n def populate_band_selections(self):\n \"\"\"\n Helper method for populating the band selection.\n\n Returns\n -------\n None\n \"\"\"\n\n bands = self.image_reader.n_bands\n band_selections = [str(band) for band in range(bands)]\n band_selections.append(\"None\")\n self.band_selection_panel.red_selection.update_combobox_values(band_selections)\n self.band_selection_panel.green_selection.update_combobox_values(band_selections)\n self.band_selection_panel.blue_selection.update_combobox_values(band_selections)\n self.band_selection_panel.alpha_selection.update_combobox_values(band_selections)\n\n self.band_selection_panel.red_selection.set(str(self.image_reader.display_bands[0]))\n self.band_selection_panel.green_selection.set(str(self.image_reader.display_bands[1]))\n self.band_selection_panel.blue_selection.set(str(self.image_reader.display_bands[2]))\n if len(self.image_reader.display_bands) > 3:\n self.band_selection_panel.alpha_selection.set(str(self.image_reader.display_bands[3]))\n else:\n self.band_selection_panel.alpha_selection.set(\"None\")\n\n def callback_update_red_band(self, event):\n \"\"\"\n Update the read band.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n \"\"\"\n\n red_band = self.band_selection_panel.red_selection.get()\n band_num = 0\n if red_band == \"None\":\n if band_num not in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:\n self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.append(band_num)\n else:\n if band_num in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:\n self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.remove(band_num)\n self.image_reader.display_bands[band_num] = int(red_band)\n self.geotiff_image_panel.canvas.update_current_image()\n\n def callback_update_green_band(self, event):\n \"\"\"\n Update the green band.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n 
\"\"\"\n\n green_band = self.band_selection_panel.green_selection.get()\n band_num = 1\n if green_band == \"None\":\n if band_num not in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:\n self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.append(1)\n else:\n if band_num in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:\n self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.remove(1)\n self.image_reader.display_bands[1] = int(green_band)\n self.geotiff_image_panel.canvas.update_current_image()\n\n def callback_update_blue_band(self, event):\n \"\"\"\n Update the blue band.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n \"\"\"\n\n band_num = 2\n blue_band = self.band_selection_panel.blue_selection.get()\n if blue_band == \"None\":\n if band_num not in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:\n self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.append(band_num)\n else:\n if band_num in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:\n self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.remove(band_num)\n self.image_reader.display_bands[band_num] = int(blue_band)\n self.geotiff_image_panel.canvas.update_current_image()\n\n def callback_update_alpha_band(self, event):\n \"\"\"\n Update the alpha channel.\n\n Parameters\n ----------\n event\n\n Returns\n -------\n None\n \"\"\"\n\n alpha_band = self.band_selection_panel.alpha_selection.get()\n band_num = 3\n if len(self.image_reader.display_bands) == 3:\n self.image_reader.display_bands.append(band_num)\n if alpha_band == \"None\":\n self.image_reader.display_bands = self.image_reader.display_bands[0:3]\n else:\n if band_num in self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands:\n self.geotiff_image_panel.canvas.variables.canvas_image_object.drop_bands.remove(band_num)\n self.image_reader.display_bands[band_num] = int(alpha_band)\n self.geotiff_image_panel.canvas.update_current_image()\n\n def callback_set_to_pan(self, event):\n self.geotiff_image_panel.canvas.set_current_tool_to_pan()\n\n def callback_set_to_select(self, event):\n self.geotiff_image_panel.canvas.set_current_tool_to_selection_tool()\n\n def callback_select(self, event):\n data = self.geotiff_image_panel.canvas.get_image_data_in_canvas_rect_by_id(self.geotiff_image_panel.canvas.variables.select_rect_id, decimation=1)\n data = data[:, :, 0]\n reader = NumpyImageReader(data)\n self.zoom_image_panel.set_image_reader(reader)\n self.zoom_image_panel.canvas.update_current_image()\n\n\nif __name__ == '__main__':\n root = tkinter.Tk()\n app = GeotiffViewer(root)\n root.mainloop()\n","sub_path":"example_apps/geotiff_viewer/geotiff_viewer.py","file_name":"geotiff_viewer.py","file_ext":"py","file_size_in_byte":9290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"215979935","text":"import time # 时间库\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport mnist_forward\nimport mnist_backward\nTEST_INTERVAL_SECS = 5 # 延迟5秒\n\n\ndef test(mnist):\n with tf.Graph().as_default() as g:\n # 其内定义的结点在计算图g中复现\n x = tf.placeholder(\n tf.float32,\n [None, mnist_forward.INPUT_NODE]\n ) # 输入占位\n y_ = tf.placeholder(\n tf.float32,\n [None, mnist_forward.OUTPUT_NODE]\n ) # 标准答案占位\n y = mnist_forward.forward(x, None) # 模型训练结果\n # 滑动平均\n ema = tf.train.ExponentialMovingAverage(\n 
mnist_backward.MOVING_AVERAGE_DECAY)\n ema_restore = ema.variables_to_restore()\n # 实例化可以还原滑动平均值的saver\n saver = tf.train.Saver(ema_restore)\n # 准确率计算:比较y和y_第一维度的最大值是否相等 如果相等则说明预测正确\n # 这里用最大值的原因是不同的测试图片神经网络会给出每个数字的概率 概率最大的即认定为计算机识别出来的\n # arg_max(x,axis):返回x中维度为axis的最大值所在的索引\n correct_prediction = tf.equal(tf.arg_max(y, 1), tf.arg_max(y_, 1))\n # cast(x,dtype):将x转换为dtype类型\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\n while True:\n # 加载模型\n with tf.Session() as sess:\n ckpt = tf.train.get_checkpoint_state(\n mnist_backward.MODEL_SAVE_PATH\n ) # 从目标路径中加载模型\n if ckpt and ckpt.model_checkpoint_path:\n # 如果有模型则恢复模型\n saver.restore(\n sess,\n ckpt.model_checkpoint_path\n )\n # string.split方法:按指定拆分符对字符串切片 返回分割后的列表\n global_step = ckpt.model_checkpoint_path.split(\n '/')[-1].split('-')[-1]\n accuracy_score = sess.run(\n accuracy,\n feed_dict={\n x: mnist.test.images,\n y_: mnist.test.labels\n }\n )\n print(\"After %s training steps, test accuracy = %g\" %\n (global_step, accuracy_score))\n else:\n # 否则提示模型未找到\n print(\"No checkpoint file found\")\n time.sleep(TEST_INTERVAL_SECS) # 暂停5秒\n\n\ndef main():\n mnist = input_data.read_data_sets(\n \"./data/\",\n one_hot=True\n )\n test(mnist)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"mnist_test.py","file_name":"mnist_test.py","file_ext":"py","file_size_in_byte":2881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"379975299","text":"import argparse\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport torch \nfrom torch import nn\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import models\nimport torchvision.datasets as dsets\nimport torch.utils.data as Data\nimport random\nfrom torch.autograd import Variable\nfrom torch import randn\nfrom torch import randint\nfrom model import Generator, Discriminator\n\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ncuda = True if torch.cuda.is_available() else False\n\n#parse arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-e\",\"--epochs\", type=int, default=200, help=\"number of epochs\")\nparser.add_argument(\"-b\",\"--batch_size\", type=int, default=32, help=\"batch size\")\nparser.add_argument(\"-mn\",\"--model_name\", type=str, default=\"model\", help=\"model name for saving\")\nparser.add_argument(\"-lr\",\"--learning_rate\", type=float, default=0.0002, help=\"learning rate\")\nopt = parser.parse_args()\nprint(opt)\n\nmodel_folder_path = \"models/\"+opt.model_name+\"/\"\nos.makedirs(model_folder_path,exist_ok=True)\n\n# parameters\nEPOCH = opt.epochs\nBATCH_SIZE = opt.batch_size\nLR = opt.learning_rate\nD_UPD_NUM = 1\nG_UPD_NUM = 1\n\n#######################################################################\nFloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\nLongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor\nadversarial_loss = torch.nn.BCELoss().cuda()\nauxiliary_loss = torch.nn.MSELoss().cuda()\n\ndef update(iterator, generator, discriminator, optimizer_D, optimizer_G):\n \n for i, batch in enumerate(iterator):\n # Configure input\n real_imgs = batch[0].to(device)\n shuf_imgs = batch[1].to(device)\n text = batch[2].to(device)\n\n real_imgs = real_imgs.permute(0,3,1,2)\n shuf_imgs = shuf_imgs.permute(0,3,1,2)\n batch_size = real_imgs.shape[0]\n\n valid = torch.ones(batch_size,1, requires_grad=False).to(device)\n fake = 
torch.zeros(batch_size,1, requires_grad=False).to(device)\n # -----------------\n # Train Generator\n # -----------------\n for _ in range(3):\n optimizer_G.zero_grad()\n\n # Sample noise as generator input\n noise = randn(batch_size, 100).to(device)\n #noise = Variable(FloatTensor(np.random.normal(0,1,(batch_size, 100))))\n #gen_text = Variable(LongTensor(np.random.randint(0,22,batch_size)))\n gen_text_1 = torch.zeros(batch_size,12,requires_grad=False).to(device)\n gen_text_2 = torch.zeros(batch_size,10,requires_grad=False).to(device)\n one_pos_for_gtext_1 = randint(0,12,(batch_size,))\n one_pos_for_gtext_2 = randint(0,10,(batch_size,))\n for idx_1,text_1 in enumerate(gen_text_1): text_1[one_pos_for_gtext_1[idx_1]] = 1\n for idx_2,text_2 in enumerate(gen_text_2): text_2[one_pos_for_gtext_2[idx_2]] = 1\n gen_text = torch.cat((gen_text_1,gen_text_2),1)\n \n # Generate a batch of images\n gen_imgs = generator(noise, gen_text)\n\n # Loss measures generator's ability to fool the discriminator\n validity, pred_label = discriminator(gen_imgs, gen_text)\n \n pred_label = Variable(pred_label)\n #gen_text = Variable(gen_text)\n #gen_text = gen_text.long()\n g_loss_1 = adversarial_loss(validity, valid)\n #print(gen_text)\n #g_loss_2 = auxiliary_loss(pred_label, torch.argmax(gen_text,1))\n g_loss_2 = auxiliary_loss(pred_label, gen_text)\n \n g_loss = 0.5*(g_loss_1+g_loss_2)\n g_loss.backward()\n optimizer_G.step()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n for _ in range(1):\n optimizer_D.zero_grad()\n\n # Measure discriminator's ability to classify real from generated samples\n #loss for real images \n \n real_pred, real_aux = discriminator(real_imgs, text)\n d_real_loss_1 = adversarial_loss(real_pred, valid)\n d_real_loss_2 = auxiliary_loss(real_aux, text)\n d_real_loss = 0.5*(d_real_loss_1 + d_real_loss_2)\n \n #loss for fake image\n fake_pred, fake_aux = discriminator(gen_imgs.detach(), text)\n d_fake_loss_1 = adversarial_loss(fake_pred, fake) \n d_fake_loss_2 = auxiliary_loss(fake_aux, text)\n d_fake_loss = 0.5*(d_fake_loss_1 + d_fake_loss_2)\n \n #total d_loss\n d_loss = (d_real_loss + d_fake_loss) / 2\n\n d_loss.backward()\n optimizer_D.step()\n print(\n \" [Batch %d/%d] [D loss: %f] [G loss: %f]\"\n % ( i, len(iterator), d_loss.item(), g_loss.item()),\n end = '\\r'\n )\n\n\n#######################################################################\n\ntrain_df = pd.read_pickle('../train_img.pkl')\nreal_img_arr = np.concatenate(train_df.values[0],axis=0)[:]\nshuf_img_arr = real_img_arr\nnp.random.shuffle(shuf_img_arr)\n\n\n# ITER_NUM = len(real_img) // BATCH_SIZE\n# print(len(real_img_arr))\n# print(real_img_arr[0])\n# print(real_img_arr[0].shape)\n\ntags_df = pd.read_csv('../extra_data/tags.csv')\ntags = tags_df['attr']\ntags_list = []\ntags_num_list = []\nfor tag in tags:\n tag = tag[1]\n\nfor tag in tags:\n tags_list.append(tag.split())\n\n# ‘color hair’\n# 'orange hair', 0 ,'white hair', 1, 'aqua hair', 2, 'gray hair', 3\n# 'green hair', 4, 'red hair', 5, 'purple hair', 6, 'pink hair', 7\n# 'blue hair', 8, 'black hair', 9, 'brown hair', 10, 'blonde hair', 11\n# ‘color eyes’\n# 'black eyes', 12, 'orange eyes', 13\n# 'pink eyes', 14, 'yellow eyes', 15, 'aqua eyes', 16, 'purple eyes', 17\n# 'green eyes', 18, 'brown eyes', 19, 'red eyes', 20, 'blue eyes', 21\n# two hot \n\nfor tag_list in tags_list:\n if(tag_list[0] == 'orange'): num = 0\n elif(tag_list[0] == 'white'): num = 1\n elif (tag_list[0] == 'aqua'): num = 2\n elif (tag_list[0] == 'gray'): num = 3\n 
elif (tag_list[0] == 'green'): num = 4\n elif (tag_list[0] == 'red'): num = 5\n elif (tag_list[0] == 'purple'): num = 6\n elif (tag_list[0] == 'pink'): num = 7\n elif (tag_list[0] == 'blue'): num = 8\n elif (tag_list[0] == 'black'): num = 9\n elif (tag_list[0] == 'brown'): num = 10\n elif (tag_list[0] == 'blonde'): num = 11\n\n if(tag_list[2] == 'black'): num_2 = 12\n elif(tag_list[2] == 'orange'): num_2 = 13\n elif(tag_list[2] == 'pink'): num_2 = 14\n elif(tag_list[2] == 'yellow'): num_2 = 15\n elif(tag_list[2] == 'aqua'): num_2 = 16\n elif(tag_list[2] == 'purple'): num_2 = 17\n elif(tag_list[2] == 'green'): num_2 = 18\n elif(tag_list[2] == 'brown'): num_2 = 19\n elif(tag_list[2] == 'red'): num_2 = 20\n elif(tag_list[2] == 'blue'): num_2 = 21\n\n num_list = np.zeros(22)\n num_list[num] = 1\n num_list[num_2] = 1\n tags_num_list.append(num_list)\n\nreal_img_tensor = torch.from_numpy(real_img_arr).float()\nshuf_img_tensor = torch.from_numpy(shuf_img_arr).float()\ntags_tensor = torch.Tensor(tags_num_list)\ntorch_dataset = Data.TensorDataset(real_img_tensor, shuf_img_tensor, tags_tensor)\n\nloader = Data.DataLoader( \n dataset = torch_dataset,\n batch_size = BATCH_SIZE,\n shuffle = True,\n num_workers = 2 \n)\n\nG = Generator().cuda()\nD = Discriminator().cuda()\n\noptimizer_g = optim.Adam(G.parameters(),lr= LR*0.5, betas = (0.5, 0.99))\noptimizer_d = optim.Adam(D.parameters(),lr= LR, betas = (0.5, 0.99))\n\n\nfor i in range(EPOCH):\n #train\n print(\"start training epoch\"+str(i))\n \n update(loader, G, D,optimizer_d, optimizer_g)\n #for j in range(D_UPD_NUM): \n # update_d(loader, G, D, optimizer_d)\n #for j in range(G_UPD_NUM):\n # update_g(BATCH_SIZE, ITER_NUM, G, D, optimizer_g)\n if(i%5==4):\n torch.save(G.state_dict(), model_folder_path+'G'+ str(i)+'.pkl')\n torch.save(D.state_dict(), model_folder_path+'D'+ str(i)+'.pkl')\n","sub_path":"hw3/hw3-2 3-3/hw3_2/acgan/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"632295792","text":"class Checker:\n\n def __init__(self, coordinates, color, deck):\n\n self.coords = coordinates\n self.color = color\n self.deck = deck\n\n def step(self, move_dest, valid_condition1, valid_condition2):\n \"\"\"\n Method describes posibility of piece to move\n :param move_dest: tuple of coordination\n :param valid_condition1: tuple with allowed coordination for moving\n :param valid_condition2: tuple with allowed coordination for moving\n :return: bool True/False\n \"\"\"\n correct_destination = (move_dest == valid_condition1 or move_dest == valid_condition2)\n\n if self.deck[self.coords[0]][self.coords[1]] == self.color:\n\n if correct_destination and self.is_empty(move_dest):\n result = True\n\n else:\n result = False\n return result\n\n else:\n print('This is empty field')\n result = False\n return result\n\n def empty_field_list(self):\n \"\"\"\n Method calculates empty fields on the deck\n :return: list of coordinates\n :rtype: list\n \"\"\"\n empty_field = []\n\n for x in range(len(self.deck)):\n for y in range(len(self.deck[x])):\n if self.deck[x][y] == ' ':\n empty_field.append((x, y))\n\n return empty_field\n\n def is_empty(self, move_dest):\n \"\"\"\n Method checks of is field is empty\n :param move_dest: coords of square to move\n :return: bool True/False\n \"\"\"\n if move_dest in self.empty_field_list():\n is_empty = True\n else:\n print('Field is not empty')\n is_empty = False\n\n return is_empty\n\n def 
attack_needed(self, checkers_army):\n \"\"\"\n Method checks of attack opportunity\n :param checkers_army: list of tuples\n :return: bool True/False\n \"\"\"\n enemy_army = []\n\n for x in range(len(self.deck)):\n for y in range(len(self.deck[x])):\n if self.deck[x][y] != ' ' and self.deck[x][y] != self.color:\n enemy_army.append((x, y))\n print('Enemy army', enemy_army)\n first_check = []\n second_check = []\n\n for i in checkers_army:\n\n for j in enemy_army:\n\n if j == (i[0] + 1, i[1] + 1) or j == (i[0] - 1, i[1] - 1) or j == (i[0] + 1, i[1] - 1) or j == (\n i[0] - 1, i[1] + 1):\n first_check.append(j)\n\n for j in first_check:\n for ef in self.empty_field_list():\n\n if ef == (j[0] + 1, j[1] + 1) or ef == (j[0] - 1, j[1] - 1) or ef == (j[0] + 1, j[1] - 1) or ef == (\n j[0] - 1, j[1] + 1):\n second_check.append(ef)\n\n if not second_check:\n result = False\n\n elif second_check:\n print('You need to attack!!!')\n result = True\n\n else:\n result = False\n\n return result\n\n def attack_targets(self):\n \"\"\"\n Method configurates dictionary with targets and fields for jumping\n :return: dict\n \"\"\"\n targ_1 = self.coords[0] - 1, self.coords[1] - 1\n targ_2 = self.coords[0] - 1, self.coords[1] + 1\n targ_3 = self.coords[0] + 1, self.coords[1] - 1\n targ_4 = self.coords[0] + 1, self.coords[1] + 1\n\n step_attack_1 = self.coords[0] - 2, self.coords[1] - 2\n step_attack_2 = self.coords[0] - 2, self.coords[1] + 2\n step_attack_3 = self.coords[0] + 2, self.coords[1] - 2\n step_attack_4 = self.coords[0] + 2, self.coords[1] + 2\n\n dict_attack = {step_attack_1: targ_1, step_attack_2: targ_2, step_attack_3: targ_3, step_attack_4: targ_4}\n\n return dict_attack\n\n def attack(self, move_dest):\n \"\"\"\n Method describes and rewrites deck after attack\n :param move_dest: tuple(x, y) - coordination to go\n :return: rewritten deck\n \"\"\"\n dict_attack = self.attack_targets()\n\n if move_dest in dict_attack and self.is_empty(move_dest):\n target = dict_attack[move_dest]\n\n if self.deck[target[0]][target[1]] != self.color and self.deck[target[0]][target[1]] != ' ':\n self.deck[move_dest[0]][move_dest[1]] = self.deck[self.coords[0]][self.coords[1]]\n self.deck[self.coords[0]][self.coords[1]] = ' '\n self.deck[target[0]][target[1]] = ' '\n return self.deck\n\n else:\n print('This step is not correct. 
Do another one')\n","sub_path":"Basov_Dmytrii/Project/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":4607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"5981626","text":"\n\n\"\"\"PyAudio example: Record a few seconds of audio and save to a WAVE file.\"\"\"\n\nimport matplotlib.pyplot as plt\nimport pyaudio\nimport wave\nimport calcbytes as cb\nimport numpy as np\nimport struct\nCHUNK = 1024\nFORMAT = pyaudio.paInt16\nCHANNELS = 1\nRATE = 44100\nRECORD_SECONDS = 1\nWAVE_OUTPUT_FILENAME = \"output.wav\"\n\np = pyaudio.PyAudio()\n\nstream = p.open(format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n\t\toutput=True,\n frames_per_buffer=CHUNK)\n\nprint(\"* recording\")\n\nframes = []\nframesint=np.array([])\n\nfor i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):\n data = stream.read(CHUNK)\n framesint=np.concatenate((framesint,np.array(struct.unpack(\"%dh\" % (CHUNK), data)))) \n \n frames.append(data)\n\t\nprint(\"* done recording\")\n\n\nstream.stop_stream()\nstream.close()\np.terminate()\n\n\nplt.plot(framesint)\nplt.show()\n\nwf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')\nwf.setnchannels(CHANNELS)\nwf.setsampwidth(p.get_sample_size(FORMAT))\nwf.setframerate(RATE)\nwf.writeframes(b''.join(frames))\nwf.close()\n\n#if __name__ == '__main__':\n#\tmain()\n\n","sub_path":"writeWave.py","file_name":"writeWave.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"194552411","text":"import unittest\nfrom neat.utils import _has_probability, zip_with_probabilities, weighted_random\n\n\n\nclass TestUtils(unittest.TestCase):\n\tdef test_has_probability(self):\n\t\tprint(\"Testing _has_probability()\")\n\t\thp = _has_probability\n\n\t\tself.assertEquals(False, hp('word'), msg=\"_has_probability('word') : Failed\")\n\t\tself.assertEquals(False, hp(('word', '0.5')), msg=\"_has_probability(('word', '0.5')) : Failed\")\n\t\tself.assertEquals(True, hp(('word', 0.5)), msg=\"_has_probability(('word', 0.5)) : Failed\")\n\t\tself.assertEquals(True, hp(['word', 0.5]), msg=\"_has_probability(['word', 0.5]) : Failed\")\n\n\n\tdef test_zip_with_probabilities(self):\n\t\tprint(\"Testing zip_with_probabilities()\")\n\t\tzp = zip_with_probabilities\n\n\t\tself.assertEquals(\n\t\t\t[('a', .25), ('b', .25), ('c', .25), ('d', .25)],\n\t\t\tzp(['a', 'b', 'c', 'd']),\n\t\t\tmsg=\"zip_with_probabilities(['a', 'b', 'c', 'd']) : Failed\")\n\n\t\tself.assertEquals(\n\t\t\t[('a', .25), ('b', .25), ('c', .5)],\n\t\t\tzp([('a', .25), ('b', .25), 'c']),\n\t\t\tmsg=\"zip_with_probabilities([('a', .25), ('b', .25), 'c']) : Failed\")\n\n\t\tself.assertEquals(\n\t\t\t[('a', .25), ('b', .25), ('c', .5)],\n\t\t\tzp([('a', .25), ('b', .25), 'c']),\n\t\t\tmsg=\"zip_with_probabilities([('a', .25), ('b', .25), 'c']) : Failed\")\n\n\t\tself.assertEquals(\n\t\t\t[('a', .25), ('b', .25), ('c', .5)],\n\t\t\tzp([('a', .25), ('b', .25), ('c', .5)]),\n\t\t\tmsg=\"zip_with_probabilities([('a', .25), ('b', .25), ('c', .5)]) : Failed\")\n\n\t\tself.assertEquals(\n\t\t\t[],\n\t\t\tzp([]),\n\t\t\tmsg=\"zip_with_probabilities([]) : Failed\")\n\n\n\n\tdef test_weighted_random(self):\n\t\tprint(\"Testing weighted_random() (this one takes a long time, it's OK)\")\n\n\t\t# assert that ValueError is raised when trying to choose from empty list\n\t\tself.assertRaises(\n\t\t\tValueError,\n\t\t\tweighted_random,\n\t\t\t[]\n\t\t\t)\t\t\n\n\t\ttypes = [('a', 0.1), ('b', 0.1), 
('c', 0.1), ('d', 0.1), ('e', 0.6)]\n\n\t\tn_trials = 250000\n\t\tfreqs = {'a':0, 'b':0, 'c':0, 'd':0, 'e':0}\n\n\t\tfor _ in range(n_trials):\n\t\t\tfreqs[weighted_random(types)] += 1\n\n\t\tfreqs = {t: round(freqs[t] / (1.*n_trials), 2) for t in freqs}\n\n\t\tself.assertEquals(\n\t\t\tset(freqs.items()),\n\t\t\tset(types),\n\t\t\tmsg=\"weighted_random gives wrong frequencies of choices\"\n\t\t\t)","sub_path":"tests/utils_tests.py","file_name":"utils_tests.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"45601718","text":"#for creating a histogram that depicts the 50 most common words in the supplied text file\r\nfrom collections import Counter\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nwords = open('tweets.txt', 'r').read()\r\nwords = words.split()\r\nword_count = map(lambda w:(w, words.count(w)),set(words))\r\nbow = dict(word_count)\r\n\r\n\r\nbow_cnt = Counter(bow)\r\nsorted_list = dict(bow_cnt.most_common(50))\r\n\r\n#print(sorted_list)\r\n\r\nlabels = list(sorted_list.keys())\r\nvalues = list(sorted_list.values())\r\n#print(labels)\r\n#print(values)\r\n\r\nindexes = np.arange(len(labels))\r\nwidth = 1\r\nplt.bar(indexes, values, 1)\r\nplt.xticks(indexes, labels, rotation = 'vertical')\r\nplt.show()\r\n\r\n\r\n","sub_path":"most_freq_plt.py","file_name":"most_freq_plt.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"617718154","text":"#monisha\nx1=int(input())\ny=input().split()\ny.sort()\nzu=0\nza=len(y)\nwhile(za>0):\n\tr=int(y[za-1])\n\tzu=zu*10+r\n\tza-=1\nprint(zu)\n","sub_path":"hun1a2.py","file_name":"hun1a2.py","file_ext":"py","file_size_in_byte":125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"391460459","text":"#Write a program that asks the user for one or more sentences and\n#then lets the user know if it is a palindrome.\nimport re\n\ndef raw_sentence():\n \"\"\"Returns exact user input\"\"\"\n return input(\"\\nGive me a sentence and I will tell you if it is a palindrome \\n >\" )\n\n\ndef strip_sentence(raw_sentence):\n \"\"\"Gets user sentence, returns without spaces and punctuation\"\"\"\n stripped_sentence = re.sub(r'[^A-Za-z]', \"\", raw_sentence.lower())\n return stripped_sentence\n\ndef is_palindrome(stripped_sentence):\n if (stripped_sentence):\n if (stripped_sentence[0] == stripped_sentence[-1]):\n result = is_palindrome(stripped_sentence[1:-1])\n print(stripped_sentence[1:-1])\n print(\"is a palindrome\")\n if result:\n return True\n else:\n return True\n return False\n\nif __name__ == \"__main__\":\n answer = is_palindrome(strip_sentence(raw_sentence()))\n print(\"ANSWER:\")\n if not answer:\n print(\"is not a palindrome\")\n else:\n print(\"is a palindrome\")","sub_path":"palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"615759536","text":"# -*- coding: utf-8 -*-\n'''Dork game main module'''\n\n__version__ = '0.1.0'\n__author__ = \", \".join([\n \"Luke Smith\",\n \"Casey Jones\",\n \"Gregory Dews\",\n \"Derek Haviland\",\n \"Edwin Hernandez Sanchez\",\n \"Matt Hurt\",\n \"David Kittleso\",\n \"Daniel 
L'Episcopo\"\n])\n","sub_path":"dork/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"306034637","text":"import numpy as np\nimport keras\nimport keras.backend as K\nfrom keras import optimizers\nfrom keras.models import Sequential\nfrom keras.layers import Dropout, Flatten, Dense,LeakyReLU,BatchNormalization\n\n\nx_train=np.load('/content/drive/My Drive/GreenDeck/15k_data_cnn_features.npy')\ny_train=np.load('/content/drive/My Drive/GreenDeck/15k_data_cnn_feature_asins.npy')\n\n\n\n\nclass Custom_lr(keras.callbacks.Callback):\n\n def on_train_begin(self, logs={}):\n\t K.set_value(self.model.optimizer.lr, 0.001)\n\n def on_epoch_begin(self, epoch, logs={}):\n lr_present=K.get_value(self.model.optimizer.lr)\n #print(epoch)\n if (epoch%10==0) and epoch:\n\n K.set_value(self.model.optimizer.lr, lr_present/((epoch)**0.5))\n print(K.get_value(self.model.optimizer.lr))\n print(lr_present/((epoch)**0.5))\n\ntop_model=Sequential()\ntop_model.add(Flatten(input_shape=xtrain.shape[1:]))\ntop_model.add(Dense(256, activation='relu'))\ntop_model.add(BatchNormalization())\ntop_model.add(Dropout(0.5))\ntop_model.add(Dense(32, activation='relu'))\ntop_model.add(BatchNormalization())\ntop_model.add(Dropout(0.5))\n# top_model.add(BatchNormalization())\ntop_model.add(Dense(17, activation='softmax'))\n\ntop_model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\ncallbacks = [\n\t Custom_lr()\n\t]\ncheckpoint = ModelCheckpoint(\"./bottleneck_vgg16_model.h5\",\n monitor=\"val_loss\",\n mode=\"min\",\n save_best_only = True,\n verbose=1)\nearlystop = EarlyStopping(monitor = 'val_loss',\n mode=\"min\",\n min_delta = 0,\n patience = 5,\n verbose = 1,\n restore_best_weights = True)\n\nreduce_lr = ReduceLROnPlateau(monitor = 'val_loss', factor = 0.2, patience = 2,verbose = 1, min_delta = 0.0001)\n\n\n\ntop_model.fit(xtrain, ytrain,\n epochs=30,\n batch_size=32,\n validation_data=(xtest, ytest), callbacks=callbacks)\n\nscore = top_model.evaluate(xtest,ytest)\nprint(score)\n","sub_path":"bottleneck_feature/train_bottleneck.py","file_name":"train_bottleneck.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"353558478","text":"import tensorflow as tf\nimport numpy as np\n\n# H(x) (Hypothesis) = W (weight) * x + b (bias)\n\n\n# 만약 변수가 3개라면?\n# H(x1,x2,x3) = w1x1 + w2x2 + w3x3 + b\n# Matrix = w1x1 + w2x2 + w3x3 + ... + wnxn\n# 변수가 많아지면 길게 계속 늘어뜨리기 번거롭다.\n# 그러므로, Matrix를 사용한다.\n# [ x1 x2 x3 .... xn] x [w1 w2 w3 ... wn]^(T) = (x1w1 + x2w2 + ... + xnwn)\n# Matrix 를 쓸때는 x를 앞에다가 쓴다. \n# H(X) = X*W\n# X와 W가 대문자라는 건 Matrix라는 암시.\n\n# row가 하나의 인스턴스\n# ( x11 x12 x13 ) = ( x11w1 + x12w2 + x13w3 )\n# ( x21 x22 x23 ) * ( w1 ) = ( x21w1 + x22w2 + x23w3 )\n# ( x31 x32 x33 ) * ( w2 ) = ( x31w1 + x32w2 + x33w3 )\n# ( x41 x42 x43 ) * ( w3 ) = ( x41w1 + x42w2 + x43w3 )\n# ( x51 x52 x53 ) = ( x51w1 + x52w2 + x53w3 )\n# [ 5 , 3] * [ 3 , 1 ] = [ 5 , 1 ]\n# H(X) = XW\n# W의 크기는 어떻게 결정? -> x의 요소개수인 3받아서 y의 요소개수인 1\n# n 은 인스턴스 개수\n# [ n , x] * [ x , y ] = [ n , y ]\n\n# Lecture(theory):\n # H(x) = Wx + b\n\n# Implementation(TensorFlow):\n # H(x) = XW \n\nx1_data = [73.,93.,89.,96.,73.]\nx2_data = [80.,88.,91.,98.,66.]\nx3_data = [75.,93.,90.,100.,70.]\ny_data = [152.,185.,180.,196.,142.]\n\n# 만일 x가 100개가 넘어가면 힘들다. 
그러므로 이제 이 표현은 안하고, 매트릭스를 사용할 것이다.\n\n# placeholders for a tensor taht will be always fed.\nx1 = tf.placeholder(tf.float32)\nx2 = tf.placeholder(tf.float32)\nx3 = tf.placeholder(tf.float32)\n\nY = tf.placeholder(tf.float32)\n\nw1 = tf.Variable(tf.random_normal([1]),name = 'weight1')\nw2 = tf.Variable(tf.random_normal([1]),name = 'weight2')\nw3 = tf.Variable(tf.random_normal([1]),name = 'weight3')\nb = tf.Variable(tf.random_normal([1]),name = 'bias')\n\nhypothesis = x1*w1 + x2*w2 + x3*w3 + b\n\n#cost/loss function\ncost = tf.reduce_mean(tf.square(hypothesis - Y))\n# Minimize. Need a very small learning rate for this data set\noptimizer = tf.train.GradientDescentOptimizer(learning_rate = 1e-5)\ntrain = optimizer.minimize(cost)\n\n#Launch the graph in a session.\nsess = tf.Session()\n#Initializes global variables in the graph.\nsess.run(tf.global_variables_initializer())\nfor step in range(2001):\n cost_val, hy_val, _ = sess.run([cost,hypothesis,train],feed_dict={x1:x1_data,x2:x2_data,x3:x3_data,Y:y_data})\n\n if step % 10 == 0:\n print(\"step:\",step, \"Cost:\",cost_val, \"\\nPrediction:\",hy_val)\n","sub_path":"practice/5_MultivariableLinearRegression.py","file_name":"5_MultivariableLinearRegression.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"97872293","text":"from . import plugin\n\nfrom flask_login import current_user, login_required\nfrom flask import render_template, request, Response\n\nfrom wtforms import StringField, FloatField\nfrom flask_wtf import FlaskForm\nfrom flask_wtf.file import FileField, FileRequired\nfrom wtforms.widgets import TextArea\n\nfrom wtforms.validators import DataRequired, URL\n\nfrom security.admin import is_admin\nfrom security.request import send_get_request, send_post_request, send_put_request\n\nimport os, json\n\nclass PluginForm(FlaskForm):\n video_url = StringField('Video URL', validators=[DataRequired()])\n image_urls = StringField('Image URLs', validators=[DataRequired()])\n\n name = StringField('Name', validators=[DataRequired()])\n\n price = StringField('Price', validators=[DataRequired()])\n\n description = StringField('Description', validators=[DataRequired()], widget=TextArea())\n\n plugin_file = FileField()\n\n@plugin.route('/plugin/add', methods=['GET', 'POST'])\n@login_required\n@is_admin\ndef plugin_add():\n\n form = PluginForm()\n\n if form.validate_on_submit():\n\n form_json = {\n\n 'video_url': form.video_url.data,\n 'image_urls': form.image_urls.data,\n 'name': form.name.data,\n 'price': form.price.data,\n 'description': form.description.data,\n 'path': 'files/{}'.format(form.plugin_file.data.filename),\n }\n\n resp = send_post_request({}, json.dumps(form_json), 'plugin/add')\n\n if resp.status_code == 201:\n form.plugin_file.data.save('files/{}'.format(form.plugin_file.data.filename))\n return 'Plugin added'\n else:\n return 'Failed to add plugin'\n\n return render_template('plugin/form.html', form=form, method='add')\n\n@plugin.route('/plugin/edit/', methods=['POST', 'GET'])\n@login_required\n@is_admin\ndef plugin_edit(plugin_id):\n resp = send_get_request({'plugin_id': plugin_id}, 'plugin/get')\n\n if resp.status_code != 200:\n return Response(status=resp.status_code)\n\n plugin_data = resp.json()\n\n form = PluginForm()\n\n if form.validate_on_submit():\n form_json = {\n 'id': plugin_id,\n 'video_url': form.video_url.data,\n 'image_urls': form.image_urls.data,\n 'name': form.name.data,\n 'price': form.price.data,\n 'description': 
form.description.data\n }\n\n if form.plugin_file.data != None:\n\n if os.path.isfile(plugin_data['path']):\n os.remove(plugin_data['path'])\n\n form_json['path'] = 'files/{}'.format(form.plugin_file.data.filename)\n form.plugin_file.data.save('files/{}'.format(form.plugin_file.data.filename))\n\n resp = send_put_request({}, json.dumps(form_json), 'plugin/edit')\n\n if resp.status_code == 200:\n return 'Plugin edited'\n\n return 'Plugin edit failed'\n\n form.description.data = plugin_data['description']\n form.video_url.data = plugin_data['video_url']\n form.image_urls.data = plugin_data['image_urls']\n form.price.data = plugin_data['price']\n form.name.data = plugin_data['name']\n\n return render_template('plugin/form.html', form=form, method=\"edit/{}\".format(plugin_id))\n","sub_path":"plugin/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"411003176","text":"from flask import request, jsonify\nimport sqlite3\n\n\ndef add_car():\n username = request.json['username']\n car_number = request.json['carNumber']\n db = sqlite3.connect('db.db')\n cur = db.cursor()\n try:\n cur.execute('INSERT INTO Cars (username,carNumber,isInside) VALUES (?,?,?)', (username, car_number, 0))\n db.commit()\n except Exception as e:\n print(e)\n return jsonify({'message': f'Car number {car_number} is alreday exist',\n 'status': 'failed'})\n finally:\n db.close()\n return jsonify({'message': 'car added',\n 'status': 'successful',\n 'carNumber': car_number,\n 'username': username})\n","sub_path":"api/add_car.py","file_name":"add_car.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"459755789","text":"# Defining the structure and operations which Operators can be built from.\n\nimport numpy as np\nfrom numpy.linalg import norm\nimport cmath\nimport matplotlib.pyplot as plt\ntry:\n from src.sparse_matrix import SparseMatrix\n from src.quantum_register import QuantumRegister\nexcept:\n from sparse_matrix import SparseMatrix\n from quantum_register import QuantumRegister\n\nclass Operator(SparseMatrix):\n\n \"\"\"\n Operator class inherits from SparceMatrix\n \"\"\"\n def __init__(self, n_qubits : int=1, base = np.zeros((2,2))):\n\n \"\"\"\n Class constructor\n\n Inputs:\n n_qubits : Number of qubits the oparator acts on\n base : Matrix representation for the operators\n \"\"\"\n\n if n_qubits <= 0 :\n raise ValueError('Operator must operate on at least 1 qubit!')\n self.n_qubits = n_qubits\n self.size = 2 ** n_qubits\n if self.size < len(base):\n raise ValueError(\"Operator cannot act on the specified number\" +\n \"of qubits.\")\n act_qubits = int(np.log2(len(base)))\n base_matrix = SparseMatrix(*[len(base)]*2)\n for i in range(0, len(base)):\n for j in range(0, len(base)):\n if base[i][j] != 0:\n base_matrix.setElement(i, j, complex(base[i][j]))\n else:\n continue\n for i in range(0, n_qubits, act_qubits):\n if i == 0:\n result = base_matrix\n continue\n result = result.outerProduct(base_matrix)\n super(Operator, self).__init__(self.size,self.size)\n self.matrix = result.matrix\n\n def __mul__(self, rhs):\n \"\"\"\n :return: (QuantumRegister / Operator) Inner product result. 
Return type\n depends on the type of the input rhs.\n \"\"\"\n if isinstance(rhs, QuantumRegister):\n result = QuantumRegister(n_qubits = self.n_qubits)\n elif isinstance(rhs, Operator):\n result = Operator(n_qubits = self.n_qubits)\n else :\n \" Raise type error if the right type isn't provided\"\n raise TypeError('Multiplication not defined for Operator' +\n ' and {}.'.format(type(rhs)))\n if rhs.n_qubits != self.n_qubits:\n raise ValueError(\n 'Number of states do not correspnd: rhs.n_qubits = {},' +\n ' lhs.n_qubits = {}'.format(rhs.n_qubits, self.n_qubits))\n result.matrix = self.innerProduct(rhs).matrix\n return result\n\n def __mod__(self, rhs):\n \"\"\"\n Calculates the outer product betweeen its self and rhs.\n :param: (Operator) rhs.\n :return: (Operator) Outer product result.\n \"\"\"\n if isinstance(rhs, Operator):\n result = Operator(self.n_qubits + rhs.n_qubits)\n result.matrix = self.outerProduct(rhs).matrix\n return result\n else:\n raise TypeError('Operation not defined between operator and ' +\n '{}.'.format(type(rhs)))\n","sub_path":"QCP-Group-1-2019/src/quantum_operator.py","file_name":"quantum_operator.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"333912010","text":"'''\r\nCreated on Mar 30, 2016\r\n@author: anup\r\n'''\r\n\r\nimport re\r\nimport nltk\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.tokenize import sent_tokenize\r\nfrom collections import Counter\r\n\r\nclass Document(object):\r\n def __init__(self, topic, title, doc):\r\n \"\"\"Initial the Document object with tokens and topics\r\n \"\"\"\r\n if topic:\r\n self.terms = Counter()\r\n stopwrd = stopwords.words('english')\r\n self.topic = topic\r\n self.title = self.tokenize(title,stopwrd)\r\n self.tokens = self.tokenize(doc,stopwrd)\r\n \r\n def document_terms(self):\r\n return self.terms\r\n \r\n def tokenize(self,doc,stopwrd):\r\n \"\"\"Tokenize using whitespace and words only\"\"\"\r\n \r\n result = []\r\n document = doc\r\n if type(doc) is list:\r\n document = '.'.join(doc)\r\n \r\n for sent in sent_tokenize(document):\r\n sent = sent.strip('.')\r\n for token in nltk.word_tokenize(sent):\r\n token = token.lower().strip('.').strip('\\'')\r\n if token not in stopwrd and len(token) > 2 and re.match('^[0-9]*[a-zA-Z]+[0-9]*$', token):\r\n self.terms[token] += 1\r\n result.append(token)\r\n return result\r\n","sub_path":"Cluster/Document.py","file_name":"Document.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"48147273","text":"# -*- coding: utf-8 -*-\nimport urwid, sys\nfrom programs import Program\n\n\ndef get_status():\n status = \"Status: \"\n if Program.running:\n e = Program.running.error()\n if e:\n status += \"ERROR\"\n else:\n status += \"RUNNING\"\n status += \" (\" + Program.running.__class__.__name__ + \") \"\n if e:\n status += \"\\n\" + str(e) + \" (\" + e.__class__.__name__ + \") \"\n else:\n status += \"OFF\"\n\n return status\n\nstatus = urwid.Text(get_status())\n\ndef menu(choices):\n body = [urwid.Divider(\"-\"), urwid.Text(\"ANGEZEIGE\", align='center'), urwid.Divider(\"-\"), status, urwid.Divider(\"-\"), urwid.Text(\"Choose Program:\")]\n for choice in choices:\n button = urwid.Button(choice)\n urwid.connect_signal(button, 'click', item_chosen, user_args = [choice])\n body.append(urwid.AttrMap(button, None, focus_map='reversed'))\n body.append(urwid.Divider(\"-\"))\n button = 
urwid.Button(\"EXIT\")\n urwid.connect_signal(button, 'click', exit_application)\n body.append(urwid.AttrMap(button, None, focus_map='reversed'))\n \n return urwid.ListBox(urwid.SimpleFocusListWalker(body))\n\ndef item_chosen(choice, button):\n body = [urwid.Divider(\"-\"), urwid.Text(choice, align='center'), urwid.Divider(\"-\"), urwid.Text(\"Parameters:\")]\n\n params = {}\n for p, v in Program.getPromotedPrograms()[choice].getParams().items():\n #body.append(urwid.Text())\n edit = urwid.Edit(caption = u\"▸ \" + p.title() + \": \", edit_text = v)\n body.append(urwid.AttrMap(edit, None, focus_map='reversed'))\n #body.append(urwid.Divider())\n params[p] = edit\n\n body.append(urwid.Divider(\"-\"))\n\n ok = urwid.Button(u'Ok')\n back = urwid.Button(u'Back')\n \n urwid.connect_signal(ok, 'click', start_program, user_args = [choice, params])\n urwid.connect_signal(back, 'click', show_menu)\n \n tOk = urwid.AttrMap(ok, None, focus_map='reversed')\n \n body.append(tOk)\n body.append(urwid.AttrMap(back, None, focus_map='reversed'))\n\n mainWidget.original_widget = urwid.Filler(urwid.Pile(body, focus_item=tOk))\n\ndef start_program(choice, params, button):\n cParams = {}\n for p in params:\n cParams[p] = params[p].get_edit_text()\n\n if Program.running:\n Program.running.stop()\n Program.running.join()\n\n p = Program.getPromotedPrograms()[choice](**cParams)\n p.start()\n show_menu()\n\ndef exit_application(button):\n raise urwid.ExitMainLoop()\n\ndef show_menu(button = None):\n mainWidget.original_widget = listMenu\n \ndef get_info(mainLoop, data):\n status.set_text(get_status())\n mainLoop.set_alarm_in(1, get_info)\n\ndef choose():\n top = urwid.Overlay(mainWidget, urwid.SolidFill(u'\\N{MEDIUM SHADE}'),\n align='center', width=('relative', 60),\n valign='middle', height=('relative', 60),\n min_width=20, min_height=9)\n show_menu()\n mainLoop = urwid.MainLoop(top, palette=[('reversed', 'standout', '')])\n mainLoop.set_alarm_in(0, get_info)\n mainLoop.run()\n\n\nlistMenu = menu(Program.promotedPrograms.keys())\nmainWidget = urwid.Padding(None, left=1, right=1)\n","sub_path":"chooser.py","file_name":"chooser.py","file_ext":"py","file_size_in_byte":3134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"220953475","text":"# Function to rename multiple files\r\nimport os\r\ndef main():\r\n i = 1704\r\n path=\"C:/Users/bkauy/OneDrive/Desktop/241_Final_Project/videos_and_frames/cleaned_individual_disengaged/\"\r\n for filename in os.listdir(path):\r\n my_dest =\"example\" + str(i) + \".jpg\"\r\n my_source = path + filename\r\n my_dest = path + my_dest\r\n # rename() function will\r\n # rename all the files\r\n os.rename(my_source, my_dest)\r\n i += 1\r\n# Driver Code\r\nif __name__ == '__main__':\r\n # Calling main() function\r\n main()\r\n\r\n\r\n# path = working_dir + '/' + the_relative_path_you_gave_me\r\n#\r\n# working_dir = \"c:/users/bkauyeung/engagement_app\"\r\n#\r\n# path = 'c:/users/bkauyeung/engagement_app/Users\\bkauy\\engagement_app\\app\\static\\img\\photos'\r\n","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"266939858","text":"# leetcode #2\n# Runtime: 64 ms, faster than 94.33% of Python\n# online submissions for Add Two Numbers.\n\n'''\nYou are given two non-empty linked lists representing two non-negative\nintegers. 
The digits are stored in reverse order and each of their nodes\ncontain a single digit. Add the two numbers and return it as a linked list.\n\nYou may assume the two numbers do not contain any leading zero, except the\nnumber 0 itself.\n\nExample:\n\nInput: (2 -> 4 -> 3) + (5 -> 6 -> 4)\nOutput: 7 -> 0 -> 8\nExplanation: 342 + 465 = 807.\n'''\n\n# Definition for singly-linked list.\n\n\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n\n head = ListNode(0)\n temp = head\n carry = 0\n while (l1 is not None) or (l2 is not None):\n if l1 is None:\n i = 0\n else:\n i = l1.val\n if l2 is None:\n j = 0\n else:\n j = l2.val\n sum = carry + i + j\n total = sum % 10\n carry = sum // 10\n\n temp.next = ListNode(total)\n temp = temp.next\n\n if (l1 is not None):\n l1 = l1.next\n if (l2 is not None):\n l2 = l2.next\n if carry > 0:\n temp.next = ListNode(carry)\n return head.next\n\n\ntest = Solution()\n\nlistOne = ListNode(2)\nlistOne.next = ListNode(4)\nlistOne.next.next = ListNode(3)\n\nlistTwo = ListNode(5)\nlistTwo.next = ListNode(6)\nlistTwo.next.next = ListNode(4)\n\ntest.addTwoNumbers(listOne, listTwo)\n","sub_path":"interview-questions/add-two-numbers/addtwo.py","file_name":"addtwo.py","file_ext":"py","file_size_in_byte":1727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"138784821","text":"from google_helpers.sheets_attendance import (\n get_sheet_as_df,\n MORNING,\n COL_EMAIL,\n COL_TIMESTAMP,\n)\nfrom attendance.models import MorningAttendance\nfrom django.contrib.auth import get_user_model\nfrom django.core.management.base import BaseCommand, CommandError\nfrom datetime import datetime, timedelta\n\nUser = get_user_model()\n\n\ndef save_row(row):\n # print(f\"morning - {row[COL_EMAIL]}\")\n user, _ = User.objects.get_or_create(email=row[COL_EMAIL])\n MorningAttendance.objects.get_or_create(\n user=user,\n # timestamp=row[COL_TIMESTAMP],\n date=row[COL_TIMESTAMP].date(),\n defaults={\n \"plan_of_action\": row[\"plan_of_action\"],\n \"problems_forseen\": row[\"problems_forseen\"],\n \"requests\": row[\"requests\"],\n \"late_reason\": row[\"late_reason\"],\n \"score\": row[\"score\"],\n \"timestamp\": row[COL_TIMESTAMP],\n },\n )\n\n\ndef pull_morning_attendance(days):\n df = get_sheet_as_df(MORNING)\n df = df[df[\"Timestamp\"].dt.date == datetime.now().date() - timedelta(days=days)]\n df.apply(save_row, axis=1)\n print(df.head())\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\"days\", type=int, nargs=\"?\", default=0)\n\n def handle(self, *args, **options):\n pull_morning_attendance(options[\"days\"])\n","sub_path":"backend/attendance/management/commands/pull_morning_attendance.py","file_name":"pull_morning_attendance.py","file_ext":"py","file_size_in_byte":1362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"496120444","text":"# Functions for editing config files\nimport os, random\n# import config\nimport re\n\ndef parse_line(line, delim=':', sep='\"'):\n comps = line.split(delim)\n if len(comps) == 2:\n arg, val = [x.strip().strip(',').strip(sep) for x in comps]\n return arg, val\n else:\n return None\n\ndef modify(filename, args_dict={}, ext='json', **kwargs):\n\n delims = {\n 'json': ':',\n 'conf': '=',\n 'yml': ':',\n 'html': '='}\n seps = {\n 'json': 
'\"',\n 'conf': '',\n 'yml' : '',\n 'html': '\"'}\n\n with open(filename, 'r') as f:\n original = f.readlines()\n to_change = {**args_dict, **kwargs}\n arg_list = list(to_change.keys())\n for i, line in enumerate(original):\n parsed = parse_line(line, delim=delims[ext], sep=seps[ext])\n if parsed is not None:\n arg, val = parsed\n if arg in arg_list:\n original[i] = line.replace(val, str(to_change[arg]))\n print('{}: \"{}\" | {} ==> {}'.format(\n filename, arg, val, to_change[arg]))\n f = open(filename, 'w')\n f.writelines(original)\n f.close()\n\ndef count_cols(filename):\n with open(filename, 'r') as f:\n text = f.read()\n codes = re.findall('^#(([0-9a-fA-F]{2}){3}|([0-9a-fA-F]){3})$', text)\n print(text)\n print(codes)\n\n\ndef replace_color(filename, new):\n with open(filename, 'r') as f:\n original = f.read()\n # print(original)\n ind = original.find('\"#') + 2\n print(ind)\n print(original[ind:ind+6])\n old = original[ind:ind+6]\n print(old)\n original = original.replace(old, new)\n # original[ind:ind+6] = new\n f = open(filename, 'w')\n f.writelines(original)\n f.close()\n\n\n\nif __name__ == \"__main__\":\n # modify('tips.md', test=50)\n import json\n # modify('tips.md', ext='conf', test=60)\n # modify('json_test.json', args_dict={\"workbench.colorTheme\": \"Eva Dark Bold\"})\n # filename = '/home/andrius/.config/Code/User/settings.json'\n # themef = '/home/andrius/.vscode/extensions/fisheva.eva-theme-0.7.9/themes/Eva-Dark.json'\n # themes = [\"Material Theme\", 'Eva Dark Bold', 'Monokai']\n # modify(filename=filename, args_dict={\"workbench.colorTheme\": random.sample(themes, 1)[0]})\n # js = json.load(open('json_test.json'))\n # print(js)\n # modify('Classifier.svg', ext='html', fill='#000000')\n import glob\n ls = glob.glob(os.path.abspath(os.curdir)+'/*.svg')\n print(ls)\n # F90000\n col = 'F90000'\n [replace_color(x, col) for x in ls]\n # replace_color('Classifier.svg', 'F90000')\n # count_cols(themef)","sub_path":"misc/icons/color.py","file_name":"color.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"434862214","text":"from django.urls import path\nfrom . 
import views\n\napp_name = 'music'\n\nurlpatterns = [\n path('result/', views.result, name='result'),\n\n #music/\n path('', views.IndexView.as_view(), name='Home'),\n\n #music/1\n path('/', views.DetailView.as_view(), name=\"Details\"),\n\n #music/album/add\n path('album/add', views.AlbumCreate.as_view(), name=\"album-add\"),\n\n #music/edit-album/1\n path('edit-album/', views.UpdateAlbumView.as_view(), name='update-album'),\n\n #music/album/1/delete\n path('album//delete', views.DeleteAlbumView.as_view(), name='delete-album'),\n\n #music/1/song/add\n path('/song/add', views.SongCreate.as_view(), name=\"song-add\"),\n\n #music/1/song/update/1/\n path('/song/update/', views.UpdateSong.as_view(), name=\"update-song\"),\n\n #music/1/song/delete/1\n path('/song/delete/', views.DeleteSong.as_view(), name=\"delete-song\"),\n\n path('/favourite/song/', views.favourite_song, name=\"favourite-song\")\n]\n\n","sub_path":"website/music/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"244218599","text":"from kivy.uix.textinput import TextInput\nfrom kivy.uix.stacklayout import StackLayout\n\nfrom widgetpresets import *\nfrom robotclass import *\nimport sqlite3\n\nclass TeleopLayout(StackLayout):\n def __init__(self, screenSwitcher):\n self.switcher = screenSwitcher\n super(TeleopLayout, self).__init__()\n\n def display(self):\n displist = []\n\n # displays cubes in switch\n switchDisp = quarterLabel(\"Cubes put in switch:\\n\\n\" + str(self.switcher.robot.switch), seaFoamGreen)\n displist.append(switchDisp)\n # displays team number\n teamDisp = quarterLabel(\"Team: \" + str(self.switcher.robot.teamNumber), black)\n displist.append(teamDisp)\n # displays event name\n eventDisp = quarterLabel(\"Event: \" + self.switcher.robot.eventName, black)\n displist.append(eventDisp)\n # \"climbed\" button for climb options\n climb1Color = darkMagenta if self.switcher.robot.climb == \"climbed\" else lightMagenta # darkening the currently selected climb option\n climbButton1 = eighthButton(\"Robot\\nclimbed\\nsuccessfully\", climb1Color)\n climbButton1.bind(on_release=lambda x: self.changeClimb(\"climbed\"))\n displist.append(climbButton1)\n # \"tried but failed\" button for climb options\n climb2Color = darkMagenta if self.switcher.robot.climb == \"tried but failed\" else lightMagenta # darkening the currently selected climb option\n climbButton2 = eighthButton(\"Robot\\nattempted to\\nclimb but\\nfailed\", climb2Color)\n climbButton2.bind(on_release=lambda x: self.changeClimb(\"tried but failed\"))\n displist.append(climbButton2)\n\n # decrement switchDisp\n switchDec = eighthButton(\"-\", seaFoamGreen)\n switchDec.bind(on_release=lambda x: self.changeSwitch(-1))\n displist.append(switchDec)\n # increment switchDisp\n switchInc = eighthButton(\"+\", seaFoamGreen)\n switchInc.bind(on_release=lambda x: self.changeSwitch(1))\n displist.append(switchInc)\n # menu button\n menuButton = quarterButton(\"Menu\")\n menuButton.bind(on_release=lambda x: self.switcher.switch(\"menu\"))\n displist.append(menuButton)\n # displays scouter name\n scouterDisp = quarterLabel(\"Scouter: \" + self.switcher.robot.scouter, black)\n displist.append(scouterDisp)\n # \"levitated\" button for climb options\n climb3Color = darkMagenta if self.switcher.robot.climb == \"levitated\" else lightMagenta # darkening the currently selected climb option\n climbButton3 = eighthButton(\"Robot\\nlevitated\", climb3Color)\n 
climbButton3.bind(on_release=lambda x: self.changeClimb(\"levitated\"))\n displist.append(climbButton3)\n # \"did not climb\" button for climb options\n climb4Color = darkMagenta if self.switcher.robot.climb == \"did not climb\" else lightMagenta # darkening the currently selected climb option\n climbButton4 = eighthButton(\"Robot did\\nnot climb\", climb4Color)\n climbButton4.bind(on_release=lambda x: self.changeClimb(\"did not climb\"))\n displist.append(climbButton4)\n\n # scale display\n scaleLayout = StackLayout(size_hint=(.25, .5)) # smaller layout to get around larger widgets in the same line (notesTextInput)\n displist.append(scaleLayout)\n scaleDisp = fullLabel(\"Cubes put in scale:\\n\\n\" + str(self.switcher.robot.scale), fairBlue)\n scaleLayout.add_widget(scaleDisp)\n # input for notes\n notesTextInput = TextInput(size_hint=(.5, .5))\n displist.append(notesTextInput)\n # displays cubes in exchange\n exchangeLayout = StackLayout(size_hint=(.25, .5))\n displist.append(exchangeLayout)\n exchangeDisp = fullLabel(\"Cubes put in exchange:\\n\\n\" + str(self.switcher.robot.exchange), lightOrange)\n exchangeLayout.add_widget(exchangeDisp)\n\n # decrement scaleDisp\n scaleDec = halfButton(\"-\", fairBlue)\n scaleDec.bind(on_release=lambda x: self.changeScale(-1))\n scaleLayout.add_widget(scaleDec)\n # increment scaleDisp\n scaleInc = halfButton(\"+\", fairBlue)\n scaleInc.bind(on_release=lambda x: self.changeScale(1))\n scaleLayout.add_widget(scaleInc)\n # decrement exchangeDisp\n exchangeDec = halfButton(\"-\", lightOrange)\n exchangeDec.bind(on_release=lambda x: self.changeExchange(-1))\n exchangeLayout.add_widget(exchangeDec)\n # increment exchangeDisp\n exchangeInc = halfButton(\"+\", lightOrange)\n exchangeInc.bind(on_release=lambda x: self.changeExchange(1))\n exchangeLayout.add_widget(exchangeInc)\n\n self.clear_widgets()\n for widg in displist:\n self.add_widget(widg)\n\n def changeSwitch(self, change):\n self.switcher.robot.switch += change\n if self.switcher.robot.switch < 0:\n self.switcher.robot.switch = 0\n self.display()\n def changeScale(self, change):\n self.switcher.robot.scale += change\n if self.switcher.robot.scale < 0:\n self.switcher.robot.scale = 0\n self.display()\n def changeExchange(self, change):\n self.switcher.robot.exchange += change\n if self.switcher.robot.exchange < 0:\n self.switcher.robot.exchange = 0\n self.display()\n def changeClimb(self, change):\n self.switcher.robot.climb = change\n self.display()\n","sub_path":"buildbackup/PowerUpScouting/android/app/buildapp/android/app/teleopscreen.py","file_name":"teleopscreen.py","file_ext":"py","file_size_in_byte":5400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"282542096","text":"\"\"\"Scrapes images from Bing's image of the day\"\"\"\nimport os\nimport urllib\nimport urllib.request\nimport json\nfrom bs4 import BeautifulSoup\n\ndef scrape():\n download_images('https://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=8&mkt=en-US', 0)\n download_images('https://www.bing.com/HPImageArchive.aspx?format=js&idx=8&n=8&mkt=en-US', 1)\n\ndef download_images(url, imageStart):\n save_location = os.path.dirname(os.path.realpath(__file__)) + \"/scrape/\"\n if not os.path.exists(save_location):\n print(\"Making image directory...\")\n os.makedirs(save_location)\n \n website = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(website, \"html.parser\")\n raw_json = soup.get_text()\n formatted_json = json.loads(raw_json)\n num_items = 
len(formatted_json['images'])\n for j in range(imageStart, num_items):\n filename = formatted_json['images'][j]['url']\n parts = filename.split(\"/\")\n if os.path.isfile(save_location + parts[4]):\n print(\"Image '\" + save_location + parts[4] + \"' already exists.\")\n else:\n print(\"Image output to: \" + save_location + parts[4])\n urllib.request.urlretrieve(\"http://www.bing.com\" + formatted_json['images'][j]['url'], save_location + parts[4])\n\ndef main():\n \"\"\"Main entry point\"\"\"\n scrape()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"236844997","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom skimage import data\nfrom skimage.feature import match_template\nfrom skimage import io\n\nimage = io.imread(\"Sprites/blobs.png\")\nblob = image[170:220, 75:130]\n\nresult = match_template(image, blob)\nij = np.unravel_index(np.argmax(result), result.shape)\nx, y = ij[::-1]\n\nfig = plt.figure(figsize=(8, 3))\nax1 = plt.subplot(1, 3, 1)\nax2 = plt.subplot(1, 3, 2, adjustable='box-forced')\nax3 = plt.subplot(1, 3, 3, sharex=ax2, sharey=ax2, adjustable='box-forced')\n\nax1.imshow(blob)\nax1.set_axis_off()\nax1.set_title('template')\n\nax2.imshow(image)\nax2.set_axis_off()\nax2.set_title('image')\n# highlight matched region\nhcoin, wcoin = coin.shape\nrect = plt.Rectangle((x, y), wblob, hblob, edgecolor='r', facecolor='none')\nax2.add_patch(rect)\n\nax3.imshow(result)\nax3.set_axis_off()\nax3.set_title('`match_template`\\nresult')\n# highlight matched region\nax3.autoscale(False)\nax3.plot(x, y, 'o', markeredgecolor='r', markerfacecolor='none', markersize=10)\n\nplt.show()\n","sub_path":"vision/blob2.py","file_name":"blob2.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"511142887","text":"ivan = {\n \"name\": \"ivan\",\n \"age\": 34,\n \"children\": [{\n \"name\": \"vasja\",\n \"age\": 12\n },\n {\n \"name\": \"petja\",\n \"age\": 10\n }]\n}\n\ndarja = {\n \"name\": \"darja\",\n \"age\": 41,\n \"children\": [{\n \"name\": \"kirill\",\n \"age\": 21\n },\n {\n \"name\": \"pavel\",\n \"age\": 15\n }]\n}\n\nemps = [ivan, darja]\n\ndef print_people_with_adult_children(people):\n for person in people:\n has_adult_children = False\n for child in person['children']:\n if child['age'] > 18:\n has_adult_children = True\n if has_adult_children:\n print(person['name'])\n\n\nif __name__ == '__main__':\n print_people_with_adult_children(emps)","sub_path":"lab1/dict_algs.py","file_name":"dict_algs.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"526894570","text":"#!/usr/bin/python3\n# *_* coding: utf-8 *_*\n# @Author: shengyang\n# @Email: samonsix@163.com\n# @IDE: PyCharm\n# @File: split_occ_clean_file.py\n# @Modify Time @Author @Version @Desciption\n# ---------------- ------- -------- -----------\n# 2019.11.14 11:44 shengyang v0.1 creation\n\nimport os\nimport os.path as osp\nimport shutil\n\n\ndef move(input_dir, occ_output_dir, clean_output_dir):\n for root_dir, sub_dir, file_list in os.walk(input_dir):\n for f in file_list:\n if \"_occ.jpg\" in f:\n ori_file_path = osp.join(root_dir, f)\n tar_file_path = ori_file_path.replace(input_dir, occ_output_dir)\n tar_file_path = 
tar_file_path.replace(\"_occ.jpg\", \".jpg\")\n tar_file_dir = osp.dirname(tar_file_path)\n if not osp.exists(tar_file_dir):\n os.makedirs(tar_file_dir)\n shutil.move(ori_file_path, tar_file_path)\n elif \".jpg\" in f:\n ori_file_path = osp.join(root_dir, f)\n tar_file_path = ori_file_path.replace(input_dir, clean_output_dir)\n tar_file_dir = osp.dirname(tar_file_path)\n if not osp.exists(tar_file_dir):\n os.makedirs(tar_file_dir)\n shutil.move(ori_file_path, tar_file_path)\n else:\n raise Exception(\"{} file error\".format(f))\n\n\nif __name__ == \"__main__\":\n move(input_dir=\"/home/shengyang/haige_dataset/face_occusion/subset_for_test/mask\",\n occ_output_dir=\"/home/shengyang/haige_dataset/face_occusion/subset_for_test/occ_mask\",\n clean_output_dir=\"/home/shengyang/haige_dataset/face_occusion/subset_for_test/clean_mask\",\n )\n","sub_path":"occlusion_face_recognition/split_occ_clean_file.py","file_name":"split_occ_clean_file.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"125338523","text":"import pandas as pd\nimport numpy as np\n\nfrom enum import Enum\n\n\nclass DataWrapper(object):\n \"\"\"Class to wrap the concatenated data.\n The different Mode settings change the output of test_data and train_data.\n\n The main advantage of this wrapper is, to write the cleaning, feature engineering and modelling code\n using ``DataWrapper.data``, ``DataWrapper.train_data`` and ``DataWrapper.test_data``.\n For validation and submission, only the ``Mode`` has to be changed, the remaining code remains as it is.\n\n Examples\n --------\n >>> d = DataWrapper(data_tr, data_te, DataWrapper.Mode.TRAIN)\n >>> d.train_data # data_tr\n >>> d.test_data # data_te\n\n >>> # data_su is not used in VALIDATION mode\n >>> d = DataWrapper(data_tr, data_te, data_va, data_su, DataWrapper.Mode.VALIDATE)\n >>> d.train_data # data_tr + data_te\n >>> d.test_data # data_va\n\n \"\"\"\n\n class Mode(Enum):\n TRAIN = 1\n VALIDATE = 2\n SUBMIT = 3\n\n def __init__(self, train, test, validate=None, submit=None, mode=Mode.TRAIN):\n self.__mode = mode\n\n if self.__mode == self.Mode.TRAIN:\n self.test_offset = len(train)\n data_used = [train, test]\n elif self.__mode == self.Mode.VALIDATE:\n self.test_offset = len(train) + len(test)\n data_used = [train, test, validate]\n elif self.__mode == self.Mode.SUBMIT:\n self.test_offset = len(train) + len(test) + len(validate)\n data_used = [train, test, validate, submit]\n else:\n raise Exception(\"Please select a predefined Mode from DataWrapper.Mode\")\n\n self.data = pd.concat(data_used, ignore_index=True)\n\n @property\n def train_data(self):\n return self.data[0:self.test_offset]\n\n @train_data.setter\n def train_data(self, d):\n if len(d) != self.test_offset:\n raise Exception(\"Data size does not match\")\n\n self.data[0:self.test_offset] = d\n\n @property\n def test_data(self):\n return self.data[self.test_offset:]\n\n @test_data.setter\n def test_data(self, d):\n if len(d) != len(self.data) - self.test_offset:\n raise Exception(\"Data size does not match\")\n\n self.data[self.test_offset:] = d\n","sub_path":"Instacart/AdVetter/src/data/data_wrapper.py","file_name":"data_wrapper.py","file_ext":"py","file_size_in_byte":2281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"9116481","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport 
os\nimport sys\nimport pickle\nimport torch.nn.utils\nimport torch.optim as optim\nimport torch.utils.data as data\nfrom tqdm.auto import tqdm\nfrom sklearn.metrics import r2_score, precision_score, recall_score, f1_score, confusion_matrix, accuracy_score\nimport itertools as it\nimport time\nimport numpy as np\nfrom seqeval.metrics import classification_report\n\nfrom tqdm.auto import tqdm\n\ndef load_emb(path, total=None):\n toks = []\n embs = []\n with open(path, 'r') as f:\n for l in tqdm(f, path, total=total):\n tok, *emb = l.strip().split()\n emb = [float(x) for x in emb]\n toks.append(tok)\n embs.append(emb)\n assert('PAD_TOK' not in toks and 'UNK_TOK' not in toks)\n toks += ['PAD_TOK', 'UNK_TOK']\n embs += [[0.]*len(emb), [0.]*len(emb)]\n tok_to_id = dict(zip(toks, it.count()))\n emb = torch.tensor(embs)\n return tok_to_id, emb\n\n# load characters from (training) data\ndef load_chrs(path, total=None):\n chars = set()\n with open(path, 'r') as f:\n for l in tqdm(f, path, total=total):\n try:\n for c in l.strip().split()[2]:\n chars.add(c)\n except:\n pass\n assert('PAD_CHR' not in chars and 'UNK_CHR' not in chars)\n chars = sorted(chars)\n chars.append('PAD_CHR')\n chars.append('UNK_CHR')\n return dict(zip(chars, it.count())) \n\ndef load_classes(path, total=None):\n id_to_lbl = set()\n with open(path, 'r') as f:\n for l in tqdm(f, path, total=total):\n try:\n id_to_lbl.add(l.strip().split()[3])\n except:\n pass\n assert('PAD_LBL' not in id_to_lbl)\n id_to_lbl = sorted(id_to_lbl)\n id_to_lbl.append('PAD_LBL')\n lbl_to_id = {k:v for v, k in enumerate(id_to_lbl)}\n return lbl_to_id, id_to_lbl\n \ndef load_data(path, tok_to_id, lbl_to_id, chr_to_id):\n with open(path, 'r') as f:\n seqs = f.read().split('\\n\\n')\n if not seqs[-1].strip():\n seqs.pop()\n if seqs[0][0] == '\\n':\n seqs[0] = seqs[0][1:]\n seqs = [l.split('\\n') for l in seqs]\n seq_len = max((len(seq) for seq in seqs))\n seqs = [[l.split(' ') for l in seq] for seq in seqs]\n wrd_len = max((max((len(cols[2]) for cols in seq)) for seq in seqs))\n W = torch.empty((len(seqs), seq_len, wrd_len), dtype=torch.long).fill_(chr_to_id['PAD_CHR'])\n X = torch.empty((len(seqs), seq_len), dtype=torch.long).fill_(tok_to_id['PAD_TOK'])\n Y = torch.empty((len(seqs), seq_len), dtype=torch.long).fill_(lbl_to_id['PAD_LBL'])\n for i, seq in enumerate(tqdm(seqs, 'sequences')):\n for j, cols in enumerate(seq):\n assert(j < seq_len)\n tok, _, wrd, lbl = cols\n for k, ch in enumerate(wrd):\n try:\n W[i,j,k] = chr_to_id[ch]\n except KeyError:\n W[i,j,k] = chr_to_id['UNK_CHR']\n try:\n X[i,j] = tok_to_id[tok]\n except KeyError:\n X[i,j] = tok_to_id['UNK_TOK'] \n Y[i,j] = lbl_to_id[lbl]\n return W, X, Y\n\nclass NERDataset(data.Dataset):\n def __init__(self, W, X, Y):\n self.W, self.X, self.Y = W, X, Y\n \n def __len__(self):\n return self.Y.shape[0]\n \n def __getitem__(self, i):\n return self.W[i], self.X[i], self.Y[i]\n\nclass LinearCRF(nn.Module):\n def __init__(self, input_size, hidden_size, lbl_to_id, lstm_model, Y=None, freeze=False):\n super().__init__()\n \n #self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True, bidirectional=False)\n self.lstm = lstm_model\n self.lbl_to_id = lbl_to_id\n self.lbl_to_id['STR_LBL'] = 18\n self.num_tags = 19 #Includes the 17 tags + start + end(PAD_TOK)\n #self.T.data[0:17,0:17] = torch.from_numpy(get_matrix().transpose())\n\n # Rishabh: initialization\n if Y is None:\n self.T = nn.Parameter(torch.randn(self.num_tags, self.num_tags))\n else:\n print('initializing self.T')\n Y = torch.cat((\n 
torch.empty(Y.shape[0], 1, dtype=torch.long).fill_(self.lbl_to_id['STR_LBL']),\n Y.cpu(),\n torch.empty(Y.shape[0], 1, dtype=torch.long).fill_(self.lbl_to_id['PAD_LBL'])\n ), dim=-1)\n Y = Y.cuda()\n A = torch.empty(self.num_tags, self.num_tags)\n for i in range(self.num_tags):\n for j in range(self.num_tags):\n A[i,j] = 1+torch.sum((Y[:,:-1] == i) & (Y[:,1:] == j)).item()\n #for i in tqdm(range(Y.shape[0]), 'sequences'):\n # for j in range(Y.shape[1]-1):\n # A[Y[i,j], Y[i,j+1]] += 1\n A /= torch.sum(A, dim=-1)[:,None]\n A = torch.log(A)\n self.T = nn.Parameter(A.transpose(0,1))\n if freeze:\n print('frozen transition matrix')\n self.T.requires_grad=False\n \n self.dropout = nn.Dropout(0.5)\n self.proj = nn.Sequential( nn.Linear(2 * input_size, input_size), \n nn.ReLU(),\n nn.Linear(input_size, 19))\n \n \n def forward(self, W, X):\n #X is of the shape (batch_size, seq_length, num_features)\n \n o = self.lstm(W, X)\n \n #return F.softmax(self.proj(o), dim=2)\n return o\n\n def predict(self, P, mask):\n \n with torch.no_grad():\n batch_size = P.shape[0]\n path = []\n for i in range(batch_size):\n path.append([])\n \n choice = torch.zeros(P.shape[0], P.shape[1], 19).cuda()\n \n prob_matrix = self.T\n # X.shape is (batch_size, sentence_length)\n \n \n DP = torch.full((P.shape[0], 19), -1000).cuda()\n DP[:, self.lbl_to_id['START_LBL']] = 0 #start tag\n for i in range(batch_size):\n for j in range(19):\n choice[i,0,j] = j\n #path[:,0] = DP[:,0,:].argmax(dim=1)\n \n for i in range(0, P.shape[1]):\n #next_DP = \n submask = mask[:, i].unsqueeze(1).float() # [B, 1]\n emission_score = P[:, i] # [B, C]\n\n # [B, 1, C] + [C, C]\n next_choice = DP.unsqueeze(1) + self.T # [B, C, C]\n next_choice, choice[:, i, :] = next_choice.max(dim=-1)\n next_choice += emission_score\n DP = next_choice * submask + DP * (1 - submask) # max_score or acc_score_t\n \n DP += self.T[self.lbl_to_id['PAD_LBL']]\n last_elem = DP.argmax(-1)\n # now, the choice vector has been constructed and the solution can\n # be computed in the reverse direction. 
\n # DP[i][j][k] indicates the choice made at the kth step in the jth token \n # of the ith sentences\n \n choice = choice.cpu()\n \n for i in range(batch_size):\n \n num_tags = mask[i].sum()\n path[i].append(last_elem[i].int().item())\n prev = last_elem[i].int().item()\n for j in range(int(num_tags) - 2, -1, -1):\n \n path[i].append(choice[i][j + 1][prev].int().item())\n prev = choice[i][j + 1][prev].int().item()\n \n \n for i in range(batch_size):\n path[i].reverse()\n \n return path\n \ndef log_sum_exp(x):\n \"\"\"calculate log(sum(exp(x))) = max(x) + log(sum(exp(x - max(x))))\n \"\"\"\n max_score = x.max(-1)[0]\n return max_score + (x - max_score.unsqueeze(-1)).exp().sum(-1).log()\n \ndef train(train_set, dev_set, ner_model, id_to_lbl, lbl_to_id, pad_lbl_id, output_file, freeze=False):\n \n id_to_lbl[len(id_to_lbl) - 1] = 'START_LBL'\n \n trainset = NERDataset(train_set[0], train_set[1], train_set[2])\n devset = NERDataset(dev_set[0], dev_set[1], dev_set[2])\n \n trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=4)\n devloader = torch.utils.data.DataLoader(devset, batch_size=128, shuffle=False, num_workers=4)\n \n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n \n net = LinearCRF(100, 100, lbl_to_id, ner_model, train_set[2], freeze=freeze).to(device)\n \n print(net)\n \n tic = time.time()\n optimizer = optim.Adam(net.parameters(), lr=1e-3)\n scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, 100)\n \n patience = 15\n steps_left = patience\n min_val_loss = float('inf')\n best_model_dict = None\n early_stop = False\n\n for epoch in range(1, 101): # loop over the dataset multiple times\n #if epoch % 10 == 0 or epoch == 1:\n net.train()\n \n running_loss = 0.0\n for i, mbatch in enumerate(trainloader):\n W, X, Y = mbatch\n W, X, Y = W.to(device), X.to(device), Y.to(device)\n\n labels = torch.nn.functional.one_hot(Y, num_classes=19).float()\n\n mask = (~(Y == lbl_to_id['PAD_LBL'])).float() # mask is of shape batch_size (batch_size * seq_len)\n\n optimizer.zero_grad()\n P = net(W, X) #shape = (batch_size, sentence_length, num_tags). 
This is the P matrix.\n\n\n score_curr = ((P * labels).sum(dim=2) * mask).sum(dim=1) #Sanity checked to be correct.\n prob_matrix = net.T\n\n prob_sum = net.T[Y[:,0:1], lbl_to_id['START_LBL']].squeeze() + (net.T[Y[:,1:], Y[:,:-1]] * mask[:,:-1]).sum(dim=1)\n\n score = score_curr + prob_sum\n #print(\"score:\", score)\n\n\n DP = torch.full((X.shape[0], 19), -1000).cuda() #DP.shape is batch_size * num_tags\n DP[:, lbl_to_id['START_LBL']] = 0.\n for j in range(X.shape[1]):\n\n sub_mask = mask[:,j].unsqueeze(1)\n DP = (sub_mask) * (log_sum_exp(DP.unsqueeze(1) + prob_matrix.unsqueeze(0) + P[:,j].unsqueeze(2))) + (1 - sub_mask) * DP\n\n partition = (DP + net.T[lbl_to_id['PAD_LBL']]).logsumexp(dim=1)\n #print(\"partition:\", partition)\n #break\n loss = (partition - score).sum() / X.shape[0] #sum over all minibatches\n loss.backward()\n\n\n nn.utils.clip_grad_value_(net.parameters(), 5)\n optimizer.step()\n running_loss += loss.item()\n\n if i % 10 == 9:\n print('[%d, %5d] loss: %f' % (epoch, i + 1, running_loss / 10))\n running_loss = 0.0\n #scheduler.step()\n \n net.eval()\n \n total_loss = 0.0\n for i, mbatch in enumerate(devloader):\n W, X, Y = mbatch\n W, X, Y = W.to(device), X.to(device), Y.to(device)\n \n labels = torch.nn.functional.one_hot(Y, num_classes=19).float()\n\n mask = (1 - (Y == lbl_to_id['PAD_LBL'])).float() # mask is of shape batch_size (batch_size * seq_len)\n\n P = net(W, X) #shape = (batch_size, sentence_length, num_tags). This is the P matrix.\n\n\n score_curr = ((P * labels).sum(dim=2) * mask).sum(dim=1) #Sanity checked to be correct.\n prob_matrix = net.T\n\n prob_sum = net.T[Y[:,0:1], lbl_to_id['START_LBL']].squeeze() + (net.T[Y[:,1:], Y[:,:-1]] * mask[:,:-1]).sum(dim=1)\n\n score = score_curr + prob_sum\n\n\n DP = torch.full((X.shape[0], 19), -1000).cuda() #DP.shape is batch_size * num_tags\n DP[:, lbl_to_id['START_LBL']] = 0.\n for j in range(X.shape[1]):\n\n sub_mask = mask[:,j].unsqueeze(1)\n DP = (sub_mask) * (log_sum_exp(DP.unsqueeze(1) + prob_matrix.unsqueeze(0) + P[:,j].unsqueeze(2))) + (1 - sub_mask) * DP\n\n partition = (DP + net.T[lbl_to_id['PAD_LBL']]).logsumexp(dim=1)\n #print(\"partition:\", partition)\n #break\n loss = (partition - score).sum() #sum over all minibatches\n total_loss += loss.item()\n \n \n total_loss = total_loss / len(devset)\n \n print(\"val loss:\", total_loss)\n print(\"best loss:\", min_val_loss)\n print(\"Patience:\", steps_left)\n print(\"time:\", time.time() - tic)\n \n if total_loss < min_val_loss:\n steps_left = patience\n min_val_loss = total_loss\n best_model_dict = net.state_dict()\n else:\n if steps_left == 1:\n early_stop = True\n break\n else:\n steps_left -= 1\n \n print(\"Early stop:\", early_stop)\n if early_stop:\n print(\"Replacing with better model\")\n net.load_state_dict(best_model_dict)\n \n torch.save(net, output_file )\n \n \ndef predict(saved_model_file, test_set, ner_model, id_to_lbl, lbl_to_id, tok_to_id, pad_lbl_id, output_file):\n \n Y = (1 - (test_set[1] == tok_to_id['PAD_TOK'])).float()\n testset = NERDataset(test_set[0], test_set[1], Y)\n testloader = torch.utils.data.DataLoader(testset, batch_size=128, shuffle=False, num_workers=4)\n \n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n net = torch.load(saved_model_file).to(device)\n all_labels = []\n for i, mbatch in enumerate(testloader):\n W, X, mask = mbatch\n W, X, mask = W.to(device), X.to(device), mask.to(device)\n P = net(W, X)\n labels = net.predict(P, mask)\n \n for label in labels:\n 
all_labels.append(label)\n \n return all_labels\n \n \n","sub_path":"A1/LSTM-CRF/crf.py","file_name":"crf.py","file_ext":"py","file_size_in_byte":13880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"17282822","text":"# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom textwrap import dedent\nfrom typing import Iterable, Type\n\nimport pytest\n\nfrom pants.backend.python.dependency_inference.rules import import_rules\nfrom pants.backend.python.goals import lockfile\nfrom pants.backend.python.goals.lockfile import GeneratePythonLockfile\nfrom pants.backend.python.subsystems.python_tool_base import LockfileRules, PythonToolBase\nfrom pants.backend.python.target_types import ConsoleScript\nfrom pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints\nfrom pants.core.goals import generate_lockfiles\nfrom pants.core.goals.generate_lockfiles import GenerateLockfilesGoal, GenerateToolLockfileSentinel\nfrom pants.engine.rules import QueryRule\nfrom pants.engine.target import Dependencies, SingleSourceField, Target\nfrom pants.engine.unions import UnionRule\nfrom pants.testutil.rule_runner import RuleRunner\nfrom pants.util.ordered_set import FrozenOrderedSet\n\n\ndef _get_generated_lockfile_sentinel(\n rules: Iterable, subsystem: Type[PythonToolBase]\n) -> Type[GenerateToolLockfileSentinel]:\n \"\"\"Fish the generated lockfile sentinel out of the pool of rules so it can be used in a\n QueryRule.\"\"\"\n return next(\n r\n for r in rules\n if isinstance(r, UnionRule)\n and r.union_base == GenerateToolLockfileSentinel\n and issubclass(r.union_member, GenerateToolLockfileSentinel)\n and r.union_member.resolve_name == subsystem.options_scope\n ).union_member\n\n\nclass FakeToolWithSimpleLocking(PythonToolBase):\n options_scope = \"cowsay\"\n name = \"Cowsay\"\n help = \"A tool to test pants\"\n\n default_version = \"cowsay==5.0\"\n default_main = ConsoleScript(\"cowsay\")\n\n register_interpreter_constraints = True\n default_interpreter_constraints = [\"CPython>=3.7,<4\"]\n\n default_lockfile_resource = (\"\", \"cowsay.lock\")\n lockfile_rules_type = LockfileRules.SIMPLE\n\n\nclass MockSourceField(SingleSourceField):\n pass\n\n\nclass MockDependencies(Dependencies):\n pass\n\n\nclass MockTarget(Target):\n alias = \"tgt\"\n core_fields = (MockSourceField, MockDependencies)\n\n\n@pytest.fixture\ndef rule_runner() -> RuleRunner:\n lockfile_sentinel = _get_generated_lockfile_sentinel(\n FakeToolWithSimpleLocking.rules(), FakeToolWithSimpleLocking\n )\n rule_runner = RuleRunner(\n rules=[\n *lockfile.rules(),\n *generate_lockfiles.rules(),\n *import_rules(),\n *FakeToolWithSimpleLocking.rules(),\n QueryRule(GeneratePythonLockfile, [lockfile_sentinel]),\n ],\n target_types=[MockTarget],\n )\n\n rule_runner.write_files(\n {\"project/example.ext\": \"\", \"project/BUILD\": \"tgt(source='example.ext')\"}\n )\n return rule_runner\n\n\ndef test_simple_python_lockfile(rule_runner):\n \"\"\"Test that the `LockfileType.PEX_SIMPLE` resolved the graph and generates the lockfile.\"\"\"\n result = rule_runner.run_goal_rule(\n GenerateLockfilesGoal,\n args=[\n \"--resolve=cowsay\",\n \"--cowsay-lockfile=aaa.lock\",\n ],\n env_inherit={\"PATH\", \"PYENV_ROOT\", \"HOME\"},\n )\n assert result\n lockfile_content = rule_runner.read_file(\"aaa.lock\")\n assert (\n dedent(\n f\"\"\"\\\n // 
\"generated_with_requirements\": [\n // \"{FakeToolWithSimpleLocking.default_version}\"\n // ],\n \"\"\"\n )\n in lockfile_content\n )\n\n\ndef test_setup_lockfile(rule_runner) -> None:\n global_constraint = \"CPython<4,>=3.8\"\n\n lockfile_sentinel = _get_generated_lockfile_sentinel(\n FakeToolWithSimpleLocking.rules(), FakeToolWithSimpleLocking\n )\n\n def assert_lockfile_request(\n build_file: str,\n expected_ics: list[str],\n *,\n extra_expected_requirements: list[str] | None = None,\n extra_args: list[str] | None = None,\n ) -> None:\n rule_runner.write_files({\"project/BUILD\": build_file, \"project/f.py\": \"\"})\n rule_runner.set_options(\n [\"--cowsay-lockfile=lockfile.txt\", *(extra_args or [])],\n env={\"PANTS_PYTHON_INTERPRETER_CONSTRAINTS\": f\"['{global_constraint}']\"},\n env_inherit={\"PATH\", \"PYENV_ROOT\", \"HOME\"},\n )\n lockfile_request = rule_runner.request(GeneratePythonLockfile, [lockfile_sentinel()])\n assert lockfile_request.interpreter_constraints == InterpreterConstraints(expected_ics)\n assert lockfile_request.requirements == FrozenOrderedSet(\n [\n FakeToolWithSimpleLocking.default_version,\n *FakeToolWithSimpleLocking.default_extra_requirements,\n *(extra_expected_requirements or ()),\n ]\n )\n\n assert_lockfile_request(\n \"python_sources()\", FakeToolWithSimpleLocking.default_interpreter_constraints\n )\n assert_lockfile_request(\"target()\", FakeToolWithSimpleLocking.default_interpreter_constraints)\n # Since the SIMPLE locking mechanism doesn't look at ICs, this will still use tool ICs.\n assert_lockfile_request(\n \"python_sources(interpreter_constraints=['CPython<4,>=3.7'])\",\n FakeToolWithSimpleLocking.default_interpreter_constraints,\n )\n","sub_path":"src/python/pants/backend/python/util_rules/lockfile_test.py","file_name":"lockfile_test.py","file_ext":"py","file_size_in_byte":5331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"341377727","text":"from pydub import AudioSegment\nfrom datetime import datetime, timedelta\nimport os\n\nbase_dir = \"Konvertiert\\\\\"\nexport_dir = base_dir + \"Merged\\\\\"\n\ndef export_audiofile(audio_file, date_start, suffix):\n print(\"Export \" + str(date_start))\n date_base = date_start.replace(minute=0, second=0)\n base_audio = AudioSegment.silent(duration=(date_start-date_base).total_seconds()*1000)\n full_audio = base_audio + audio_file\n itr_sec = 0\n while itr_sec < full_audio.duration_seconds:\n end_sec = itr_sec + 60 * 60\n part_audio = full_audio[itr_sec*1000:end_sec*1000]\n part_date = date_base + timedelta(seconds=itr_sec)\n part_file = export_dir + part_date.strftime('%Y-%m-%d %H-%M') + suffix\n if not os.path.isfile(part_file):\n print(\"Write \" + part_file)\n part_audio.export(part_file)\n itr_sec = end_sec\n\n\nstarted = False\n\nfor file in os.listdir(base_dir):\n filename = os.fsdecode(file)\n if filename.endswith(\".WAV\"): \n date_str = filename[:14]\n date_start = datetime.strptime(date_str, '%Y%m%d%H%M%S')\n song = AudioSegment.from_wav(base_dir + filename)\n dur_s = song.duration_seconds\n date_end = date_start + timedelta(seconds=dur_s) \n \n if not started: \n audio_file = song\n track_start = date_start\n track_suffix = filename[14:]\n started = True\n else:\n diff_to_prev = (date_start - prev_end).total_seconds()\n \n if diff_to_prev < 3600:\n between = AudioSegment.silent(duration=diff_to_prev*1000)\n audio_file = audio_file + between + song \n else:\n export_audiofile(audio_file, track_start, track_suffix)\n audio_file = song\n 
track_start = date_start\n track_suffix = filename[14:]\n prev_end = date_end\nexport_audiofile(audio_file, track_start, track_suffix)\n\n\n","sub_path":"audio-proc/proc.py","file_name":"proc.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"179288975","text":"from flask import Flask\n\nfrom werkzeug.contrib.profiler import ProfilerMiddleware\n\nfrom .core import db, babel\n\n\ndef load_config(app, additional_config={}):\n \"\"\"Load configuration from environment variable plus from additional\n dictionary for test cases.\"\"\"\n app.config.from_envvar(\"FLASK_CONFIG\")\n app.config.update(additional_config)\n return app\n\n\ndef add_profiler(app):\n \"\"\"Add a profiler that runs on every request when PROFILE set to True.\"\"\"\n if app.config.get(\"PROFILE\", False):\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app,\n restrictions=[30],\n sort_by=(\"time\", \"cumulative\"))\n return app\n\n\ndef create_db(app, db):\n \"\"\"Create database from models.\"\"\"\n with app.app_context():\n db.create_all()\n\n\ndef create_app(additional_config={}, name=\"atlas_core\", standalone=False):\n \"\"\"App factory. Creates a Flask `app` object and imports extensions, sets\n config variables etc.\"\"\"\n\n app = Flask(name)\n app = load_config(app, additional_config)\n\n if not standalone:\n # Register blueprints\n from .sample.views import sample_app\n app.register_blueprint(sample_app)\n\n # Load extensions\n db.init_app(app)\n babel.init_app(app)\n\n # Debug tools\n if app.debug:\n app = add_profiler(app)\n\n if standalone:\n create_db(app, db)\n\n return app\n","sub_path":"atlas_core/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"273361129","text":"#Write a function that takes a list value as an argument\r\n#and returns a string with all the items separated by a comma\r\n#and a space, with and inserted before the last item.\r\n#Be sure to test the case where an empty list [] is passed to your function.\r\n\r\n\r\nspam = ['apples', 'bananas', 'tofu', 'cats']\r\nfirst_aid = ['band-aid', 'gauze', 'flare']\r\nmusic_room = ['piano', 'guitar', 'uke', 'flute']\r\nempty_list = []\r\n\r\ndef separate(list):\r\n list.insert(-1, 'and')\r\n new_list = ', '.join(list)\r\n print(new_list)\r\n \r\nseparate(spam)\r\nseparate(first_aid)\r\nseparate(music_room)\r\nseparate(empty_list) #empty list test\r\n\r\n","sub_path":"Ch4 - Comma Code.py","file_name":"Ch4 - Comma Code.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"114576651","text":"import sys\nsys.path.insert(0, \"../\")\nfrom familyTree import FamilyTree\nfrom solution.problem1 import Problem1\n\n\nclass Problem2:\n\n @staticmethod\n def add_child(family):\n parent_name = input(\"Enter Parent Name: \")\n options = [\"Daughter\", \"Son\"]\n for index, name in enumerate(options):\n print(\"Enter \" + str(index) + \" If you want to \" + name + \".\")\n option_number = int(input(\"Your option : \"))\n child_name = input(\"Enter \"+options[option_number] + \" name :\")\n sex = \"F\" if option_number == 0 else \"M\"\n family.add_new_born(parent_name, child_name, sex)\n print(\"child Added successfully\")\n\n\nif __name__ == \"__main__\":\n family = FamilyTree().construct()\n Problem2.add_child(family)\n # using problem1 here to get 
a person's relatives based on a relationship\n Problem1.print_relatives_of_member(family)\n\n","sub_path":"solution/problem2.py","file_name":"problem2.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"215396879","text":"import requests\nfrom bs4 import BeautifulSoup as bs\nimport pandas as pd\n\n\nclass Spider(object):\n headers = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'\n }\n web_site ='https://bbs2.ongacg.com/'\n comic_save_path = ''\n chapter_save_path = ''\n\n # 获取网页信息\n def get_html(self, url):\n html = requests.get(url, headers=self.headers, timeout=5000)\n html.encoding = html.apparent_encoding # 'utf8'\n html = html.text\n html = html.encode('gbk', \"ignore\").decode('gbk') # 先用gbk编码,忽略掉非法字符,然后再译码\n html = html.encode('utf-8').decode('utf-8')\n soup = bs(html, 'lxml')\n return soup\n\n def init_spider(self, web_url, page_num):\n post_list = []\n csv_title = ['标题', '内容简介', '下载链接', '提取码', '解压密码']\n\n for index in range(22, page_num):\n post_list = []\n url = web_url.format(str(index))\n print(url)\n page = self.get_html(url)\n # print(page)\n list_container = page.find('table', {'id': 'threadlisttableid'})\n # print(list_container)\n comic_list = list_container.find_all('tbody')\n # print(comic_list)\n for comic in comic_list:\n # print(comic)\n comic_info = comic.find('a', {'class': 's xst'})\n if comic_info:\n # 漫画标题\n comic_title = comic_info.text\n print(comic_title)\n # 漫画详情\n comic_content = ''\n # # 漫画作者\n # author_list = ''\n # # 漫画详情页地址\n comic_url = self.web_site + comic_info['href']\n comic_html = self.get_html(comic_url)\n # 漫画简介\n comic_content = comic_html.find('td', {'class': 't_f'})\n\n # 缓存这一条文章的全部信息,以备保存到CSV\n post_list.append([comic_title, comic_content, '', '', ''])\n\n post_data = pd.DataFrame(columns=csv_title, data=post_list)\n if index == 1:\n post_data.to_csv('ACG次元网列表2.csv', encoding='UTF-8')\n else:\n post_data.to_csv('ACG次元网列表2.csv', mode='a', header=False, encoding='UTF-8')\n\n\n\nurl = 'https://bbs2.ongacg.com/forum-37-{}.html'\nspider = Spider()\nspider.init_spider(url, 97)\n","sub_path":"51绅士资源吧/ACG次元网.py","file_name":"ACG次元网.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"611472083","text":"from django.shortcuts import render\nfrom urllib.parse import quote\nfrom bs4 import BeautifulSoup\nimport requests\n\nusers = [\n r\"闲闲小可爱\",\n r\"基拉的左轮\",\n r\"云天挽歌\",\n r\"我爱两仪式\",\n r\"菠萝快乐车\",\n r\"丨ELUNE丨\",\n r\"技不如人,甘拜下风。\",\n r\"相思寄于山海\",\n r\"忧伤暗火\",\n r\"喵了個咪的\",\n r\"三谷加奈惠\",\n r\"珈百璃小惡魔\",\n r\"不愿透露姓名的青某\",\n #r\"Nirvana_Y\"\n]\n\ndef index(request):\n latest_stats_list = []\n for user in users:\n stat = {}\n userId = quote(user)\n # print(userId)\n \n # 拉数据\n stat = {}\n url = \"http://wotbox.ouj.com/wotbox/index.php?r=default%2Findex&pn={id}\".format(id=userId)\n resp = requests.get(url)\n webContent = resp.content\n soup = BeautifulSoup(webContent, 'html.parser')\n\n stat[\"id\"] = user\n stat[\"power\"] = soup.find(class_='power fl').span.get_text()\n stat[\"win_rate\"] = soup.find(class_='title win-rate-1k').next_sibling.next_sibling.get_text()\n stat[\"win_rate_c\"] = 1 if int(soup.find(class_='title win-rate-1k').next_sibling.next_sibling.get_text().strip('%')) >= 50 else 0 \n stat[\"total\"] = soup.find(class_='total').get_text()[2:]\n stat[\"win\"] = 
soup.findAll(class_=\"win\")[5].get_text()[2:]\n stat[\"fail\"] = soup.find(class_='fail').get_text()[2:]\n stat[\"damage\"] = soup.findAll(class_='num')[6].get_text()\n stat[\"exp\"] = soup.findAll(class_='num')[7].get_text()\n stat[\"destroy\"] = soup.findAll(class_='num')[8].get_text()\n stat[\"discover\"] = soup.findAll(class_='num')[11].get_text()\n stat[\"level\"] = soup.find(class_='title avg-lv-1k').next_sibling.next_sibling.get_text()\n stat[\"hit_rate\"] = soup.find(class_='title hit-rate-1k').next_sibling.next_sibling.get_text()\n for i in range(5):\n stat[\"p{n}c\".format(n=i+1)] = 1 if soup.findAll(class_='recent-list__right')[i].td.get_text() == '胜利' else 0 \n stat[\"p{n}\".format(n=i+1)] = soup.findAll(class_='recent-list__right')[i].td.next_sibling.next_sibling.get_text()\n print(stat) \n latest_stats_list.append(stat)\n context = {\n 'latest_stats_list': latest_stats_list,\n }\n return render(request, 'stats/index.html', context)\n","sub_path":"stats/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"354817780","text":"import xlrd as xl\nimport re, json, io\n\n\ndef special_match(strg, search=re.compile(r'[^0-9.]').search):\n return not bool(search(strg))\n\ndef unify_row(row):\n temprow = row\n if temprow:\n if not temprow.strip()[0].isdigit():\n temprow = temprow.replace(' -', temprow[:temprow.find(';') + 2], 1).strip()\n temprow = temprow[currow.find(';') + 1:]\n while ' -' in temprow:\n temprow = temprow.replace(' -', temprow[temprow.find('г.') + 2:temprow.find(';', temprow.find('г.'))+1])\n print(temprow)\n return temprow\n\ndef get_dates(dates):\n #dates = dates[:len(dates)\n temp = list(filter(None, dates.split(';')))\n temp2 = list()\n date_arr = []\n cur_day, cur_month, cur_year = '', '', ''\n for each in temp:\n temp2.append(list(filter(None, each.strip().split(','))))\n if temp2:\n for x in temp2[::-1]:\n for y in x[::-1]:\n temp = y\n cur_day = ''\n if special_match(temp):\n if '.' in temp:\n if temp.count('.') == 2:\n cur_year = temp[temp.rfind('.'):]\n temp = temp[:temp.rfind('.')]\n if temp.count('.') == 1:\n cur_month = temp[temp.rfind('.'):]\n temp = temp[:temp.rfind('.')]\n if '.' not in temp:\n cur_day = temp\n date_arr.append(str(cur_day) + str(cur_month) + str(cur_year))\n # print(temp)\n return sorted(date_arr)\n\n\nfile = xl.open_workbook(\"/home/inteldoter/Downloads/schedule.xls\", encoding_override=\"cp1252\")\nsheet = file.sheet_by_index(0)\nrows = []\noutput = []\nfor rownum in range(11, sheet.nrows):\n rows.append(sheet.row_values(rownum))\nfor row in rows:\n curpair = int(row[1]) if row[1] else 0\n curlist = []\n cursubj, cursubjtype, curprof = '', '', ''\n currow = str(row[2])\n if 'День самоподготовк' in currow:\n currow = ''\n currow = currow.replace('б; ', 'б ##').replace('.; ', '. 
##')\n if not currow.split('##'):\n curlist = list(unify_row(currow))\n else:\n for each in currow.split('##'):\n curlist.append(unify_row(each))\n for each in curlist:\n temp = each\n while temp:\n if ':' in temp:\n curprof = temp[temp.rfind(':') + 2:].strip()\n temp = temp[:temp.rfind(':')]\n curdates = (get_dates(temp[:temp.find('г.')]))\n temp = temp[temp.find('г.') + 2:]\n cursubj = temp[:temp.find(';')].strip()\n temp = temp[temp.find(';') + 1:]\n temp = temp.replace('теория; практика', 'теория, практика')\n cursubjtype = temp[:temp.find(';')].strip() if ';' in temp else temp.strip()\n temp = temp[temp.find(';'):]\n if temp and curpair:\n # print(curpair, curprof, curdates, cursubj, cursubjtype)\n for each in curdates:\n output.append({\"date\": each, \"order\": curpair, \"name\": cursubj, \"subjType\": cursubjtype, \"subjProf\": curprof})\n\njson_string = json.dumps({\"SubjArr\": output}, ensure_ascii=False).encode('utf-8')\nwith io.open('data.json', 'w', encoding='utf8') as json_file:\n json.dump({\"subjArr\": output}, json_file, ensure_ascii=False)\n\n","sub_path":"source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"487800659","text":"from __future__ import print_function\nimport argparse\nimport os\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom torch.autograd import Variable\nfrom apex.fp16_utils import to_python_float\nimport pprint\nimport logging\nimport time\n\nimport torch.utils.data\nimport torch.utils.data.distributed\nimport torch.distributed as dist\n\n# Training settings\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--batch-size', type=int, default=4096, metavar='N',\n help='Matrix M')\n\nparser.add_argument('--input-size', type=int, default=2048, metavar='N',\n help='Matrix K')\n\nparser.add_argument('--hidden-size', type=int, default=2048, metavar='N',\n help='Matrix K')\n\nparser.add_argument('--output-size', type=int, default=2048, metavar='N',\n help='Matrix N')\n\nparser.add_argument('--bias', action='store_true', default=False,\n help='whether use debug apex')\n\nparser.add_argument('--logfile', type=str, default=None, help='logging output')\n\nparser.add_argument('--hidden-layers', type=int, default=4, metavar='N',\n help='input batch size for training (default: 64)')\n\nparser.add_argument('--bucket-size', type=int, default=10000000, metavar='N',\n help='input batch size for training (default: 64)')\n\nparser.add_argument('--layers-per-bucket', type=int, default=0, metavar='N',\n help='input batch size for training (default: 64)')\n\nparser.add_argument('--delay-allreduce', action='store_true', default=False,\n help='whether use delay allreduce')\n\nparser.add_argument('--datatype', type=int, default=10000000, metavar='N',\n help='input batch size for training (default: 64)')\n\nparser.add_argument('--iteration-number', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\n\nparser.add_argument('--warmup-number', type=int, default=10, metavar='N',\n help='input batch size for training (default: 64)')\n\nparser.add_argument('--debug-apex', action='store_true', default=False,\n help='whether use debug apex')\n\nparser.add_argument('--debug-apex-dir', type=str, default='/home/scratch.shawnw_gpu/docker/apex/apex/parallel', \n 
help='custom-apex-dir')\n\nparser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n\nparser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n\nparser.add_argument(\"--local_rank\", default=0, type=int)\n\nargs = parser.parse_args()\n\nif args.debug_apex:\n import sys\n sys.path.insert(0, args.debug_apex_dir)\n from distributed import DistributedDataParallel as DDP\nelse:\n from apex.parallel import DistributedDataParallel as DDP\n \nargs.cuda = not args.no_cuda and torch.cuda.is_available()\n\nargs.distributed = False\nif 'WORLD_SIZE' in os.environ:\n args.distributed = int(os.environ['WORLD_SIZE']) > 1\n\ndef rank0print(*nargs):\n print_str = \"\"\n for item in nargs:\n print_str += str(item)\n if args.distributed:\n if torch.distributed.get_rank() == 0:\n print(print_str)\n else:\n print(print_str)\n\n\nif args.distributed:\n assert args.cuda, \"Distributed mode requires running with CUDA.\"\n torch.cuda.set_device(args.local_rank)\n torch.distributed.init_process_group(backend='nccl', init_method='env://')\n world_size = torch.distributed.get_world_size()\n if torch.distributed.get_rank() == 0:\n if args.logfile == None:\n logfile = \"logfile_rank0.log\"\n args.logfile = logfile\n logging.basicConfig(format='%(filename)s:%(lineno)d:%(levelname)s:%(message)s', filename=args.logfile, level=logging.DEBUG)\n else:\n logging.basicConfig(format='%(filename)s:%(lineno)d:%(levelname)s:%(message)s', level=logging.INFO)\nelse:\n world_size = 1\n\nrank0print(\"world_size:\", world_size)\n\nclass Net(nn.Module):\n def __init__(self, args):\n super(Net, self).__init__()\n\n self.layers = []\n self.args = args\n\n #input layer\n layer = torch.nn.Linear(args.input_size, args.hidden_size, args.bias)\n self.add_module(\"input_layer\", layer)\n self.layers.append(layer)\n \n #hidden layer\n for i in range(args.hidden_layers):\n layer = torch.nn.Linear(args.hidden_size, args.hidden_size, args.bias)\n self.add_module(\"hidden_layer_\"+str(i), layer)\n self.layers.append(layer)\n\n #output layer\n layer = torch.nn.Linear(args.hidden_size, args.output_size, args.bias)\n self.add_module(\"output_layer\", layer)\n self.layers.append(layer)\n\n def forward(self, x):\n for layer in self.layers:\n x = layer(x)\n return x\n\n def register_hook(self):\n self.grad_accs = []\n for name, param in self.named_parameters():\n if param.requires_grad:\n def wrapper(param, name):\n param_tmp = param.expand_as(param)\n grad_acc = param_tmp.grad_fn.next_functions[0][0]\n rank0print(grad_acc)\n def allreduce_hook(*unused):\n rank0print(\"running param:\",name, \" grad at time:\", time.time())\n grad_acc.register_hook(allreduce_hook)\n self.grad_accs.append(grad_acc)\n wrapper(param, name)\n\nmodel = Net(args)\n\nif args.cuda:\n model = model.cuda()\n\nif args.debug_apex:\n model.register_hook()\n\nif args.distributed:\n if args.layers_per_bucket != 0:\n args.bucket_size = args.layers_per_bucket * args.hidden_size *args.hidden_size \n model = DDP(model,message_size=args.bucket_size,delay_allreduce=args.delay_allreduce)\n\ndef train():\n \n rank0print(\"**************************\")\n rank0print(model)\n rank0print(\"**************************\")\n for name, param in model.named_parameters():\n rank0print(name, \" : \", param.size())\n rank0print(\"**************************\")\n\n model.train()\n\n x = torch.randn(args.batch_size, args.input_size, requires_grad=False) \n target = 
torch.randn(args.batch_size, args.output_size, requires_grad=False) \n loss_fn = torch.nn.MSELoss()\n\n total_backward_time = 0\n total_e2e_time = 0\n total_loss_time = 0\n total_forward_time = 0\n\n if args.cuda:\n x = x.cuda()\n target = target.cuda()\n loss_fn = loss_fn.cuda()\n\n forward_start = torch.cuda.Event(enable_timing=True)\n forward_end = torch.cuda.Event(enable_timing=True)\n nccl_end = torch.cuda.Event(enable_timing=True)\n\n compute_stream = torch.cuda.current_stream()\n\n pipeline_start = True\n\n for i in range(args.iteration_number):\n\n rank0print(\"===ITERATION:%s====\", i)\n\n if (pipeline_start != True):\n grads = [param.grad.data for param in self.module.parameters() if param.grad is not None]\n buckets = split_by_type(grads) \n for tp in buckets:\n bucket = buckets[tp]\n coalesced = flatten(bucket)\n if extra_args is not None:\n call(coalesced, *extra_args)\n else:\n call(coalesced)\n if call is dist.all_reduce:\n coalesced /= dist.get_world_size()\n \n for buf, synced in zip(bucket, unflatten(coalesced, bucket)):\n buf.copy_(synced)\n\n model.zero_grad() \n\n forward_start_time = time.time()\n output = model(x)\n compute_stream.synchronize()\n forward_end_time = time.time()\n\n loss = loss_fn(output, target)\n compute_stream.synchronize()\n loss_end_time = time.time()\n\n loss.backward()\n compute_stream.synchronize()\n\n backward_end_time = time.time()\n\n rank0print(\"step \", i, \n \",e2e:\", backward_end_time-forward_start_time, \n \",forward:\", forward_end_time-forward_start_time, \n \",loss:\",loss_end_time - forward_end_time,\n \",backward:\",backward_end_time - loss_end_time)\n\n if i+1 > args.warmup_number:\n total_e2e_time += backward_end_time - forward_start_time\n total_backward_time += backward_end_time - loss_end_time\n total_loss_time += loss_end_time - forward_end_time\n total_forward_time += forward_end_time-forward_start_time\n \n average_e2e_time = total_e2e_time/(args.iteration_number-args.warmup_number)\n average_forward_time = total_forward_time/(args.iteration_number-args.warmup_number)\n average_loss_time = total_loss_time/(args.iteration_number-args.warmup_number)\n average_backward_time = total_backward_time/(args.iteration_number-args.warmup_number)\n\n rank0print(\"===RUN EPIOLOG===\")\n rank0print(\"Average e2e:\", average_e2e_time,\n \",forward:\", average_forward_time,\n \",loss:\", average_loss_time,\n \",backward:\", average_backward_time)\n\n rank0print(\"csv;{};{};{};{};{};{}\".format(world_size, args.layers_per_bucket, average_e2e_time, average_forward_time, average_loss_time, average_backward_time))\n\ntrain()\n\nlogging.shutdown()\n","sub_path":"examples/toy/bucket_optimized.py","file_name":"bucket_optimized.py","file_ext":"py","file_size_in_byte":9044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"13581861","text":"\"\"\"\nMetadata loading\n\"\"\"\nfrom pathlib import Path\nfrom typing import Any, List, Union, TYPE_CHECKING\n\nfrom .constants import (\n ALBUMART_SUPPORTED_FILENAMES,\n BOOKLET_SUPPORTED_FILENAMES,\n)\nfrom ..exceptions import MetadataError\nfrom .formats.albumart import AlbumArt\nfrom .formats.booklet import Booklet\n\nif TYPE_CHECKING:\n from ..configuration import Configuration\n from ..library.album import Album\n from ..library.tree import LibraryItem\n\n\nclass AlbumMetadata:\n \"\"\"\n Metadata loader for album objects\n \"\"\"\n album: 'Album'\n albumart: List[AlbumArt]\n booklets: List[Booklet]\n\n def __init__(self, album: 'Album') -> None:\n 
self.album = album\n self.albumart = []\n self.booklets = []\n\n @property\n def config(self) -> 'Configuration':\n \"\"\"\n Return configuration via the Album's library object\n \"\"\"\n return self.album.library.config\n\n def debug(self, *args: List[Any]) -> None:\n \"\"\"\n Send debug message to stderr if debug mode is enabled\n \"\"\"\n self.album.debug(*args)\n\n def error(self, *args: List[Any]) -> None:\n \"\"\"\n Send error message to stderr\n \"\"\"\n self.album.error(*args)\n\n def message(self, *args: List[Any]) -> None:\n \"\"\"\n Show message to stdout unless silent flag is set\n \"\"\"\n self.album.message(*args)\n\n def add_metadata_file(self, metadata_file: Union['LibraryItem', Path]) -> 'LibraryItem':\n \"\"\"\n Add a metadata file to the album\n \"\"\"\n kwargs = {\n 'config': self.config,\n 'library': self.album.library,\n 'album': self.album,\n 'path': metadata_file,\n }\n if metadata_file.name in ALBUMART_SUPPORTED_FILENAMES:\n albumart = AlbumArt(**kwargs)\n self.albumart.append(albumart)\n return albumart\n if metadata_file.name in BOOKLET_SUPPORTED_FILENAMES:\n booklet = Booklet(**kwargs)\n self.booklets.append(booklet)\n return booklet\n raise MetadataError(f'Unknown file type: {metadata_file}')\n","sub_path":"oodi/metadata/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"428549380","text":"from PyQt5 import QtWidgets, uic, QtCore\nfrom PyQt5.QtMultimedia import QMediaPlayer, QMediaContent, QVideoFrame\nfrom PyQt5.QtCore import QUrl, QTimer\nfrom PyQt5.QtGui import QPixmap, QImage\nimport sys\nimport os\nimport threading\napp = QtWidgets.QApplication(sys.argv)\nimport cv2\nimport matplotlib.pyplot as plt\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom time import sleep\n\n\nclass UI(QtWidgets.QMainWindow):\n\t# changePixmap = QtCore.pyqtSignal()\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.ui = uic.loadUi('/workspace/demo_cv2_load_entire.ui', self)\n\t\tself.ui.button_loadvideo.clicked.connect(self.get_videopath)\n\t\tself.ui.button_playpause.clicked.connect(self.playpause)\n\t\tself.ui.button_playpause.setEnabled(False)\n\t\tself.lineEdit.returnPressed.connect(self.set_videopath)\n\t\t# self.slider_video.setRange(0, 0)\n\t\t# self.slider_video.sliderMoved.connect(self.set_video_position)\n\n\t\t# self.player = QMediaPlayer(None, QMediaPlayer.VideoSurface)\n\t\t# self.player.setVideoOutput(self.videowidget)\n\t\t# self.player.positionChanged.connect(self.set_slider_position)\n\t\t# self.player.durationChanged.connect(self.set_slider_duration)\n\n\t\tself.slider_cv2.setRange(0, 0)\n\t\tself.slider_cv2.setValue(0)\n\t\tself.slider_cv2.valueChanged.connect(self.read_videoframe)\n\t\tself.slider_cv2.sliderReleased.connect(self.read_videoframe)\n\t\tself.slider_cv2.sliderPressed.connect(self.stop_timer)\n\t\tself.timer = QTimer(self)\n\t\tself.timer.timeout.connect(self.set_slider_position)\n\n\t\tself.fig = plt.Figure()\n\t\tself.canvas = FigureCanvas(self.fig)\n\t\tself.graph_layout.addWidget(self.canvas)\n\n\t\t# self.changePixmap = QtCore.pyqtSignal(QImage)\n\t\t# self.changePixmap.connect(self.show_videoframe)\n\n\t\t#-------------\n\t\tself.video_format = ('mp4', 'avi', 'mpeg')\n\t\tself.videopath = ''\n\t\tself.frames = []\n\t\tself.isplaying = False\n\t\tself.position = 0\n\t\tself.cap = None\n\t\tself.max_frame = None\n\t\tself.values = 
[]\n\t\t#-------------\n\n\t\tself.ui.show()\n\t\n\tdef set_videopath(self):\n\t\tself.videopath = self.lineEdit.text()\n\t\t# print(self.videopath)\n\t\tif (self.videopath != '') \\\n\t\tand os.path.isfile(self.videopath) \\\n\t\tand self.videopath.split('.')[-1] in self.video_format:\n\t\t\t# self.player.setMedia(QMediaContent(QUrl.fromLocalFile(self.videopath)))\n\t\t\t# self.read_videoframe()\n\t\t\tself.cap = cv2.VideoCapture(self.videopath)\n\t\t\tself.max_frame = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\t\t\tself.slider_cv2.setRange(0, self.max_frame)\n\t\t\tself.values = [0 for _ in range(self.max_frame)]\n\t\t\tself.ax = self.fig.add_subplot(111)\n\t\t\tself.ax.plot(self.values)\n\t\t\tself.ax.set_xlabel(\"frame\")\n\t\t\tself.ax.set_ylabel(\"confidence\")\n\t\t\tself.ax.set_title(\"TEMP graph\")\n\t\t\tself.ax.set_ylim([0., 1.])\n\t\t\tself.ax.legend()\n\t\t\tself.read_videoframe()\n\t\t\tself.ui.button_playpause.setEnabled(True)\n\t\n\tdef get_videopath(self):\n\t\tfilter = \"Videos(*.mp4 *.avi *.mpeg)\"\n\t\tself.videopath = QtWidgets.QFileDialog.getOpenFileName(self, filter=filter)[0]\n\t\t# print(self.videopath)\n\t\tif self.videopath != '':\n\t\t\t# self.player.setMedia(QMediaContent(QUrl.fromLocalFile(self.videopath)))\n\t\t\t# self.read_videoframe()\n\t\t\tself.lineEdit.setText(self.videopath)\n\t\t\tself.cap = cv2.VideoCapture(self.videopath)\n\t\t\tself.max_frame = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\t\t\tself.slider_cv2.setRange(0, self.max_frame)\t\t\t\n\t\t\tself.values = [0 for _ in range(self.max_frame)]\n\t\t\tself.ax = self.fig.add_subplot(111)\n\t\t\tself.ax.plot(self.values)\n\t\t\tself.ax.set_xlabel(\"frame\")\n\t\t\tself.ax.set_ylabel(\"confidence\")\n\t\t\tself.ax.set_title(\"TEMP graph\")\n\t\t\tself.ax.set_ylim([0., 1.])\n\t\t\tself.ax.legend()\n\t\t\tself.read_videoframe()\n\t\t\tself.ui.button_playpause.setEnabled(True)\n\t\t\t# self.changePixmap.emit()\n\n\tdef playpause(self):\n\t\t# if self.player.state() == QMediaPlayer.PlayingState:\n\t\t# \tself.player.pause()\n\t\t# \t# pass\n\t\t# else:\n\t\t# \tself.player.play()\n\t\tif not self.isplaying:\n\t\t\tself.timer.start(500)\n\t\t\tself.isplaying = True\n\t\t\t# print(\"BTN clicked\")\n\t\telse:\n\t\t\tself.timer.stop()\n\t\t\tself.isplaying = False\n\t\t\t# self.changePixmap.emit()\n\t\t\t# self.read_videoframe()\n\t\n\t# def set_video_position(self, position):\n\t# \tself.player.setPosition(position)\n\t\n\tdef set_slider_position(self):\n\t\t# self.slider_video.setValue(position)\n\t\tself.position += 1\n\t\tself.slider_cv2.setValue(self.position)\n\n\t# def set_slider_duration(self, duration):\n\t# \tself.slider_video.setRange(0, duration)\n\t\n\tdef read_videoframe(self):\n\t\t# cap = cv2.VideoCapture(self.videopath)\n\t\t# while cap.isOpened():\n\t\t# \tret, frame = cap.read()\n\t\t# \tif ret:\n\t\t# \t\tself.frames.append(frame)\n\t\t# \t\tprint(\"Loading frame: \", len(self.frames))\n\t\t# \telse:\n\t\t# \t\tbreak\n\t\t# \t# cv2.waitKey(30)\n\t\t# \t# sleep(1)\n\t\t# print(\"End loading\")\n\t\tself.position = self.slider_cv2.value()\n\t\tself.cap.set(1, self.slider_cv2.value())\n\t\tret, frame = self.cap.read()\n\t\tif ret:\n\t\t\tself.show_videoframe(frame)\n\t\telse:\n\t\t\tprint(\"Nothing to show\")\n\t\t# cap.release()\n\t\t# cv2.destroyAllWindows()\n\t\tself.process_frames_temp(frame)\n\t\n\tdef show_videoframe(self, frame):\n\t\t# print(self.slider_cv2.value())\n\t\t# frame = self.frames[self.slider_cv2.value()]\n\t\tframe = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\t\th, 
w, ch = frame.shape\n\t\tframe = QImage(frame.data, w, h, ch * w, QImage.Format_RGB888)\n\t\tframe = QPixmap(frame)\n\t\tframe = frame.scaled(320, 270, QtCore.Qt.KeepAspectRatio)\n\t\tself.label.setPixmap(frame)\n\t\t# self.label.update()\n\t\n\t# def show_videoframe_stop_timer(self):\n\t# \tself.timer.stop()\n\t# \tframe = self.frames[self.slider_cv2.value()]\n\t# \tself.position = self.slider_cv2.value()\n\t# \tframe = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\t# \th, w, ch = frame.shape\n\t# \tframe = QImage(frame.data, w, h, ch * w, QImage.Format_RGB888)\n\t# \tframe = QPixmap(frame)\n\t# \tframe = frame.scaled(320, 270, QtCore.Qt.KeepAspectRatio)\n\t# \tself.label.setPixmap(frame)\n\t# \tself.timer.start(500)\n\n\tdef stop_timer(self):\n\t\tself.timer.stop()\n\t\tself.isplaying = False\n\t\n\tdef process_frames_temp(self, frame):\n\t\t# values = [0.8 for _ in range(len(self.frames))]\n\t\t# print(values)\n\t\tself.values[self.slider_cv2.value()] = 0.8\n\t\tself.ax.plot(self.values)\n\t\tself.canvas.draw()\n\n\nif __name__ == \"__main__\":\n\t# import cv2\n\t# app = QtWidgets.QApplication(sys.argv)\n\twindow = UI()\n\tapp.exec_()\n","sub_path":"demo_load_lazy.py","file_name":"demo_load_lazy.py","file_ext":"py","file_size_in_byte":6159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"255640597","text":"'''\nCopyright (c) 2014 Adam Giermanowski\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n'''\n\nimport os\nimport csv\nimport urllib2\nfrom xml.dom import minidom\n\n\ndef get_stock_names():\n \"\"\" Returns a dictionary with stock names and IDs\n :return: dictionary (name: stockID).\n \"\"\"\n try:\n page = urllib2.urlopen(\"http://finanse.wp.pl/isin,PLOPTTC00019,stocks.xml\")\n dom = minidom.parse(page)\n stocks = dom.getElementsByTagName('item')\n except:\n raise Exception('Could not parse stocks from: http://finanse.wp.pl/isin,PLOPTTC00019,stocks.xml')\n\n names = [str(name.getAttribute('name')) for name in stocks]\n ids = [str(name.getAttribute('value')) for name in stocks]\n data = dict(zip(names, ids))\n\n # remove trash data\n for name in data.keys():\n digit_counter = list(name)\n digit_counter = filter(lambda x: x in '1234567890', digit_counter)\n num_digits = len(digit_counter)\n if num_digits >= 3:\n data.pop(name)\n\n return data\n\n\ndef get_stock_names_csv():\n \"\"\" Saves a list of stock names to csv\n \"\"\"\n\n data = get_stock_names()\n\n try:\n os.remove('names.csv')\n except:\n pass\n\n with open('names.csv', 'w') as f:\n csv_writer = csv.writer(f)\n csv_writer.writerows(data)\n\n\ndef get_fund_names():\n \"\"\" Returns 2 lists - fund names and fund IDs\n \"\"\"\n page = urllib2.urlopen(\"http://finanse.wp.pl/fundslist.xml\")\n dom = minidom.parse(page)\n funds = dom.getElementsByTagName('item')\n\n names = [str(fund.getAttribute('name').encode('utf-8')) for fund in funds]\n ids = [str(fund.getAttribute('value').encode('utf-8')) for fund in funds]\n\n return names, ids\n\n\ndef get_fund_names_csv():\n \"\"\" Saves a list of fund names to csv\n \"\"\"\n names, ids = get_fund_names()\n\n try:\n os.remove('funds.csv')\n except:\n pass\n\n lists = zip(names, ids)\n with open('funds.csv', 'w') as file:\n csv_writer = csv.writer(file)\n csv_writer.writerows(lists)\n","sub_path":"get_names.py","file_name":"get_names.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"219726182","text":"import pandas as pd\n\n\n# visualization\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n##reading data set\ntrain_df = pd.read_csv('/content/train.csv')\n\n\n\ntrain_df['Sex'] = train_df['Sex'].map( {'female': 1, 'male': 0} )\nprint(train_df[[\"Sex\", \"Survived\"]].groupby(['Sex'], as_index=False).mean().sort_values(by='Survived', ascending=False))\n\n# print(train_df['Survived'].corr(train_df['Sex']))\n\n##Analyze by visualizing data\n####Correlating numerical features\ntrain_df.corr()\ng = sns.FacetGrid(train_df, col='Survived')\ng.map(plt.hist, 'Sex', bins=20)\nplt.show()","sub_path":"Coreelation.py","file_name":"Coreelation.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"483291852","text":"import os\nfrom Bio.PDB import PDBParser, Residue, Polypeptide\nfrom Bio import PDB\n\n\nclass Protein(object):\n\t\"\"\" \n\tThis is a class that obtains protein sequence \n\tand structure information \n \n\tAttributes: \n\t\td_seq (dict of {int: str): connects PDB \n\t\t\tresidue number with its aminoacid type\n\t\"\"\"\n\n\tdef __init__(self, pdb_path):\n\t\tself.structure = PDBParser().get_structure(\"\", 
pdb_path)\n\t\tself.residues = []\n\t\tself.d_sequence = {}\n\n\t\tself.parse_structure()\n\n\tdef parse_structure(self):\n\t\tfor residue in self.structure.get_residues():\n\t\t\tif PDB.is_aa(residue, standard=True):\t#only consider standard 20 residues\n\t\t\t\tres = residue.id[1]\n\t\t\t\tif res not in self.residues:\t#dont doublecount mutated residues\t(ex. 1ORC)\t\n\t\t\t\t\tself.residues.append(res)\n\t\t\t\t\tself.d_sequence[res] = Polypeptide.three_to_one(Residue.Residue.get_resname(residue))\n\n\tdef get_residues_sequence(self):\n\t\treturn self.residues, self.d_sequence\n","sub_path":"utlts/pdb_info.py","file_name":"pdb_info.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"359989376","text":"#-*- coding:utf-8 -*-\n# author:isyuan\n# datetime:27/03/2019 14:57\n# software: PyCharm\n\nimport os\nimport smtplib\nimport time\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.image import MIMEImage\nimport mail_setting\n\nclass Mail:\n '''\n self.mail_host = \"smtp.sina.com\" # 设置服务器\n self.mail_user = \"xiaowang\" # 用户名\n self.mail_pass = \"XXXXX\" # 口令\n self.mail_sender = 'xiaowang@sina.com' # 发送者\n '''\n\n def __init__(self, mail_host=\"210.77.136.200\", mail_user=\"user\", mail_pass=\"pass\",\n mail_sender=\"da山\", port=465):\n # 第三方 SMTP 服务\n self.mail_host = mail_host\n self.mail_user = mail_user\n self.mail_pass = mail_pass\n self.mail_sender = mail_sender\n self.port = port\n\n\n def SendHtmlMail(self, mail_tolist, mail_subject, mail_body, fileList, mail_cclist, mail_bcclist):\n '''\n 发送Html邮件\n :param mail_tolist: 接收者邮件列表,如:['xiaoli@sina.com','xiaoMa@qq.com']\n :param mail_subject: 邮件主题\n :param mail_body: 邮件体主题内容\n :param fileList: 附件列表,就文件名列表(包含路径)\n :param mail_cclist: 抄送邮件列表,如:['xiaoli@sina.com','xiaoMa@qq.com'],默认不传\n :param mail_bcclist: 密送邮件列表,如:['xiaoli@sina.com','xiaoMa@qq.com'],默认不传\n :return:\n '''\n message = MIMEText(mail_body, _subtype='html', _charset='gb2312')\n message['Subject'] = mail_subject\n message['From'] = self.mail_sender\n if len(mail_cclist) > 0:\n message['Cc'] = \",\".join(mail_cclist)\n mail_tolist.extend(mail_cclist)\n if len(mail_bcclist) > 0:\n message['Bcc'] = \",\".join(mail_bcclist)\n mail_tolist.extend(mail_bcclist)\n\n try:\n smtpObj = smtplib.SMTP(self.mail_host, self.port)\n # smtpObj.connect(self.mail_host, 25) # 25 为 SMTP 端口号\n # smtpObj.login(self.mail_user, self.mail_pass)\n smtpObj.sendmail(self.mail_sender, mail_tolist, message.as_string())\n smtpObj.close()\n print(\"邮件发送成功\")\n except smtplib.SMTPException as e:\n print(\"Error: 无法发送邮件\")\n\n def SendMailAttach(self, mail_tolist, mail_subject, mail_body, fileList, mail_cclist):\n '''\n 发送带附件的邮件\n :param mail_tolist: 接收者邮件列表,如:['xiaoli@sina.com','xiaoMa@qq.com']\n :param mail_subject: 邮件主题\n :param mail_body: 邮件体主题内容\n :param fileList: 附件列表,就文件名列表(包含路径)\n :param mail_cclist: 抄送邮件列表,如:['xiaoli@sina.com','xiaoMa@qq.com'],默认不传\n :param mail_bcclist: 密送邮件列表,如:['xiaoli@sina.com','xiaoMa@qq.com'],默认不传\n :return:\n '''\n msg = MIMEMultipart()\n message = MIMEText(mail_body, _subtype='plain', _charset='utf-8')\n msg.attach(message)\n\n # 构造附件\n for f in fileList:\n if os.path.isfile(f):\n att = MIMEText(open(f, 'rb').read(), 'base64', 'utf-8')\n att[\"Content-Type\"] = 'application/octet-stream'\n att[\"Content-Disposition\"] = 'attachment;filename=' + os.path.basename(f)\n msg.attach(att)\n\n msg['Subject'] = mail_subject\n msg['From'] 
= self.mail_sender\n msg['To'] = \",\".join(mail_tolist)\n if len(mail_cclist) > 0:\n msg['Cc'] = \",\".join(mail_cclist)\n mail_tolist.extend(mail_cclist)\n # if len(mail_bcclist) > 0:\n # msg['Bcc'] = \",\".join(mail_bcclist)\n # mail_tolist.extend(mail_bcclist)\n\n message = ''\n try:\n server = smtplib.SMTP()\n server.connect(self.mail_host)\n server.login(self.mail_user, self.mail_pass)\n server.sendmail(self.mail_sender, mail_tolist, msg.as_string())\n server.close()\n result = '邮件发送成功'\n\n except smtplib.SMTPException as e:\n # print \"Error: 无法发送邮件\", e\n message = 'Error: 无法发送邮件:'\n return message\n\n def SendMail(self, mail_subject, mail_body, mail_tolist,mail_cclist):\n '''\n 发送普通邮件\n :param mail_tolist: 接收者邮件列表,如:['xiaoli@sina.com','xiaoMa@qq.com']\n :param mail_subject: 邮件主题\n :param mail_body: 邮件体主题内容\n :param fileList: 附件列表,就文件名列表(包含路径)\n :param mail_cclist: 抄送邮件列表,如:['xiaoli@sina.com','xiaoMa@qq.com'],默认不传\n :param mail_bcclist: 密送邮件列表,如:['xiaoli@sina.com','xiaoMa@qq.com'],默认不传\n :return:\n '''\n message = MIMEText(mail_body, _subtype='plain', _charset='utf-8')\n message['Subject'] = mail_subject\n message['From'] = self.mail_sender\n if mail_tolist:\n message['To'] = \",\".join(mail_tolist)\n if len(mail_cclist) > 0:\n message['Cc'] = \",\".join(mail_cclist)\n mail_tolist.extend(mail_cclist)\n\n result = ''\n try:\n server = smtplib.SMTP()\n server.connect(self.mail_host)\n server.login(self.mail_user,self.mail_pass)\n server.sendmail(self.mail_sender, mail_tolist, message.as_string())\n server.close()\n result = '邮件发送成功'\n # print \"邮件发送成功\"\n except smtplib.SMTPException as e:\n result = 'Error: 无法发送邮件'\n return result\n\n def test(self,mail_body,mail_subject,mail_sendto_user,fileName):\n\n fileList = []\n fileList.append(fileName)\n\n mail_tolist = []\n mail_tolist.append(mail_sendto_user)\n\n # 多个人,中间用逗号分隔\n # cc_tolist = ['xx','dd']\n cc_tolist =[]\n mail_bcclist = []\n\n # result = self.SendMail( mail_subject, mail_body, mail_tolist,cc_tolist)\n result = self.SendMailAttach(mail_tolist,mail_subject, mail_body,fileList, cc_tolist)\n return result\n\n\n# setobj = mail_setting.mail_setting()\n# m = Mail(setobj.mail_host,setobj.mail_user,setobj.mail_pass,setobj.mail_send_user,setobj.port)\n# # result = m.test('信息1',u'小测试测试',setobj.mail_receive_user)\n# fileName=\"D:\\\\work\\\\python36_crawl\\\\src\\\\2018-07-02-14-01-55.csv\"\n# result = m.test('信息1',u'小测试测试',setobj.mail_receive_user,fileName)\n# print (result)","sub_path":"AutoFramework/utils/sendEmail1.py","file_name":"sendEmail1.py","file_ext":"py","file_size_in_byte":6703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"482232108","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sampling as samp\n\n# coal_mine = np.loadtxt(\"coal-mine.csv\", dtype=float)\n# plt.plot(coal_mine, 'g')\n# plt.scatter(range(1,192), coal_mine, alpha=0.3)\n# plt.title(\"trend transition of how frequently a disaster happened\")\n# plt.xlabel(\"n_th disaster\")\n# plt.ylabel(\"year\")\n# plt.show()\n\nn = 1e3\nburn_in = 1e2\nd =5\nnu = 1\nrho = 0.1\nhybrid_sample = samp.Sampling(d=d, nu=nu, rho=rho, n=n, burn_in=burn_in)\ntheta, lambdas, t = hybrid_sample.hybrid_sample()\n","sub_path":"MCMC/mix_sampler/hybrid_sampler.py","file_name":"hybrid_sampler.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"138853865","text":"#!/usr/bin/env python\n\nimport argparse\nimport numpy 
as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error\n\nfrom scripts.supervised_model_utils import score_model, load_model, prepare_data_for_model, load_model_params, \\\n add_supervised_target\n\n\ndef find_production_dates(model_params):\n sup_df = prepare_data_for_model(country=model_params[\"country\"],\n mode=\"production\",\n resampling_method=model_params[\"resampling_method\"],\n variables=model_params[\"variables\"],\n hm_days=model_params[\"hm_days\"],\n functions=model_params[\"functions\"],\n day_windows=model_params[\"day_windows\"],\n verbose=0)\n\n start = sup_df.index.min()\n end = sup_df.index.max()\n\n return start, end, sup_df\n\n\ndef evaluate_production_performance(df, hm_days):\n df = add_supervised_target(df, hm_days=hm_days)\n\n df = df.iloc[:-hm_days]\n\n return df[\"target\"]\n\n\ndef plot_production_results(true, preds):\n fig, ax = plt.subplots(1, 2, figsize=(20, 6))\n ax[0].set_title(\"true vs pred, production data\")\n ax[0].plot(preds, label=\"preds\")\n ax[0].plot(true, label=\"true\")\n ax[0].set_ylabel(\"revenue\")\n ax[0].legend()\n\n ax[1].set_title(\"absolute error, production data\")\n ax[1].plot(abs(preds - true))\n ax[1].set_ylabel(\"error\")\n plt.show()\n\n\ndef compute_production_error(true, preds):\n prod_mae = mean_absolute_error(true, preds)\n prod_rmse = mean_squared_error(true, preds, squared=False)\n\n prod_error = np.mean([prod_mae, prod_rmse])\n prod_error = round(prod_error, 2)\n\n return prod_error\n\n\ndef monitor(country, hm_days):\n model, model_name = load_model(country_name=country)\n model_params = load_model_params(model_name)\n\n start, end, sup_df = find_production_dates(model_params)\n\n starting_dates = list(pd.date_range(start, end))\n\n prod_preds = score_model(starting_dates, model_name, test=False, mode=\"production\")\n\n prod_true = evaluate_production_performance(sup_df, hm_days)\n cleaned_prod_preds = pd.Series(prod_preds[:-hm_days],\n index=prod_true.index).apply(lambda x: x[0])\n\n prod_error = compute_production_error(prod_true, cleaned_prod_preds)\n\n print(f\"Error on production data: {prod_error}\")\n\n plot_production_results(prod_true, cleaned_prod_preds)\n\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(description='monitor production data')\n parser.add_argument('-c', '--country', required=True, help='name of the country or None')\n parser.add_argument('-d', '--hm_days', default=30, type=int, help='how many days in the future to predict')\n\n args = parser.parse_args()\n\n monitor(args.country, args.hm_days)\n","sub_path":"monitoring.py","file_name":"monitoring.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"136263413","text":"'''\r\nhttps://jeffknupp.com/blog/2014/06/18/improve-your-python-python-classes-and-object-oriented-programming/\r\n'''\r\nclass Node:\r\n def __init__(self,data):\r\n self.data= data;\r\n self.next= None;\r\n \r\nclass LinkedList:\r\n def __init__(self):\r\n self.head= None;\r\n \r\n def insertAtbegin(self,data):\r\n newnode= Node(data);\r\n\r\n if not self.head:\r\n self.head= newnode;\r\n return;\r\n newnode.next= self.head\r\n self.head= newnode;\r\n \r\n def insertAtend(self,data):\r\n newnode= Node(data)\r\n\r\n if not self.head:\r\n self.head= newnode;\r\n return;\r\n tmp= self.head;\r\n while tmp.next:\r\n tmp= tmp.next;\r\n tmp.next= newnode;\r\n \r\n def printList(self):\r\n tmp= 
self.head;\r\n\r\n while tmp:\r\n print (tmp.data),\r\n tmp= tmp.next;\r\n \r\n\r\nllist= LinkedList();\r\n\r\nllist.insertAtbegin(1);\r\nllist.insertAtbegin(11);\r\nllist.insertAtbegin(111);\r\nllist.insertAtend(2);\r\nllist.insertAtend(22);\r\nllist.insertAtend(222);\r\nllist.printList();\r\n","sub_path":"clg_programs/python/linkedList.py","file_name":"linkedList.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"374335819","text":"#!/usr/bin/python\n# Classification (U)\n\n\"\"\"Program: rabbitmqpub_setqueue.py\n\n Description: Unit test of rabbitmqpub.set_queue in rabbitmq_class.py.\n\n Usage:\n test/unit/rabbitmq_class/rabbitmqpub_setqueue.py\n\n Arguments:\n\n\"\"\"\n\n# Libraries and Global Variables\n\n# Standard\nimport sys\nimport os\n\nif sys.version_info < (2, 7):\n import unittest2 as unittest\nelse:\n import unittest\n\n# Third-party\nimport mock\n\n# Local\nsys.path.append(os.getcwd())\nimport rabbitmq_class\nimport version\n\n__version__ = version.__version__\n\n\nclass UnitTest(unittest.TestCase):\n\n \"\"\"Class: UnitTest\n\n Description: Class which is a representation of a unit testing.\n\n Methods:\n setUp\n test_setup_queue\n\n \"\"\"\n\n def setUp(self):\n\n \"\"\"Function: setUp\n\n Description: Initialization for unit testing.\n\n Arguments:\n\n \"\"\"\n\n self.name = None\n self.host = \"ServerName\"\n self.port = 5555\n self.connection = None\n self.exchange_name = \"Exchange_Name\"\n self.queue_name = \"Queue_Name\"\n self.routing_key = \"Route_Key\"\n self.auto_delete = True\n\n @mock.patch(\"rabbitmq_class.RabbitMQPub.check_confirm\")\n @mock.patch(\"rabbitmq_class.RabbitMQPub.bind_queue\")\n @mock.patch(\"rabbitmq_class.RabbitMQPub.create_queue\")\n @mock.patch(\"rabbitmq_class.RabbitMQPub.setup_exchange\")\n @mock.patch(\"rabbitmq_class.pika\")\n def test_setup_queue(self, mock_pika, mock_setup, mock_create, mock_bind,\n mock_check):\n\n \"\"\"Function: test_setup_queue\n\n Description: Test setup_queue method.\n\n Arguments:\n\n \"\"\"\n\n mock_pika.PlainCredentials.return_value = \"PlainCredentials\"\n mock_pika.ConnectionParameters.return_value = \"ConnectionParameters\"\n mock_setup.return_value = True\n mock_create.return_value = True\n mock_bind.return_value = True\n mock_check.return_value = True\n rmq = rabbitmq_class.RabbitMQPub(self.name, \"xxxxx\")\n\n self.assertFalse(rmq.setup_queue())\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/unit/rabbitmq_class/rabbitmqpub_setqueue.py","file_name":"rabbitmqpub_setqueue.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"5223071","text":"import numpy as np\nimport pandas as pd\nimport KNNLearner as knn\nimport LinRegLearner as ll\nimport BagLearner as bl\nimport datetime as dt\nimport matplotlib.pyplot as plt\nfrom sklearn import linear_model\nfrom util import get_data, plot_data\n\n# get price data: Sine, IBM\nstart_date = dt.datetime(2007,12,31)\nend_date = dt.datetime(2009,12,31)\nsymbols = ['IBM','SINE_FAST','SINE_SLOW','GOOG','AAPL']\ndates = pd.date_range(start_date, end_date)\nprices_all = get_data(symbols, dates)\n\ntag = 'IBM'\npibm = prices_all[tag]\n\n# contruct features X\ndef get_feature(pibm):\n indates = dates[1:]\n sma = pibm.rolling(window = 20, min_periods=0)\n bbup = sma.mean() + 2*sma.std() \n bblow = sma.mean() - 2*sma.std() \n bbvals = (pibm[1:] - 
sma.mean()[1:])/(4*sma.std()[1:])\n vtl = sma.std()[1:]/sma.mean()[1:]*8\n #mmtn1 = pibm.values[1:]/pibm.values[:-1]-1 \n #mmtn2 = pibm.values[2:]/pibm.values[:-2]-1\n #mmtn3 = pibm.values[3:]/pibm.values[:-3]-1\n #mmtn4 = pibm.values[4:]/pibm.values[:-4]-1\n mmtn5 = pibm.values[5:]/pibm.values[:-5]-1\n X = pd.DataFrame({'x0':bbvals[4:-5], 'vtl':vtl[4:-5],'x5':mmtn5[:-5]})\n return X, bbvals[4:-5]\n\n# construct Y\ndef get_Y(pibm):\n Y = pibm.values[5:] \n Y = Y[5:]/Y[:-5] - 1\n return Y\n\ndef trade_naive(pfl):\n for idx in range(pfl.shape[0]-1):\n if pfl['pred'].ix[idx] < pfl['pred'].ix[idx+1]:\n if pfl['shares'].ix[idx] <= 0:\n pfl['cash'].ix[idx:] = pfl['cash'].ix[idx] - pfl['price'].ix[idx]*(100 - pfl['shares'].ix[idx])\n pfl['shares'].ix[idx:] = 100\n if pfl['pred'].ix[idx] > pfl['pred'].ix[idx+1]:\n if pfl['shares'].ix[idx] >= 0:\n pfl['cash'].ix[idx:] = pfl['cash'].ix[idx] + pfl['price'].ix[idx]*(100 + pfl['shares'].ix[idx])\n pfl['shares'].ix[idx:] = -100\n pv = pfl['price']*pfl['shares'] + pfl['cash']\n return pv \n\ndef trade(pfl):\n if pfl['price'].ix[0] < pfl['pema40'].ix[0] and pfl['pema40'].ix[0] < pfl['pema40'].ix[5]:\n pfl['shares'][:] = 100\n pfl['cash'][:] = pfl['cash'].ix[0] - pfl['price'].ix[0]*100\n elif pfl['price'].ix[0] > pfl['pema40'].ix[0] and pfl['pema40'].ix[0] > pfl['pema40'].ix[5]:\n pfl['shares'][:] = -100\n pfl['cash'][:] = pfl['cash'].ix[0] + pfl['price'].ix[0]*100\n\n sigs = pfl['price'].values - pfl['ema40'].values\n for idx in range(1, pfl.shape[0]):\n if sigs[idx]*sigs[idx-1] < 0:\n if sigs[idx] > 0 and pfl['shares'].ix[idx] <= 0:\n pfl['cash'].ix[idx:] = pfl['cash'].ix[idx] - pfl['price'].ix[idx]*(100 - pfl['shares'].ix[idx])\n pfl['shares'].ix[idx:] = 100\n if sigs[idx] < 0 and pfl['shares'].ix[idx] >= 0:\n pfl['cash'].ix[idx:] = pfl['cash'].ix[idx] + pfl['price'].ix[idx]*(100 + pfl['shares'].ix[idx])\n pfl['shares'].ix[idx:] = -100\n return pfl\n\ndef train(X, Y):\n kl = knn.KNNLearner()\n Ypred = np.zeros(Y.size)\n Ypred[:5] = Y[:5]\n for i in range(5, X.shape[0]):\n kl.addEvidence(X.values[:i], Y[:i])\n Ypred[i] = kl.query(X.values[i])[0]\n return Ypred, kl\n\n \n \n#----------------------In-sample test-----------------------------#\nX, _ = get_feature(pibm)\nY = get_Y(pibm)\n\nYpred, kl = train(X, Y)\n# convert predicted Y back to price, in-sample backtest\nppred = pibm.values[5:-5]*(Ypred + 1)\n\npdiff = pd.DataFrame(index = pibm.index[10:], data = {'price':pibm.values[10:], 'pred':ppred})\nplot_data(pdiff)\n\nppred = pd.Series(index = pibm.index[10:], data = ppred)# convert numpy array to pandas.Series\nema40 = pibm.ewm(span = 40, min_periods=0).mean()\npema40 = pd.concat((pibm[:10],ppred)).ewm(span = 40, min_periods=0).mean()\n# initial portfolio\npfl = pd.DataFrame({'price':pibm[10:], 'ema40':ema40[10:], 'pema40':pema40[10:], 'shares':np.zeros(pibm.size-10), 'cash':np.ones(pibm.size-10)*10000})\n\n\n# trading\npfl = trade(pfl)\npv = pfl['price']*pfl['shares'] + pfl['cash']\npspy = prices_all['SPY'][pfl.index]\npfl_vs_spy = pd.DataFrame(index = pfl.index, data = {'my_portval':pv/pv.ix[0], 'SPY':pspy/pspy.ix[0]})\nplot_data(pfl_vs_spy, title = \"My_Portfolio vs SPY\", ylabel = \"Accumulative Return\")\n\n\n#------------------------Out-Sample test---------------------------# \ntsd = dt.datetime(2009,12,31)\nted = dt.datetime(2011,12,31)\nsymbols = [tag]\ndates = pd.date_range(tsd, ted)\ntprices = get_data(symbols, dates)\ntpibm = tprices[tag]\n\ntX, _ = get_feature(tpibm)\n# compare to the true price\ntYpred = kl.query(tX.values)\ntppred = 
tpibm.values[5:-5]*(tYpred + 1)\ntppred = pd.Series(index = tpibm.index[10:], data = tppred)# convert numpy array to pandas.Series\n#tppred = tpibm.values[5:-5]*(tX.values.dot(clf.coef_.T) + clf.intercept_ + 1)\ntema40 = tpibm.ewm(span = 40, min_periods=0).mean()\ntpema40 = pd.concat((tpibm[:10],tppred)).ewm(span = 40, min_periods=0).mean()\n# initial portfolio\ntpfl = pd.DataFrame({'price':pibm[10:], 'ema40':tema40[10:], 'pema40':tpema40[10:], 'shares':np.zeros(pibm.size-10), 'cash':np.ones(pibm.size-10)*10000})\n\ntpdiff = pd.DataFrame(index = tpibm.index[10:], data = {'price':tpibm.values[10:], 'pred':tppred})\nplot_data(tpdiff)\n\ntpfl = pd.DataFrame({'price':tpibm[10:], 'pred':tppred, 'shares':np.zeros(tppred.size), 'cash':np.ones(tppred.size)*10000})\n\ntpfl = trade(tpfl)\ntpv = tpfl['price']*tpfl['shares'] + tpfl['cash']\ntpspy = tprices['SPY'][tpfl.index]\ntpfl_vs_tspy = pd.DataFrame(index = tpfl.index, data = {'my_portval':tpv/tpv.ix[0], 'SPY':tpspy/tpspy.ix[0]})\nplot_data(tpfl_vs_tspy, title = \"My_Portfolio vs SPY\", ylabel = \"Accumulative Return\")\n\n\n# For report\n\n\n","sub_path":"p4/trade.py","file_name":"trade.py","file_ext":"py","file_size_in_byte":5577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"321545312","text":"from django.shortcuts import render, redirect, reverse\nfrom fundstrackerapp.models import FinancialGoal\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nimport datetime\n\n\n@login_required\ndef journal_entry_form(request):\n\n if request.method == 'GET':\n\n # loops through all incompleted financial goals and\n # filters out the current goals vs goals that have expired,\n # then passing all those current and incomplete financial \n # goals for the logged-in user into the form template to \n # display them in a dropdown menu to attach entry to a goal\n\n incomplete_financial_goals = FinancialGoal.objects.filter(user=request.user.id, is_completed=0)\n current_goals = []\n past_goals = []\n \n for goal in incomplete_financial_goals:\n goal_date_str = str(goal.created_at) \n exp_year = int(goal_date_str.split('-')[0])\n exp_month = int(goal_date_str.split('-')[1]) + goal.timeframe\n if exp_month > 12:\n exp_year += 1\n exp_month -= 12\n exp_day_str = goal_date_str.split('-')[2]\n exp_day = int(exp_day_str.split()[0])\n\n curr_date = str(datetime.datetime.now())\n curr_date_str = curr_date.split()[0]\n curr_year = int(curr_date_str.split('-')[0])\n curr_month = int(curr_date_str.split('-')[1])\n curr_day = int(curr_date_str.split('-')[2])\n\n if exp_year < curr_year:\n past_goals.append(goal)\n elif exp_year == curr_year and exp_month < curr_month:\n past_goals.append(goal)\n elif exp_year == curr_year and exp_month == curr_month and exp_day < curr_day:\n past_goals.append(goal)\n\n for goal in incomplete_financial_goals:\n if goal not in past_goals:\n current_goals.append(goal)\n\n\n template = 'journal/form.html'\n context = {\n 'current_goals': current_goals\n }\n\n return render(request, template, context)","sub_path":"fundstrackerproject/fundstrackerapp/views/journal/form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"537635849","text":"#!/usr/bin/python3\n\n# Builds JSON files from CSV files\n#\n# The CSV files need to be placed in a device subfolder.\n# Devices subfolder are labeled with a shorthand that is described 
in detail in the device.json file.\n# The device folder\n\nimport os\nimport glob\nimport csv\nimport json\n\ncsv_data = {}\n\n\ndef files_in_folder(folder):\n \"\"\"Returns a list of files in the folder and all\n its subfolders recursively. The folder can be\n written with wildcards as with the Unix find command.\n \"\"\"\n files = []\n for f in glob.glob(folder):\n if os.path.isdir(f):\n files.extend(files_in_folder(f + os.sep + \"**\"))\n else:\n files.append(f)\n return files\n\n\ndef parse_sig(sig):\n function_args = sig.split(\"/\")[0]\n size = int(sig.split(\"/\")[1])\n time_type = sig.split(\"/\")[2]\n first_prim = function_args.find(\"Prim\")\n first_rev = function_args.find(\"Rev\")\n if first_rev == -1:\n args_start = first_prim\n elif first_prim == -1:\n args_start = first_rev\n else:\n args_start = min(first_prim, first_rev)\n function = function_args[: args_start - 1]\n args = function_args[args_start:].split(\"_\")\n data_params = \"\"\n types = \"\"\n for i in range(len(args)):\n if i % 2 == 0:\n if args[i] == \"Prim\":\n data_params += \"data,\"\n else:\n data_params += \"param,\"\n else:\n new_type = args[i]\n if new_type == \"int1\":\n new_type = \"array[] int\"\n types += new_type + \",\"\n return function, types[:-1], data_params[:-1], size, time_type\n\n\ndef process_file(csv_filename):\n line_off = 0\n device_label = csv_filename.split(\"/\")[1]\n with open(csv_filename) as f:\n # google benchmark writes some non-csv data at beginning\n for line in iter(f.readline, \"\"):\n if line.startswith(\"name,iterations\"):\n f.seek(f.tell() - len(line) - line_off, os.SEEK_SET)\n break\n line_off = -1\n data = csv.reader(f)\n header_read = False\n for i in data:\n if not header_read:\n header_read = True\n continue\n function, types, data_params, s, t = parse_sig(i[0])\n if not (function in csv_data):\n csv_data[function] = {}\n function_data = csv_data[function]\n if not (types in function_data):\n function_data[types] = {}\n sig_types_data = function_data[types]\n if not (data_params in sig_types_data):\n sig_types_data[data_params] = {}\n sig_data = sig_types_data[data_params]\n if not (device_label in sig_data):\n sig_data[device_label] = {\"mean\": [[], []], \"stddev\": [[], []]}\n sig_device_data = sig_data[device_label]\n if t == \"manual_time_mean\":\n sig_device_data[\"mean\"][0].append(s)\n sig_device_data[\"mean\"][1].append(float(i[2]))\n if t == \"manual_time_stddev\":\n sig_device_data[\"stddev\"][0].append(s)\n sig_device_data[\"stddev\"][1].append(float(i[2]))\n\n\nfor f in files_in_folder(\"csv\"):\n process_file(f)\n\nfor f in csv_data:\n with open(\"json/\" + f + \".json\", \"w\") as fp:\n json.dump(csv_data[f], fp, indent=4, sort_keys=True)\n","sub_path":"scripts/csv_to_json.py","file_name":"csv_to_json.py","file_ext":"py","file_size_in_byte":3365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"540167463","text":"# %load q01_load_data_tfidf/build.py\n# Default imports\n\nimport pandas as pd\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n# Write your solution here :\n\ndef q01_load_data_tfidf(path,max_df=0.95,min_df=2,no_features=1000):\n variable1=pd.read_csv(path)\n tf_vect=TfidfVectorizer(max_df=max_df,min_df=min_df,max_features=no_features,stop_words='english')\n variable2=tf_vect.fit_transform(variable1['talkTitle'])\n variable3=tf_vect.get_feature_names()\n return variable1,variable2,variable3\n\n# def q01_load_data_tfidf(path, max_df=0.95, min_df=2, no_features=1000):\n# dataset 
= pd.read_csv(path)\n# tfidf_vectorizer = TfidfVectorizer(max_df=max_df, min_df=min_df, max_features=no_features, stop_words='english')\n# tfidf = tfidf_vectorizer.fit_transform(dataset['talkTitle'])\n# tfidf_feature_names = tfidf_vectorizer.get_feature_names()\n# return dataset, tfidf, tfidf_feature_names\n\n\n\n","sub_path":"q01_load_data_tfidf/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"122334517","text":"import json\nimport pprint\nfrom urllib.request import urlopen\n\napi_key = 'j2ZaiJfpax7zzbstijJJ'\nquery_string = '?auth_token=' + api_key\n\nrequest_data = urlopen('https://www.quandl.com/api/v1/datasets/WIKI/AAPL.json' + query_string)\nstock_prices = json.loads(request_data.read().decode())\n#pprint.pprint(stock_prices['data'][0])\nprint(stock_prices)\n\n\nfor stock_price in stock_prices['data']:\n #opening day value is [1]\n print(stock_price[0], stock_price[1])\n","sub_path":"examples/turtle-data.py","file_name":"turtle-data.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"548425231","text":"import os\nimport time\nimport requests\nimport sys\nfrom time import sleep\nimport json\n\nsys.stdout.write(\"\\x1b]2;Experience\\x07\")\nrefresh_time = 15 # seconds\ncolor = '0A' # like u would type \"color 0A\" into cmd / leave empty for default\n\n\nwith open('config.json') as f:\n js = json.load(f)\nsalad_auth = js['salad_key']\ncookie = {\n \"Salad.Authentication\": salad_auth\n}\nheaders = {\n \"User-Agent\": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Salad/0.4.2 Chrome/78.0.3904.130 Electron/7.1.9 Safari/537.36'\n}\nwith open('art.txt', encoding='utf-8') as f:\n art = f.read()\nos.system('color ' + color)\n\n\ndef main():\n while True:\n os.system('cls')\n rxp = requests.get(\n url='https://app-api.salad.io/api/v1/profile/xp', headers=headers, cookies=cookie)\n\n rxp = rxp.json()\n\n print(art)\n\n print('Experience: \\u001b[1m\\u001b[33m' +\n str(rxp['lifetimeXp']) + 'XP')\n print(' \\u001b[32m')\n print('-------------------------------------')\n\n try:\n print('Press ctrl+c to Return!')\n sleep(5)\n except KeyboardInterrupt:\n print(\"Quitting...\")\n os.system('python \"Start.py\"')\n\n\nmain()\n","sub_path":"Salad CLI+/Salad CLI USA/XP.py","file_name":"XP.py","file_ext":"py","file_size_in_byte":1244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"498475339","text":"# Copyright 2019 Jussi Löppönen\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import absolute_import, division, print_function\n\nimport numpy as np\nimport tensorflow as tf\n\ndef _encoder_cpu_lstm(*args, **kwargs):\n return tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell(*args, **kwargs)\n\ndef _decoder_cpu_lstm(*args, **kwargs):\n return tf.contrib.rnn.LSTMBlockCell(*args, 
**kwargs)\n\nclass Encoder:\n def __init__(self, embeddings_shape, num_layers, units, is_training=False):\n assert num_layers > 1, \"at least 2 layers required\"\n self.units = units\n self.num_layers = num_layers\n self.is_training = is_training\n self.cells_fw = []\n self.cells_bw = []\n with tf.device(\"/cpu:0\"):\n with tf.variable_scope(\"encoder_embedding\"):\n self.from_embeddings = tf.Variable(tf.constant(0.0, shape=embeddings_shape),\n trainable=False, name=\"from_embeddings\")\n self.from_embedding_placeholder = tf.placeholder(tf.float32, embeddings_shape,\n name=\"from_embeddings_placeholder\")\n self.from_embedding_init = self.from_embeddings.assign(self.from_embedding_placeholder)\n\n def __call__(self, X):\n inputs = self._from_embeddings_lookup(X)\n inputs = tf.transpose(inputs, [1, 0, 2])\n encoder_outputs = self._cudnnLSTM(inputs)\n return tf.transpose(encoder_outputs, [1, 0, 2])\n\n def _from_embeddings_lookup(self, X):\n with tf.device(\"/cpu:0\"):\n return tf.nn.embedding_lookup(self.from_embeddings, X)\n\n def _cudnnLSTM(self, X):\n with tf.variable_scope(\"encoder\"):\n encoder_lstm = tf.contrib.cudnn_rnn.CudnnLSTM(self.num_layers // 2, self.units, direction=\"bidirectional\")\n outputs, _ = encoder_lstm(X, training=self.is_training)\n self.cells_fw = encoder_lstm\n return outputs\n\n\nclass BaseDecoder(object):\n def __init__(self, Ty, embeddings_shape, to_start_index, end_index, vocabulary_size, num_layers, units, batch_size, is_training=False):\n self.Ty = Ty\n self.num_layers = num_layers\n self.units = units\n self.batch_size = batch_size\n self.to_start_index = to_start_index\n self.vocabulary_size = vocabulary_size\n self.end_index = end_index\n self.is_training = is_training\n with tf.device(\"/cpu:0\"):\n with tf.variable_scope(\"decoder_embedding\"):\n self.to_embeddings = tf.Variable(tf.constant(0.0, shape=embeddings_shape),\n trainable=False, name=\"to_embeddings\")\n self.to_embedding_placeholder = tf.placeholder(tf.float32, embeddings_shape,\n name=\"to_embeddings_placeholder\")\n self.to_embedding_init = self.to_embeddings.assign(self.to_embedding_placeholder)\n\n def _to_embeddings_lookup(self, Y):\n with tf.device(\"/cpu:0\"):\n return tf.nn.embedding_lookup(self.to_embeddings, Y)\n\n def _decoder_cell(self, encoder_outputs):\n attention_mechanism = tf.contrib.seq2seq.BahdanauAttention(\n num_units = self.units,\n memory = encoder_outputs)\n cells = [_decoder_cpu_lstm(self.units) for _ in range(self.num_layers)]\n ret = tf.contrib.seq2seq.AttentionWrapper(\n cell = tf.nn.rnn_cell.MultiRNNCell(cells),\n attention_mechanism = attention_mechanism,\n attention_layer_size = self.units)\n return ret, cells\n\nclass TrainingDecoder(BaseDecoder):\n def __init__(self, *args, **kwargs):\n kwargs[\"is_training\"] = True\n super().__init__(*args, **kwargs)\n\n def _training_helper(self, Y):\n # feed start index to training helper\n with tf.variable_scope(\"training_helper_inputs\"):\n starts = tf.fill([self.batch_size, 1], self.to_start_index)\n training_Y = tf.slice(Y, [0, 0], [self.batch_size, self.Ty - 1])\n training_Y = tf.concat((starts, training_Y), axis=1)\n helper_inputs = self._to_embeddings_lookup(training_Y)\n return tf.contrib.seq2seq.TrainingHelper(\n inputs = helper_inputs,\n sequence_length = tf.convert_to_tensor([self.Ty]*self.batch_size))\n\n def __call__(self, encoder_outputs, Y):\n output_layer = tf.layers.Dense(self.vocabulary_size)\n decoder_cells, cells = self._decoder_cell(encoder_outputs)\n\n training_decoder = 
tf.contrib.seq2seq.BasicDecoder(\n cell = decoder_cells,\n helper = self._training_helper(Y),\n initial_state = decoder_cells.zero_state(self.batch_size, tf.float32),\n output_layer = output_layer)\n output, _, _ = tf.contrib.seq2seq.dynamic_decode(\n decoder = training_decoder,\n impute_finished = True,\n maximum_iterations = self.Ty)\n for i, lstm_cell in enumerate(cells):\n kernel, bias = lstm_cell.variables\n tf.summary.histogram(\"Decoder {} Kernel\".format(i), kernel)\n tf.summary.histogram(\"Decoder {} Bias\".format(i), bias)\n for v in output_layer.trainable_variables:\n tf.summary.histogram(\"Output {}\".format(v.name), v)\n return output.rnn_output\n\nclass BeamSearchDecoder(BaseDecoder):\n def __init__(self, beam_width, *args, **kwargs):\n self.beam_width = beam_width\n super().__init__(*args, **kwargs)\n\n def __call__(self, encoder_outputs):\n alpha = 0.7\n beta = 0.4\n output_layer = tf.layers.Dense(self.vocabulary_size)\n\n tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(\n encoder_outputs, multiplier=self.beam_width)\n\n decoder_cell, _ = self._decoder_cell(tiled_encoder_outputs)\n initial_state=decoder_cell.zero_state(self.batch_size*self.beam_width, dtype=tf.float32)\n\n decoder = tf.contrib.seq2seq.BeamSearchDecoder(\n cell = decoder_cell,\n embedding = self._to_embeddings_lookup,\n start_tokens=tf.convert_to_tensor([self.to_start_index] * self.batch_size),\n end_token=self.end_index,\n initial_state=initial_state,\n beam_width=self.beam_width,\n output_layer=output_layer,\n length_penalty_weight=alpha,\n coverage_penalty_weight=beta\n )\n output, _, _ = tf.contrib.seq2seq.dynamic_decode(\n decoder = decoder,\n maximum_iterations = self.Ty)\n # impute_finished = True,\n return output.predicted_ids[:,:,0]\n\nclass GreedyDecoder(BaseDecoder):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __call__(self, encoder_outputs):\n output_layer = tf.layers.Dense(self.vocabulary_size)\n decoder_cells, _ = self._decoder_cell(encoder_outputs)\n\n decoder = tf.contrib.seq2seq.GreedyEmbeddingHelper(\n embedding = self._to_embeddings_lookup,\n start_tokens=tf.convert_to_tensor([self.to_start_index]*self.batch_size),\n end_token=self.end_index\n )\n infer_decoder = tf.contrib.seq2seq.BasicDecoder(\n cell = decoder_cells,\n helper = decoder,\n initial_state = decoder_cells.zero_state(self.batch_size, tf.float32),\n output_layer = output_layer)\n output, _, _ = tf.contrib.seq2seq.dynamic_decode(\n decoder = infer_decoder,\n impute_finished = True,\n maximum_iterations = self.Ty)\n output = tf.argmax(output.rnn_output, -1, output_type=tf.int32)\n return output\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"36756631","text":"\"\"\"Pytest fixture for pywinauto app\"\"\"\n\nimport pytest\nfrom .providers import DriverFactory, ConfigProvider\nfrom . 
import App\n\nTEST_CONFIG = ConfigProvider().get_config('config.txt')\nAPP_PATH = TEST_CONFIG.get_param('app_path')\n\n# @pytest.yield_fixture(scope=\"function\", params=[DriverFactory.get_chrome_driver,\n# DriverFactory.get_firefox_driver])\n@pytest.yield_fixture(scope=\"function\", params=[DriverFactory.get_chrome_driver])\ndef driver(request):\n \"\"\"driver fixture\"\"\"\n driver = request.param()\n driver.get(APP_PATH)\n yield driver\n driver.close()\n\n@pytest.fixture(scope=\"function\")\ndef app(driver):\n \"\"\"driver fixture\"\"\"\n return App(driver)","sub_path":"PythonFlow_March2018/Examples/_09_Python_Acceptance_testing_Selenium_OOP_in_tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"49950942","text":"import mysql.connector\nfrom collections import deque\nimport app\nimport os\nfrom datetime import datetime\nfrom itertools import cycle\n\n\nclass Database(object):\n def __init__(self):\n self.user = \"root\"\n self.pwd = \"mysql\"\n self.db = None\n self.createConnection()\n\n def createConnection(self):\n self.db = mysql.connector.connect(\n host=self.getHost(),\n user=self.user,\n passwd=self.pwd,\n database=\"proyecto\",\n port=3306\n )\n # app.logger().info(\"Connected!\")\n\n def getHost(self):\n docker = os.environ.get('DOCKER', False)\n if docker:\n return \"db\"\n else:\n return \"localhost\"\n\n def checkConnection(self):\n try:\n self.db.ping(reconnect=True)\n except Exception as err:\n app.logger().error(\"error on ping db: reconnect failed-- {}\".format(err))\n\n def createCursor(self, cursor_type=None):\n self.checkConnection()\n return self.db.cursor(cursor_type)\n\n # Busca info de los robots\n def getAllRobots(self):\n try:\n cursor = self.createCursor()\n cursor.execute(\"select * from robot WHERE estado <> 'ELIMINADO'\")\n row_headers = [x[0] for x in cursor.description] # this will extract row headers\n rv = cursor.fetchall()\n json_data = []\n for result in rv:\n json_data.append(dict(zip(row_headers, result)))\n cursor.close()\n return json_data, True\n except Exception as err:\n app.logger().exception(\"Error en select getallrobots -- {}\".format(err))\n return None, False\n\n def getRobot(self, id_r):\n try:\n cursor = self.createCursor()\n cursor.execute(\"SELECT * FROM robot WHERE id = %s AND estado <> 'ELIMINADO'\", (id_r,))\n res = cursor.fetchone()\n cursor.close()\n if not res:\n return None, True\n res = {cursor.description[pos][0]: value for pos, value in enumerate(res)}\n return res, True\n except Exception as err:\n app.logger().exception(\"Error en select getRobot -- {}\".format(err))\n return None, False\n\n def getRobotLoc(self, id_r):\n try:\n cursor = self.createCursor()\n cursor.execute(\"SELECT loc1, loc2 FROM robot WHERE id = %s AND estado <> 'ELIMINADO'\", (id_r,))\n pos = cursor.fetchone()\n cursor.close()\n if pos:\n pos = list(pos)\n return pos, True\n else:\n return None, True\n except Exception as err:\n app.logger().exception(\"Error en select getRobotLoc -- {}\".format(err))\n return None, False\n\n def insertRobot(self, pos):\n id_r = 0\n cursor = self.createCursor()\n try:\n sql = \"INSERT INTO robot (loc1, loc2, estado) VALUES (%s, %s, UCASE(%s))\"\n val = (pos[0], pos[1], 'LIBRE',)\n cursor.execute(sql, val)\n id_r = cursor.lastrowid\n self.db.commit()\n except Exception as err:\n app.logger().exception(\"Error en INSERT insertRobot -- {}\".format(err))\n return None, False\n finally:\n 
cursor.close()\n return id_r, True\n\n def deleteRobot(self, id_r):\n try:\n cursor = self.createCursor()\n sql = \"UPDATE robot SET estado = 'ELIMINADO' WHERE id = %s\"\n cursor.execute(sql, (id_r,))\n self.db.commit()\n cursor.close()\n return True\n except Exception as err:\n app.logger().exception(\"Error en DELETE eliminarRobot -- {}\".format(err))\n self.db.rollback()\n return False\n\n def updateRobot(self, id_r, estado):\n try:\n cursor = self.createCursor()\n if id_r is not None:\n sql = \"UPDATE robot SET estado = %s WHERE id = %s AND estado <> 'ELIMINADO'\"\n cursor.execute(sql, (estado, id_r))\n else:\n sql = \"UPDATE robot SET estado = %s WHERE estado <> 'ELIMINADO'\"\n cursor.execute(sql, (estado, ))\n self.db.commit()\n cursor.close()\n return True\n except Exception as err:\n app.logger().exception(\"Error en UPDATE updateRobot -- {}\".format(err))\n self.db.rollback()\n return False\n\n def updatePid(self, pid, id_r):\n sql = \"UPDATE robot SET pid = %s WHERE id = %s AND estado <> 'ELIMINADO'\"\n val = (pid, id_r)\n cursor = self.createCursor()\n try:\n cursor.execute(sql, val)\n self.db.commit()\n except Exception as err:\n app.logger().exception(\"Error en INSERT insertRobot -- {}\".format(err))\n return False\n finally:\n cursor.close()\n return True\n\n def getPid(self, id_r):\n cursor = self.createCursor()\n try:\n sql = \"SELECT pid, estado FROM robot WHERE id = %s AND estado <> 'ELIMINADO'\"\n val = (str(id_r),)\n cursor.execute(sql, val)\n res = cursor.fetchone()\n cursor.close()\n return res, True\n except Exception as err:\n app.logger().exception(\"Error en select getRobotLoc -- {}\".format(err))\n return None, False\n\n def updateRobotPath(self, path, id_r):\n sql = \"UPDATE robot SET camino = %s WHERE id = %s AND estado <> 'ELIMINADO'\"\n val = (path, id_r)\n cursor = self.createCursor()\n try:\n cursor.execute(sql, val)\n self.db.commit()\n except Exception as err:\n app.logger().exception(\"Error en INSERT updateRobotPath -- {}\".format(err))\n return False\n finally:\n cursor.close()\n return True\n\n def updaterobotpos(self, pos, id_r):\n sql = \"UPDATE robot SET actual = %s WHERE id = %s AND estado <> 'ELIMINADO'\"\n val = (pos, id_r)\n try:\n cursor = self.createCursor()\n cursor.execute(sql, val)\n self.db.commit()\n cursor.close()\n except Exception as err:\n app.logger().exception(\"Error en INSERT updateRobotPath -- {}\".format(err))\n return False\n return True\n\n def towerAvailable(self, id_t):\n try:\n cursor = self.createCursor()\n sql = \"SELECT estado from torre WHERE id = %s\"\n val = (id_t,)\n cursor.execute(sql, val)\n result = cursor.fetchone()\n cursor.close()\n if result[0] == \"LIBRE\":\n return True\n else:\n return False\n except Exception as err:\n app.logger().exception(\"Error en select getRobotLoc -- {}\".format(err))\n return False\n\n def moveTowers(self, movments):\n for i in movments: # Por cada movimiento lo traduzco en la base de datos y en la matriz.\n print(\"movimiento\")\n return {\n \"Status\": 200,\n \"Message\": \"to bien\"\n }\n\n def getTowerLoc(self, id_r):\n try:\n cursor = self.createCursor()\n sql = \"SELECT T.loc1, T.loc2 FROM torre T INNER JOIN articulo a on T.id = a.id_torre INNER JOIN pedidos p on a.id = p.id_articulo WHERE p.id_robot = %s AND p.estado = 'LISTO'\"\n val = (id_r,)\n cursor.execute(sql, val)\n a = cursor.fetchall()\n if not a:\n cursor.execute(\"SELECT T.loc1, T.loc2 FROM torre T INNER JOIN pedidos p on T.id = p.id_torre WHERE p.id_robot = %s AND p.estado = 'LISTO'\", (id_r,))\n a = 
cursor.fetchall()\n cursor.close()\n if a:\n return list(a[0]), True\n else:\n return None, True\n except Exception as err:\n app.logger().exception(\"Error en select getTowerLoc -- {}\".format(err))\n return None, False\n\n def getAllTowers(self):\n try:\n cursor = self.createCursor()\n cursor.execute(\"select * from torre WHERE estado <> 'ELIMINADO'\")\n row_headers = [x[0] for x in cursor.description] # this will extract row headers\n rv = cursor.fetchall()\n json_data = []\n for result in rv:\n json_data.append(dict(zip(row_headers, result)))\n cursor.close()\n return json_data, True\n except Exception as err:\n app.logger().exception(\"Error en select getAllTowers -- {}\".format(err))\n return None, False\n\n def insertTowers(self, matrix):\n try:\n cursor = self.db.cursor(buffered=True)\n for fila in range(len(matrix)):\n for columna in range(len(matrix[fila])):\n if matrix[fila][columna] == 1:\n sql = \"INSERT INTO torre (loc1, loc2, estado) VALUES (%s, %s, %s)\"\n val = (fila, columna, 'LIBRE')\n cursor.execute(sql, val)\n elif matrix[fila][columna] == 2:\n sql = \"INSERT INTO plataforma_origen (loc1, loc2, estado) VALUES (%s, %s, %s)\"\n val = (fila, columna, 'LIBRE')\n cursor.execute(sql, val)\n self.db.commit()\n cursor.execute(\"SELECT id from plataforma_origen \")\n r = cursor.fetchall()\n with open('platforms.txt', 'w') as f:\n for i in r:\n f.write(f'{i[0]}\\n')\n cursor.close()\n return True\n except Exception as err:\n app.logger().exception(\"Error en INSERT insertTowers -- {}\".format(err))\n return False\n\n def getPlatfoms(self):\n try:\n cursor = self.createCursor()\n cursor.execute(\"select * from plataforma_origen WHERE estado <> 'ELIMINADO'\")\n row_headers = [x[0] for x in cursor.description] # this will extract row headers\n rv = cursor.fetchall()\n json_data = []\n for result in rv:\n json_data.append(dict(zip(row_headers, result)))\n cursor.close()\n return json_data, True\n except Exception as err:\n app.logger().exception(\"Error en select getPlatforms -- {}\".format(err))\n return None, False\n\n def getPlatformLoc(self, id_r):\n try:\n cursor = self.createCursor()\n sql = \"SELECT P.loc1, P.loc2 FROM plataforma_origen P INNER JOIN pedidos p2 on P.id = p2.id_plataforma WHERE p2.id_robot = %s AND p2.estado = 'ENPROGRESO'\"\n val = (id_r,)\n cursor.execute(sql, val)\n a = cursor.fetchall()\n cursor.close()\n if a:\n return list(a[0]), True\n else:\n return None, True\n except Exception as err:\n app.logger().exception(\"Error en SELECT getPlatformLoc -- {}\".format(err))\n return None, False\n\n def getPlatformState(self, id_r):\n try:\n cursor = self.createCursor()\n sql = \"SELECT P.estado FROM plataforma_origen P INNER JOIN pedidos p2 on P.id = p2.id_plataforma WHERE id_robot = %s AND p2.estado != 'FINALIZADO'\"\n val = (id_r,)\n cursor.execute(sql, val)\n res = cursor.fetchone()\n cursor.close()\n return res, True\n except Exception as err:\n app.logger().exception(\"Error en UPDATE getPlatformLoc -- {}\".format(err))\n return None, False\n\n def leavePlatform(self, id_plat, id_pedido):\n try:\n cursor = self.createCursor()\n sql = \"UPDATE pedidos SET estado = 'LISTO' where id = %s\"\n cursor.execute(sql, (id_pedido,))\n self.db.commit()\n sql = \"SELECT id FROM pedidos WHERE estado = 'EN TERMINAL' AND id_plataforma = %s\"\n cursor.execute(sql, (id_plat,))\n pedidos = cursor.fetchall()\n cursor.close()\n if pedidos:\n return True, [i[0] for i in pedidos]\n else:\n return True, None\n except Exception as err:\n app.logger().exception(\"Error en UPDATE 
getPlatformLoc -- {}\".format(err))\n return False, None\n\n def leavePlatform2(self, id_pedido, id_plat):\n try:\n cursor = self.createCursor()\n sql = \"UPDATE pedidos SET estado = 'LISTO' where id= %s AND estado = %s\"\n val = (id_pedido, 'EN TERMINAL')\n cursor.execute(sql, val)\n self.db.commit()\n # Si no hay rows afectadas por el update significa que el pedido no estaba en la plataforma porque no tenia estado EN TERMINAL\n if cursor.rowcount == 0:\n return True, None, None\n sql = \"SELECT id FROM pedidos WHERE estado = 'EN TERMINAL' AND id_plataforma = %s\"\n cursor.execute(sql, (id_plat,))\n pedidos = cursor.fetchall()\n cursor.close()\n if pedidos:\n return True, True, [i[0] for i in pedidos]\n return True, True, None\n except Exception as err:\n app.logger().exception(\"Error en UPDATE getPlatformLoc -- {}\".format(err))\n return False, False\n\n def platformAvailable(self, id_p):\n try:\n cursor = self.createCursor()\n sql = \"SELECT id, estado from plataforma_origen WHERE id = %s\"\n val = (id_p,)\n cursor.execute(sql, val)\n result = cursor.fetchone()\n if result[1] == \"LIBRE\":\n return True\n else:\n return False\n except Exception as err:\n app.logger().exception(\"Error en SELECT platformAvailable -- {}\".format(err))\n return False\n\n # Cuando el robot llega a la plataforma\n def arrivedPlatform(self, id_r):\n try:\n cursor = self.createCursor()\n sql = \"UPDATE pedidos SET estado = 'EN TERMINAL' WHERE id_robot = %s AND estado <> 'FINALIZADO' AND estado <> 'ELIMINADO'\"\n val = (id_r,)\n cursor.execute(sql, val)\n self.db.commit()\n cursor.close()\n return True\n except Exception as err:\n self.db.rollback()\n app.logger().exception(\"Error en SELECT arrivedPlatform -- {}\".format(err))\n return False\n\n # Cuando el robot se va de la plataforma\n def leavedPlatform(self, id_r):\n try:\n # Marco el pedido como actualizado\n cursor = self.createCursor()\n sql = \"UPDATE pedidos SET estado = 'FINALIZADO' WHERE id_robot = %s AND estado <> 'ELIMINADO'\"\n cursor.execute(sql, (id_r,))\n # Marco el articulo como entregado\n sql = \"UPDATE articulo a INNER JOIN pedidos p ON a.id = p.id_articulo SET a.estado = 'ENTREGADO' WHERE p.id_robot = %s AND a.estado = 'OCUPADO'\"\n cursor.execute(sql, (id_r,))\n # Marco la plataforma de origen como libre\n sql = \"UPDATE plataforma_origen INNER JOIN pedidos p on plataforma_origen.id = p.id_plataforma SET \" \\\n \"plataforma_origen.estado = 'LIBRE' WHERE p.id_robot = %s \"\n cursor.execute(sql, (id_r,))\n self.db.commit()\n cursor.close()\n return True\n except Exception as err:\n self.db.rollback()\n app.logger().exception(\"Error en UPDATE leavedPlatform -- {}\".format(err))\n return False\n\n # Cuando el robot deja la torre donde estaba\n def finishedJob(self, id_r):\n try:\n cursor = self.createCursor()\n sql = \"UPDATE robot SET estado = 'LIBRE' where id= %s AND estado <> 'ELIMINADO'\"\n val = (id_r,)\n cursor.execute(sql, val)\n sql = \"UPDATE torre INNER JOIN articulo a on torre.id = a.id_torre INNER JOIN pedidos p on a.id = \" \\\n \"p.id_articulo SET torre.estado = 'LIBRE' WHERE p.id_robot = %s AND p.estado = 'FINALIZADO' \"\n cursor.execute(sql, val)\n if cursor.rowcount == 0:\n sql = \"UPDATE torre t INNER JOIN pedidos p on t.id = p.id_torre SET t.estado = 'LIBRE' WHERE p.id_robot = %s AND p.estado = 'FINALIZADO'\"\n cursor.execute(sql, (id_r,))\n self.db.commit()\n cursor.close()\n return True\n except Exception as err:\n self.db.rollback()\n app.logger().exception(\"Error en UPDATE finishedJob -- {}\".format(err))\n return 
False\n\n def getPedidos(self, id=None, id_orden_compra=None, id_articulo=None, id_plataforma=None, id_robot=None,\n estado=None):\n try:\n cursor = self.createCursor()\n sql = \"SELECT p.id , p.id_orden_compra, p.id_articulo, p2.name, p.estado, p.id_robot, p.id_torre 'id_torre1', a.id_torre FROM pedidos as p LEFT JOIN \" \\\n \"articulo a on p.id_articulo = a.id LEFT JOIN producto p2 on a.id_producto = p2.id\"\n where = []\n params = {}\n if id is not None:\n where.append(\"p.id = %(id)s\")\n params['id'] = id\n if id_orden_compra is not None:\n where.append(\"id_orden_compra = %(id_orden_compra)s\")\n params['id_orden_compra'] = id_orden_compra\n if id_articulo is not None:\n where.append(\"id_articulo = %(id_articulo)s\")\n params['id_articulo'] = id_articulo\n if id_plataforma is not None:\n where.append(\"id_plataforma = %(id_plataforma)s\")\n params['id_plataforma'] = id_plataforma\n if id_robot is not None:\n where.append(\"id_robot = %(id_robot)s\")\n params['id_robot'] = id_robot\n if estado is not None:\n where.append(\"p.estado = %(estado)s\")\n params['estado'] = estado\n where.append(\"p.estado <> 'ELIMINADO'\")\n if where:\n sql = '{} WHERE {}'.format(sql, ' AND '.join(where))\n cursor.execute(sql, params)\n row_headers = [x[0] for x in cursor.description]\n rv = cursor.fetchall()\n arts = []\n for result in rv:\n arts.append(dict(zip(row_headers, result)))\n cursor.close()\n return arts, True\n except Exception as err:\n app.logger().exception(\"Error en SELECT getPedidos -- {}\".format(err))\n return None, False\n\n def insertPedido(self, codorder, codart):\n platforms_file = \"platforms.txt\"\n try:\n cursor = self.createCursor()\n # Verifico que ninguno de los articulos solicitados no haya sido ocupado\n id_list = ','.join(['%s'] * len(codart))\n cursor.execute(\"SELECT id FROM articulo WHERE id IN (%s) AND estado <> 'LIBRE'\" % id_list, tuple(codart))\n articulos_ocupados = cursor.fetchall()\n if not articulos_ocupados:\n with open(platforms_file, 'r') as f:\n platforms = deque(f.read().splitlines())\n for art in codart:\n sql = \"INSERT INTO pedidos (id_orden_compra, id_articulo, id_plataforma, estado, fecha_creacion) VALUES (%s, %s, %s, %s, %s)\"\n val = (codorder, art, platforms[0], \"ENCOLADO\", datetime.now())\n cursor.execute(sql, val)\n sql = \"UPDATE articulo SET estado = 'OCUPADO' WHERE id = %s\"\n cursor.execute(sql, (art,))\n self.db.commit()\n else:\n return articulos_ocupados, True\n cursor.close()\n except Exception as err:\n with open(platforms_file, 'w') as f: # Guardo el archivo como estaba antes porque fallo el insert\n for p in platforms:\n f.write(\"%s\\n\" % p)\n self.db.rollback()\n app.logger().exception(\"Error en INSERT insertPedidos -- {}\".format(err))\n return art, False\n platforms.rotate(-1) # Roto el archivo Round Robin y lo guardo\n with open(platforms_file, 'w') as f:\n for s in platforms:\n f.write(\"%s\\n\" % s)\n return None, True\n\n def insertPedidoTorre(self, id_plat, id_torre):\n try:\n cursor = self.createCursor()\n sql = \"INSERT INTO pedidos (id_orden_compra, id_articulo, id_plataforma, estado, id_torre, fecha_creacion) VALUES (%s, %s, %s, %s, %s, %s)\"\n val = (None, None, id_plat, \"ENCOLADO\", id_torre, datetime.now())\n cursor.execute(sql, val)\n self.db.commit()\n return True\n except Exception as err:\n self.db.rollback()\n print(\"erorr\")\n app.logger().exception(\"Error en INSERT insertPedidoTorre -- {}\".format(err))\n return False\n\n def deletePedido(self, id):\n try:\n cursor = self.createCursor()\n sql = \"SELECT * 
FROM pedidos WHERE id = %s AND estado <> 'ENCOLADO' AND estado <> 'FINALIZADO'\"\n cursor.execute(sql, (id,))\n if cursor.fetchall():\n return \"pend\"\n sql = \"UPDATE pedidos SET estado = 'ELIMINADO' WHERE id = %s\"\n cursor.execute(sql, (id,))\n if cursor.rowcount == 0:\n res = None\n else:\n res = True\n cursor.execute(\n \"UPDATE articulo a INNER JOIN pedidos p ON a.id = p.id_articulo SET a.estado = 'LIBRE' WHERE p.id=%s\",\n (id,))\n self.db.commit()\n cursor.close()\n return res\n except Exception as err:\n self.db.rollback()\n app.logger().exception(\"Error en DELETE deletePedido -- {}\".format(err))\n return False\n\n def getPedidoState(self, id_r):\n # Busco el estado de todos los pedidos que tiene asignado el robot. Si alguno esta en terminal signfica que no se puede ir de la plataforma todavia\n try:\n cursor = self.createCursor()\n sql = \"SELECT estado FROM pedidos WHERE id_robot = %s AND estado = 'EN TERMINAL'\"\n cursor.execute(sql, (id_r,))\n res = cursor.fetchall()\n cursor.close()\n if res:\n return False, True\n else:\n return True, True\n except Exception as err:\n app.logger().exception(\"Error en SELECT getPedidoState -- {}\".format(err))\n return None, False\n\n # def deleteProduct(self, codart):\n # try:\n # cursor = self.createCursor()\n # # sql = \"DELETE FROM producto WHERE id = %s\"\n # sql = \"UPDATE producto SET \"\n # val = (codart,)\n # cursor.execute(sql, val)\n # self.db.commit()\n # cursor.close()\n # return True\n # except Exception as err:\n # app.logger().exception(\"Error en DELETE deleteProduct -- {}\".format(err))\n # return False\n\n def insertArticlesCsv(self, csv):\n try:\n cursor = self.createCursor()\n cont = 0\n for row in csv:\n cursor.execute(\"INSERT INTO articulo (id_torre, id_producto, estado) VALUES(%s, %s, UCASE(%s))\", row)\n cont = cont + 1\n self.db.commit()\n cursor.close()\n return (None, cont), True\n except Exception as err:\n app.logger().exception(\"Error en INSERT insertArticlesCsv -- {}\".format(err))\n self.db.rollback()\n return (row, cont), False\n\n def insertArticles(self, art):\n try:\n cursor = self.createCursor()\n sql = \"INSERT INTO articulo (id_torre, id_producto, estado) VALUES (%s, %s, UCASE(%s))\"\n val = (art[\"id_torre\"], art[\"id_producto\"], art[\"estado\"])\n cursor.execute(sql, val)\n self.db.commit()\n id_a = cursor.lastrowid\n cursor.close()\n return id_a, True\n except Exception as err:\n app.logger().exception(\"Error en INSERT insertArticles -- {}\".format(err))\n self.db.rollback()\n return None, False\n\n def getArticles(self, id=None, id_torre=None, id_producto=None, estado=None):\n try:\n cursor = self.createCursor()\n sql = \"SELECT * FROM articulo\"\n where = []\n params = {}\n if id is not None:\n where.append(\"id = %(id)s\")\n params['id'] = id\n if id_torre is not None:\n where.append(\"id_torre = %(id_torre)s\")\n params['id_torre'] = id_torre\n if id_producto is not None:\n where.append(\"id_producto = %(id_producto)s\")\n params['id_producto'] = id_producto\n if estado is not None:\n where.append(\"estado = %(estado)s\")\n params['estado'] = estado\n where.append(\"estado <> 'ELIMINADO'\")\n if where:\n sql = '{} WHERE {}'.format(sql, ' AND '.join(where))\n cursor.execute(sql, params)\n row_headers = [x[0] for x in cursor.description]\n res = cursor.fetchall()\n arts = []\n for result in res:\n arts.append(dict(zip(row_headers, result)))\n cursor.close()\n return arts, True\n except Exception as err:\n app.logger().exception(\"Error en SELECT getArticles -- {}\".format(err))\n return 
None, False\n\n def deleteArticle(self, id):\n try:\n cursor = self.createCursor()\n sql = \"SELECT * FROM pedidos WHERE id_articulo = %s AND estado != 'ENCOLADO'\"\n cursor.execute(sql, (id,))\n pedidos = cursor.fetchall()\n if pedidos:\n return \"pending\", [i[0] for i in pedidos]\n sql = \"UPDATE articulo SET estado = 'ELIMINADO' WHERE id = %s\"\n cursor.execute(sql, (id,))\n if cursor.rowcount == 0:\n res = None\n else:\n cursor.execute(\"UPDATE pedidos SET estado = 'ELIMINADO' WHERE id_articulo = %s\", (id,))\n res = True\n self.db.commit()\n cursor.close()\n return res, None\n except Exception as err:\n self.db.rollback()\n app.logger().exception(\"Error en DELETE deleteArticle -- {}\".format(err))\n return False, None\n\n # Busca el proximo pedido y lockea los recursos que se van a utilizar. Retorna a donde hay que ir\n def getpath(self, id_r):\n self.db.autocommit = True # SE CAMBIO A COMO ESTABA ANTES, VER QUE NO HAYA ROTO NADA\n cursor = self.createCursor()\n cursor.execute(\"SELECT * FROM pedidos WHERE estado='ENCOLADO'\") # Busco el primer pedido encolado\n pedido = cursor.fetchall()\n cont = 0 # Contador para iterar por los pedidos\n # Busco los datos de la torre donde esta el articulo del primer pedido\n while cont != len(pedido): # Mientras que haya resultados para iterar\n if self.platformAvailable(pedido[cont][3]): # Si la plataforma esta libre\n if pedido[cont][2] is None: # Si el codigo articulo es Null significa que es un pedido para buscar torre\n cursor.execute(\"SELECT t.id, t.loc1, t.loc2, t.estado FROM torre as t WHERE t.id = %s\", (pedido[cont][6],))\n else:\n cursor.execute(\n \"SELECT t.id, t.loc1, t.loc2, t.estado FROM torre as t INNER JOIN articulo a on t.id = a.id_torre WHERE a.id = %s\",\n (pedido[cont][2],))\n torre = cursor.fetchone()\n if torre[3] == \"LIBRE\": # Si la torre esta libre\n try:\n sql = \"UPDATE pedidos SET id_robot = %s, estado = 'ENPROGRESO' WHERE id = %s\"\n val = (id_r, pedido[cont][0])\n sql2 = \"UPDATE torre SET estado = 'OCUPADO' WHERE id = %s\"\n val2 = (torre[0],)\n sql3 = \"UPDATE robot SET estado = 'OCUPADO' WHERE id = %s\"\n val3 = (id_r,)\n sql4 = \"UPDATE plataforma_origen SET estado = 'OCUPADO' WHERE id = %s\"\n val4 = (pedido[cont][3],)\n cursor.execute(sql, val)\n cursor.execute(sql2, val2)\n cursor.execute(sql3, val3)\n cursor.execute(sql4, val4)\n if pedido[cont][2] is not None: # Si el pedido es de traer torre no tengo que hacer nada con los articulos\n sql5 = \"UPDATE articulo SET estado = 'OCUPADO' WHERE id = %s\"\n val5 = (pedido[cont][2],)\n cursor.execute(sql5, val5)\n # Marco los demas articulos que estan en la misma torre y misma orden de compra\n sql = \"SELECT a.id FROM articulo a INNER JOIN pedidos p on a.id = p.id_articulo WHERE p.id_orden_compra = %s AND a.id_torre = %s AND a.estado <> 'ENTREGADO' AND a.estado <> 'ELIMINADO'\"\n cursor.execute(sql, (pedido[cont][1], torre[0]))\n articulos = cursor.fetchall()\n sql = \"UPDATE articulo SET estado = 'OCUPADO' WHERE id = %s\"\n cursor.executemany(sql, articulos)\n sql = \"UPDATE pedidos SET estado = 'ENPROGRESO', id_robot = {} WHERE id_articulo = %s\".format(\n id_r)\n cursor.executemany(sql, articulos)\n self.db.commit()\n except Exception as err:\n app.logger().exception(\"Error en UPDATE getPath -- {}\".format(err))\n return None, False\n finally:\n cursor.close()\n return [torre[1], torre[2]], True\n cont = cont + 1\n self.db.autocommit = False\n return None, True\n\n # User modified matrix, check new towers to insert and old ones to delete\n def 
updatematrix(self, matrix, deposito, col_der, col_izq, fila_arr, fila_abj):\n cursor = self.createCursor()\n inserted = [] # New towers\n deleted = [] # Deleted towers\n pinserted = [] # New platforms\n pdeleted = [] # Deleted platfoms\n # Apply new dimensions to the deposito\n if fila_abj < 0:\n # Registrar que positiciones ya no estan de las filas eliminadas\n for fila in range(len(deposito) - (-fila_abj), len(deposito)):\n for columna in range(len(deposito[fila])):\n if deposito[fila][columna] == 1:\n deleted.append([fila, columna])\n if deposito[fila][columna] == 2:\n pdeleted.append([fila, columna])\n deposito = deposito[:fila_abj] # Eliminar filas abajo del deposito\n elif fila_abj > 0:\n new = [0 for i in deposito[0]] # Creo una fila de 0 de igual longitud que las demas\n for k in range(fila_abj): # Hago append de fila de 0 por la cantidad de nuevas filas\n deposito.append(new)\n if fila_arr < 0: # Elimino las filas de arriba\n for fila in range(0, (-fila_arr)): # agrego los elementos que ya no estan en la matriz a deleted\n for columna in range(len(deposito[fila])):\n if deposito[fila][columna] == 1:\n deleted.append([fila, columna])\n if deposito[fila][columna] == 2:\n pdeleted.append([fila, columna])\n deposito = deposito[-fila_arr:]\n for t in deleted:\n t[0] = t[0] + fila_arr\n for p in pdeleted:\n p[0] = p[0] + fila_arr\n cursor.execute(\"UPDATE torre SET loc1 = loc1 + %s\", (fila_arr,)) # actualizo posiciones en la base\n cursor.execute(\"UPDATE plataforma_origen SET loc1 = loc1 + %s\", (fila_arr,))\n elif fila_arr > 0:\n new = [0 for i in deposito[0]]\n for k in range(fila_arr): # Insert al principo las nuevas filas del deposito\n deposito.insert(0, new)\n cursor.execute(\"UPDATE torre SET loc1 = loc1 + %s\", (fila_arr, ))\n cursor.execute(\"UPDATE plataforma_origen SET loc1 = loc1 + %s\", (fila_arr,))\n if col_der < 0:\n for fila in range(len(deposito)):\n for columna in range(len(deposito[fila]) - (-col_der), len(deposito[fila])):\n if deposito[fila][columna] == 1:\n deleted.append([fila, columna])\n if deposito[fila][columna] == 2:\n pdeleted.append([fila, columna])\n for j in range(len(deposito)): # Saco la cantidad de elementos en cada fila\n deposito[j] = deposito[j][:col_der]\n elif col_der > 0:\n for k in range(col_der): # Agrego la cantidad de columnas nuevas en cada fila\n for j in deposito:\n j.append(0)\n if col_izq < 0:\n for fila in range(len(deposito)):\n for columna in range(0, (-col_izq)):\n if deposito[fila][columna] == 1:\n deleted.append([fila, columna])\n if deposito[fila][columna] == 2:\n pdeleted.append([fila, columna])\n for j in range(len(deposito)): # Saco los elementos del principio de cada fila\n deposito[j] = deposito[j][-col_izq:]\n cursor.execute(\"UPDATE torre SET loc2 = loc2 + %s WHERE estado <> 'ELIMINADO'\", (col_izq, ))\n cursor.execute(\"UPDATE plataforma_origen SET loc2 = loc2 + %s WHERE estado <> 'ELIMINADO'\", (col_izq, ))\n elif col_izq > 0:\n for k in range(col_izq): # Agrego elementos nuevos en cada fila\n for j in deposito:\n j.insert(0, 0)\n cursor.execute(\"UPDATE torre SET loc2 = loc2 + %s WHERE estado <> 'ELIMINADO'\", (col_izq, ))\n cursor.execute(\"UPDATE plataforma_origen SET loc2 = loc2 + %s WHERE estado <> 'ELIMINADO'\", (col_izq, ))\n # Actualizo las posiciones de lo que ya marque para borrar\n for t in deleted:\n #t[0] = t[0] + fila_arr\n t[1] = t[1] + col_izq\n for p in pdeleted:\n #p[0] = p[0] + fila_arr\n p[1] = p[1] + col_izq\n for fila in range(len(matrix)): # Iterate new matriz to check changes\n for columna in 
range(len(matrix[fila])):\n # Cuando el nuevo deposito es mas grande\n if matrix[fila][columna] == 1 and deposito[fila][columna] != 1: # New Tower\n inserted.append([fila, columna]) # Insert on inserted array\n if deposito[fila][columna] == 1 and matrix[fila][columna] != 1: # Tower deleted from original\n deleted.append([fila, columna])\n if matrix[fila][columna] == 2 and deposito[fila][columna] != 2: # New Platform\n pinserted.append([fila, columna]) # Insert on inserted array\n if deposito[fila][columna] == 2 and matrix[fila][columna] != 2: # Platform deleted from original\n pdeleted.append([fila, columna])\n try:\n if inserted: # If inserted has new positions\n for i in inserted:\n query = \"INSERT INTO torre (loc1, loc2, estado) VALUES (%s, %s, %s)\"\n val = (i[0], i[1], 'LIBRE')\n cursor.execute(query, val)\n if deleted:\n for i in deleted:\n query = \"SELECT id FROM torre WHERE loc1 = %s AND loc2 = %s AND estado <> 'ELIMINADO'\"\n cursor.execute(query, (i[0], i[1]))\n ok = self.deleteArticlebyTower(cursor, cursor.fetchone()[0])\n # Si hay algun pedido pendiente hago rollback, si hubo error hago rollback\n if not ok or ok == \"pending\":\n self.db.rollback()\n return ok\n query = \"UPDATE torre SET estado = 'ELIMINADO' WHERE loc1 = %s AND loc2 = %s AND estado <> 'ELIMINADO'\"\n cursor.execute(query, (i[0], i[1]))\n if pinserted: # If pinserted has new positions\n for i in pinserted:\n query = \"INSERT INTO plataforma_origen (loc1, loc2, estado) VALUES (%s, %s, %s)\"\n val = (i[0], i[1], 'LIBRE')\n cursor.execute(query, val)\n r = None\n if pdeleted:\n # Primero marco todas las platafomras como eliminadas\n for i in pdeleted:\n query = \"UPDATE plataforma_origen SET estado = 'ELIMINADO' WHERE loc1 = %s AND loc2 = %s AND estado = 'LIBRE'\"\n cursor.execute(query, (i[0], i[1]))\n # Si hay algun pedido en curso hago rollback, si hubo error hago rollback\n if cursor.rowcount == 0:\n self.db.rollback()\n return \"pending\"\n # Selecciono todas las plataformas disponibles\n cursor.execute(\"SELECT id FROM plataforma_origen WHERE estado != 'ELIMINADO'\")\n r = cursor.fetchall()\n for i in pdeleted:\n # Selecciono todos los pedidos que estaban encolados a esa plataforma\n query = \"SELECT p.id_orden_compra FROM pedidos p INNER JOIN plataforma_origen po on p.id_plataforma = po.id WHERE po.loc1 = %s AND po.loc2 = %s AND p.estado = 'ENCOLADO'\"\n cursor.execute(query, (i[0], i[1]))\n pedidos = cursor.fetchall()\n # Armo una lista circular para ir iterando por todas las plataformas\n plataformas = cycle(r)\n for j in pedidos:\n sql = \"UPDATE pedidos SET id_plataforma = %s WHERE id_orden_compra = %s\"\n cursor.execute(sql, (next(plataformas)[0], j[0]))\n self.db.commit()\n if r is None:\n cursor.execute(\"SELECT id from plataforma_origen WHERE estado != 'ELIMINADO'\")\n r = cursor.fetchall()\n with open('platforms.txt', 'w') as f:\n for i in r:\n f.write(f'{i[0]}\\n')\n except Exception as err:\n app.logger().exception(\"Error en UPDATE updateMatrix -- {}\".format(err))\n self.db.rollback()\n return False\n finally:\n cursor.close()\n return True\n # TODO todavia esta guardando la matriz nueva cuando hay pedidos encolados\n\n def deleteArticlebyTower(self, cursor, id_torre):\n try:\n # Busco todos los articulos que estan en esa torre\n sql = \"SELECT id FROM articulo WHERE id_torre = %s AND estado != 'ELIMINADO'\"\n cursor.execute(sql, (id_torre,))\n articulos = cursor.fetchall() # Lista de tuplas con solo el id\n # Por cada articulo si hay un pedido ejecutandose con ese articulo rollback all\n 
for articulo in articulos:\n sql = \"SELECT * FROM pedidos WHERE id_articulo = %s AND estado != 'ENCOLADO' AND estado != 'FINALIZADO' AND estado != 'ELIMINADO'\"\n cursor.execute(sql, (articulo[0],))\n pedidos = cursor.fetchall()\n if pedidos:\n return \"pending\"\n # Elimino logicamente el articulo\n sql = \"UPDATE articulo SET estado = 'ELIMINADO' WHERE id = %s AND estado <> 'ENTREGADO'\"\n cursor.execute(sql, (articulo[0],))\n # Elimino logicamente los pedidos asociados a ese articulo\n sql = \"UPDATE pedidos SET estado = 'ELIMINADO' WHERE id_articulo = %s AND estado <> 'FINALIZADO'\"\n cursor.execute(sql, (articulo[0],))\n return True\n except Exception as err:\n app.logger().exception(\"Error en DELETE deleteArticlebyTower -- {}\".format(err))\n return False\n\n def pedidosExecuting(self):\n try:\n sql = \"SELECT id FROM pedidos WHERE estado <> 'ENCOLADO' AND estado <> 'FINALIZADO' AND estado <> 'ELIMINADO'\"\n cursor = self.createCursor()\n cursor.execute(sql)\n res = cursor.fetchall()\n cursor.close()\n if not res:\n return False\n else:\n return True\n except Exception as err:\n app.logger().exception(\"Error en UPDATE updateMatrix -- {}\".format(err))\n return False\n","sub_path":"app/services/db/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":41114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"106786342","text":"from datetime import datetime\nimport pandas as pd\n\n\ndef convert_date_to_formatted_str(date, frequency):\n \"\"\"\n Converts datetime to a string basing on the frequency passed.\n \"\"\"\n if frequency == 'monthly':\n return datetime.strftime(date, '%B %Y')\n elif frequency == 'daily':\n return datetime.strftime(date, '%Y-%m-%d')\n else:\n return datetime.strftime(date, '%Y-%m-%d %H:%M')\n\n\ndef date_to_formatted_str(date):\n \"\"\"\n Converts datetime to a string\n \"\"\"\n return datetime.strftime(date, '%Y-%m-%d %H:%M')\n\n\ndef str_to_date(st):\n \"\"\"\n Converts a string to datetime\n \"\"\"\n return datetime.strptime(st, '%Y-%m-%dT%H:%M:%S.%fZ')\n\n\ndef date_to_str(date):\n \"\"\"\n Converts datetime to a string\n \"\"\"\n return datetime.strftime(date, '%Y-%m-%dT%H:%M:%S.%fZ')\n\n\ndef date_to_str2(date):\n \"\"\"\n Converts datetime to a string\n \"\"\"\n return datetime.strftime(date, '%Y-%m-%dT%H:%M:%SZ')\n\n\ndef str_to_date_find(st):\n \"\"\"\n Converts a string of different format to datetime\n \"\"\"\n return datetime.strptime(st, '%Y-%m-%dT%H:%M:%SZ')\n\n\ndef generate_datetime(date, time):\n if date is None or date == \"\":\n return None\n else:\n if time is None or time == \"\":\n time = \"00:00\"\n date_time = date+\"T\"+time+\":00Z\"\n return date_time\n\n\ndef set_pm25_category_background(pm25_conc_value):\n category_color = \"\"\n if pm25_conc_value > 0.0 and pm25_conc_value <= 12.0:\n category_color = '#45e50d'\n elif pm25_conc_value > 12.0 and pm25_conc_value <= 35.4:\n category_color = '#f8fe28'\n elif pm25_conc_value > 35.4 and pm25_conc_value <= 55.4:\n category_color = '#ee8310'\n elif pm25_conc_value > 55.4 and pm25_conc_value <= 150.4:\n category_color = '#fe0000'\n elif pm25_conc_value > 150.4 and pm25_conc_value <= 250.4:\n category_color = '#8639c0'\n elif pm25_conc_value > 250.4 and pm25_conc_value <= 500.4:\n category_color = '#81202e'\n else:\n category_color = '#808080'\n\n return category_color\n\n\ndef assign_color_to_pollutant_category(pollutant_category):\n category_color = \"\"\n if pollutant_category == 'Good':\n category_color = 
'#45e50d'\n elif pollutant_category == 'Moderate':\n category_color = '#f8fe28'\n elif pollutant_category == 'UH4SG':\n category_color = '#ee8310'\n elif pollutant_category == 'Unhealthy':\n category_color = '#fe0000'\n elif pollutant_category == 'Very Unhealthy':\n category_color = '#8639c0'\n elif pollutant_category == 'Hazardous':\n category_color = '#81202e'\n else:\n category_color = '#808080'\n\n return category_color\n\n\ndef flattencolumns(df1, cols):\n df = pd.concat([pd.DataFrame(df1[x].values.tolist()).add_prefix(x)\n for x in cols], axis=1)\n return pd.concat([df, df1.drop(cols, axis=1)], axis=1)\n","sub_path":"pipeline/cloud-functions/python/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"131519050","text":"import re\nimport sublime, sublime_plugin\nfrom .coqtop import Coqtop\n\n# Coqtop manager is managing the Coqtop instance and the views in a singleton way\n\nclass CoqtopManager:\n def __init__(self):\n self.coqtop = None\n self.reset()\n\n def start(self):\n settings = sublime.load_settings('Sublime-Coq.sublime-settings')\n path = settings.get(\"coqtop_path\")\n self.coqtop = Coqtop(self, path)\n\n def send(self, statement):\n self.output_view.run_command('coqtop_clear')\n self.coqtop.send(statement)\n\n def receive(self, output, prompt):\n self.output_view.run_command('coqtop_output', {'output': output})\n\n if prompt != 'Coq < ':\n self.focused_proof_mode = True\n else:\n self.focused_proof_mode = False\n\n if self.expects_result:\n if not re.search(r'(^Error:|^Syntax [eE]rror:)', output, re.M):\n self.file_view.run_command('coqtop_success')\n self.expects_result = False\n\n def reset(self):\n if self.coqtop is not None:\n self.coqtop.kill()\n self.coqtop = None\n self.output_view = None\n self.file_view = None\n self.focused_proof_mode = False\n self.current_position = 0\n self.current_comment_number = 0\n self.current_statement_number = 0\n self.current_proof_number = 0\n self.proof_mode = False\n self.expects_result = False\n\nmanager = CoqtopManager()\n\n# Commands exposed to user\n\nclass CoqtopClearCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n entire_region = sublime.Region(0, self.view.size())\n self.view.set_read_only(False)\n self.view.erase(edit, entire_region)\n self.view.set_read_only(True)\n\nclass CoqtopSuccessCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n coqfile_view = manager.file_view\n\n r = coqfile_view.find(r'(.|\\n)*?[^.]\\.( |\\n|$)', manager.current_position)\n text = coqfile_view.substr(r).strip()\n\n if 'keyword.other.coq' in coqfile_view.scope_name(manager.current_position).split(' '):\n if text == 'Proof.':\n manager.proof_mode = True\n\n if manager.proof_mode:\n if text == 'Qed.' or text == 'Admitted.' or text == 'Save.' 
or text == 'Defined.':\n manager.proof_mode = False\n name = 'proof: ' + repr(manager.current_proof_number)\n manager.current_proof_number = manager.current_proof_number + 1\n else:\n name = 'statement: ' + repr(manager.current_statement_number)\n manager.current_statement_number = manager.current_statement_number + 1\n\n coqfile_view.show(r)\n manager.current_position = r.end()\n coqfile_view.add_regions(name, [r], 'meta.coq.proven')\n\nclass CoqNextStatementCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n coqfile_view = manager.file_view\n\n manager.current_position = coqfile_view.find(r'\\s*', manager.current_position).end()\n\n indicator = coqfile_view.substr(manager.current_position) + coqfile_view.substr(manager.current_position + 1)\n\n if indicator == '(*':\n r = coqfile_view.find(r'\\(\\*(.|\\n)*?\\*\\)', manager.current_position)\n name = 'comment: ' + repr(manager.current_comment_number)\n manager.current_comment_number = manager.current_comment_number + 1\n\n coqfile_view.show(r)\n manager.current_position = r.end()\n coqfile_view.add_regions(name, [r], 'meta.coq.proven')\n else:\n manager.expects_result = True\n\n r = coqfile_view.find(r'(.|\\n)*?[^.]\\.(?!\\.)', manager.current_position)\n print(\"sending \" + coqfile_view.substr(r))\n manager.send(coqfile_view.substr(r))\n\nclass CoqUndoStatementCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n coqfile_view = manager.file_view\n\n if manager.proof_mode:\n previous_proof_number = manager.current_proof_number - 1\n previous_region = coqfile_view.get_regions('proof: ' + repr(previous_proof_number))[0]\n if coqfile_view.substr(previous_region) == 'Proof.':\n manager.proof_mode = False\n else:\n manager.send('Undo.')\n manager.current_proof_number = previous_proof_number\n coqfile_view.erase_regions('proof: ' + repr(previous_proof_number))\n else:\n no_comment = False\n no_statement = False\n try:\n previous_comment_number = manager.current_comment_number - 1\n previous_comment_region = coqfile_view.get_regions('comment: ' + repr(previous_comment_number))[0]\n except IndexError:\n no_comment = True\n try:\n previous_statement_number = manager.current_statement_number - 1\n previous_statement_region = coqfile_view.get_regions('statement: ' + repr(previous_statement_number))[0]\n except IndexError:\n no_statement = True\n if no_statement or (not no_comment and previous_comment_region.begin() > previous_statement_region.begin()):\n previous_region = previous_comment_region\n coqfile_view.erase_regions('comment: ' + repr(previous_comment_number))\n manager.current_comment_number = previous_comment_number\n else:\n previous_region = previous_statement_region\n name = coqfile_view.substr(coqfile_view.word(coqfile_view.word(previous_region.begin()).end() + 1))\n if manager.focused_proof_mode:\n manager.send('Abort.')\n else:\n manager.send('Reset ' + name + '.')\n while True:\n if manager.current_proof_number == 0:\n break\n else:\n previous_proof_number = manager.current_proof_number - 1\n previous_proof_region = coqfile_view.get_regions('proof: ' + repr(previous_proof_number))[0]\n if previous_proof_region.begin() < previous_region.begin():\n break\n else:\n coqfile_view.erase_regions('proof: ' + repr(previous_proof_number))\n manager.current_proof_number = previous_proof_number\n manager.current_statement_number = previous_statement_number\n coqfile_view.erase_regions('statement: ' + repr(previous_statement_number))\n manager.current_position = previous_region.begin()\n\n\nclass 
CoqStopCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n coqfile_view = manager.file_view\n manager.output_view.window().run_command('close')\n coqfile_view.settings().set('coqtop_running', False)\n for number in range(0, manager.current_comment_number):\n coqfile_view.erase_regions('comment: ' + repr(number))\n for number in range(0, manager.current_statement_number):\n coqfile_view.erase_regions('statement: ' + repr(number))\n for number in range(0, manager.current_proof_number):\n coqfile_view.erase_regions('proof: ' + repr(number))\n\n manager.reset()\n\n# Internal commands\n\nclass CoqtopOutputCommand(sublime_plugin.TextCommand):\n def run(self, edit, output):\n entire_region = sublime.Region(0, self.view.size())\n self.view.set_read_only(False)\n self.view.erase(edit, entire_region)\n self.view.insert(edit, 0, output)\n self.view.set_read_only(True)\n\nclass RunCoqCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n coq_syntax = self.view.settings().get('syntax')\n window = self.view.window()\n editor_group = window.active_group()\n self.view.settings().set('coqtop_running', True)\n\n manager.current_position = 0\n manager.current_comment_number = 0\n manager.current_statement_number = 0\n manager.current_proof_number = 0\n manager.proof_mode = False\n\n window.run_command('new_pane', {\"move\": False})\n window.focus_group(editor_group)\n coq_group = window.num_groups() - 1\n coqtop_view = window.active_view_in_group(coq_group)\n coqtop_view.set_syntax_file(coq_syntax)\n coqtop_view.set_name('*COQTOP*')\n coqtop_view.set_read_only(True)\n coqtop_view.set_scratch(True)\n coqtop_view.settings().set('coqtop_running', True)\n\n manager.file_view = self.view\n manager.output_view = coqtop_view\n\n manager.start()\n\n\nclass CoqContext(sublime_plugin.EventListener):\n def on_query_context(self, view, key, operator, operand, match_all):\n if key == 'running_coqtop':\n running = view.settings().get('coqtop_running')\n if running is None:\n return None\n if operator == sublime.OP_EQUAL:\n return running\n elif operator == sublime.OP_NOT_EQUAL:\n return not running\n else:\n return False\n return None\n\n def on_selection_modified(self, view):\n if view.settings().get('coqtop_running') == True:\n regions = []\n for number in range(0, manager.current_statement_number):\n regions += view.get_regions('statement: ' + repr(number))\n for number in range(0, manager.current_proof_number):\n regions += view.get_regions('proof: ' + repr(number))\n\n selection = view.sel()\n\n view.set_read_only(False)\n for region in regions:\n for selected in selection:\n if region.intersects(selected):\n view.set_read_only(True)\n break\n","sub_path":"sublimecoq.py","file_name":"sublimecoq.py","file_ext":"py","file_size_in_byte":9861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"190229063","text":"\"\"\"empty message\n\nRevision ID: 70fc4a497980\nRevises: 2591f538e592\nCreate Date: 2019-01-15 19:09:27.361461\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '70fc4a497980'\ndown_revision = '2591f538e592'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_index('ix_business_connection_dm_connection_id', table_name='business_connection')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_index('ix_business_connection_dm_connection_id', 'business_connection', ['dm_connection_id'], unique=True)\n # ### end Alembic commands ###\n","sub_path":"backend-master/src/business/main/migrations/versions/70fc4a497980_.py","file_name":"70fc4a497980_.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"240588071","text":"# proj3_william_verthein.py\n#\n# This program takes two 14-base integers and adds them together.\n# It then outputs the sum of those two integers.\n#\nnmbr_b14 = ['0', '1', '2', '3', '4', '5', '6',\n '7', '8', '9', 'A', 'J', 'Q', 'K']\n\ndef main():\n '''Program works in a few steps. Takes an input of 14-base numbers.\n It then creates a list of each user input value, and converts to 10-base.\n The sum is calculated in 10-base, and then converted back to 14-base.\n Finally, the program finalizes and outputs the sum.'''\n\n print('This program gives the sum of two 14-base integers.\\n')\n nmbr1, nmbr2 = get_input()\n lst_nmbr1, lst_nmbr2 = convert_alphas(nmbr1, nmbr2)\n tot1, tot2 = convert_to_10b(lst_nmbr1, lst_nmbr2)\n total_10b = summation(tot1, tot2)\n bkwrds_answer = convert_to_14b(total_10b)\n final_answer = answer(bkwrds_answer)\n print()\n print(final_answer)\n\n# Get user input\ndef get_input():\n nmbr1 = input('Enter the first number: ')\n nmbr2 = input('Enter the second number: ')\n return nmbr1, nmbr2\n\n# Convert any alpha integers into their numerical value(s)\ndef convert_alphas(nmbr1, nmbr2):\n lst_nmbr1 = []\n lst_nmbr2 = []\n nmbr1 = str(nmbr1)\n nmbr2 = str(nmbr2)\n for ch in nmbr1:\n lst_nmbr1 += ch\n for ch in nmbr2:\n lst_nmbr2 += ch\n for i, v in enumerate(lst_nmbr1):\n if v == 'A':\n lst_nmbr1.remove('A')\n lst_nmbr1.insert(i, '10')\n elif v == 'J':\n lst_nmbr1.remove('J')\n lst_nmbr1.insert(i, '11')\n elif v == 'Q':\n lst_nmbr1.remove('Q')\n lst_nmbr1.insert(i, '12')\n elif v == 'K':\n lst_nmbr1.remove('K')\n lst_nmbr1.insert(i, '13')\n for i, v in enumerate(lst_nmbr2):\n if v == 'A':\n lst_nmbr2.remove('A')\n lst_nmbr2.insert(i, '10')\n elif v == 'J':\n lst_nmbr2.remove('J')\n lst_nmbr2.insert(i, '11')\n elif v == 'Q':\n lst_nmbr2.remove('Q')\n lst_nmbr2.insert(i, '12')\n elif v == 'K':\n lst_nmbr2.remove('K')\n lst_nmbr2.insert(i, '13')\n return lst_nmbr1, lst_nmbr2\n\n# Convert the input 14-base numbers and convert to 10-base\ndef convert_to_10b(lst_nmbr1, lst_nmbr2):\n n1 = len(lst_nmbr1)-1\n n2 = len(lst_nmbr2)-1\n tot1 = 0\n tot2 = 0\n while n1 >= 0:\n for i in lst_nmbr1:\n i = int(i)\n tot1 += i * 14**n1\n n1 -= 1\n while n2 >= 0:\n for i2 in lst_nmbr2:\n i2 = int(i2)\n tot2 += i2 * 14**n2\n n2 -= 1\n return tot1, tot2\n\n# Get the sum of the two now 10-base numbers\ndef summation(tot1, tot2):\n total_10b = tot1 + tot2\n return total_10b\n\n# Convert the 10-base sum back to 14-base\ndef convert_to_14b(total_10b):\n bkwrds_answer = \"\"\n quotient = total_10b\n if quotient == 0:\n bkwrds_answer += '0'\n else:\n while quotient > 0:\n remainder = quotient % 14\n bkwrds_answer += nmbr_b14[remainder]\n quotient //= 14\n return bkwrds_answer\n\n# Finalize formatting of the final answer by reversing the string\ndef answer(bkwrds_answer):\n final_answer = bkwrds_answer[::-1]\n return final_answer\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"proj3_william_verthein.py","file_name":"proj3_william_verthein.py","file_ext":"py","file_size_in_byte":3308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"485033635","text":"#!https://realpython.com/introduction-to-mongodb-and-python/\nimport pymongo\nclient = pymongo.MongoClient(\"mongodb://localhost:27017/\")\n\n\ndb = client['pymongo_test']\nposts = db.posts\n\nname= raw_input(\"What is your name?\")\nfather_name= raw_input(\"What is your father name?\")\n\npost_data = {'Name': name, 'father_name': father_name}\n\n\nresult = posts.insert_one(post_data)\n\n\nimport pymongo\n\nmyclient = pymongo.MongoClient()\ndb = myclient[\"pymongo_test\"]\nposts = db.posts\n\nfor x in posts.find():\n print(x) \n","sub_path":"pymongo.py","file_name":"pymongo.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"564891201","text":"# -*- coding: utf-8 -*-\n# for mac\nimport matplotlib\nmatplotlib.use('TkAgg')\n\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score\nfrom sklearn.metrics import auc\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.decomposition import PCA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.preprocessing import StandardScaler\nfrom ml_algorithms.ml_algorithm_interface import AlgorithmInterface\n\n\nclass LogisticRegressionAlgorithm(AlgorithmInterface):\n def __init__(self):\n super(LogisticRegressionAlgorithm, self).__init__()\n\n def feature_engineering(self):\n self.convert_symbolic_feature_into_continuous()\n\n def train_phase(self):\n pipe_logistic_regression = Pipeline([('sc', StandardScaler()),\n ('pca', PCA(n_components=2)),\n ('clf', LogisticRegression(random_state=1))\n ])\n\n # param_range = [10 ** c for c in range(-4, 4)]\n param_range = [0.0001, 0.001]\n hyper_parameter_grid = {\n 'clf__C': param_range,\n 'clf__gamma': param_range,\n 'clf__kernel': ['linear', 'rbf']\n }\n\n # Set up the random search with 4-fold cross validation\n self.classifier = RandomizedSearchCV(estimator=pipe_logistic_regression,\n param_distributions=hyper_parameter_grid,\n cv=4, n_iter=5,\n scoring='roc_auc',\n n_jobs=-1, verbose=2,\n return_train_score=True,\n random_state=42)\n\n # Fit on the training data\n self.classifier.fit(self.train_data, self.train_label)\n print(\"123\")\n\n def test_phase(self):\n y_predict = self.classifier.predict(self.test_data)\n print(\"accuracy: %f\" % accuracy_score(self.test_label, y_predict))\n print(\"precision: %f\" % precision_score(self.test_label, y_predict, average=\"macro\"))\n print(\"recall: %f\" % recall_score(self.test_label, y_predict, average=\"macro\"))\n\n fpr, tpr, thresholds = metrics.roc_curve(y_predict, self.test_label)\n plt.plot(fpr, tpr, marker='o')\n plt.show()\n auc_score = auc(fpr, tpr)\n print(\"AUC: %f\" % auc_score)\n","sub_path":"ml_algorithms/logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"301639469","text":"\"\"\"\nThe dreammarket sink module provides the ability to scrape raw data (HTML) from\nthe onion site that is hosting it, then (if specified) save it to disk, send it\nthrough an ingestionion point, and save it in a 
datastore.\n\"\"\"\nimport os\nimport logging\n\nfrom pyvirtualdisplay import Display\n\nfrom dminer.ingestion.dreammarket import DreammarketParser\nfrom dminer.stores.interfaces import ElasticsearchInterface, STDOutInterface\nfrom dreammarket import *\n\nlogger = logging.getLogger(__name__)\n\n\ndef prepare_cli(parser):\n \"\"\"\n Prepares the CLI subgroup parser by adding arguments specific to the\n dreammarket sink. It also sets the entry point for the CLI to use when\n specifying this subgroup.\n \"\"\"\n # Sink related arguments\n parser.add_argument(\n \"-u\", \"--dreammarket-username\",\n default=os.environ.get(\"DMINER_SINK_DREAMMARKET_USERNAME\", None),\n help=\"\"\"\n Specifies the username to use for the login form on Dream Market. It is\n also able to be specified as an environment variable: DMINER_SINK_DREAMMARKET_USERNAME.\n This is required for this sink module.\n \"\"\"\n )\n parser.add_argument(\n \"-p\", \"--dreammarket-password\",\n default=os.environ.get(\"DMINER_SINK_DREAMMARKET_PASSWORD\", None),\n help=\"\"\"\n Specifies the password to use for the login form on Dream Market. It is\n also able to be specified as an environment variable: DMINER_SINK_DREAMMARKET_PASSWORD.\n This is a required for this sink module.\n \"\"\"\n )\n parser.add_argument(\n \"-k\", \"--dbc-access-key\",\n default=os.environ.get(\"DMINER_DBC_ACCESS_KEY\", None),\n help=\"\"\"\n Specifies the access key to use for deathbycaptcha. It is also able to\n be specified as an environment variable: DMINER_DBC_ACCESS_KEY.\n This is required for this sink module.\n \"\"\"\n )\n parser.add_argument(\n \"-s\", \"--dbc-secret-key\",\n default=os.environ.get(\"DMINER_DBC_SECRET_KEY\", None),\n help=\"\"\"\n Specifies the secret key to use for deathbycaptcha. It is also able to\n be specified as an environment variable: DMINER_DBC_SECRET_KEY.\n This is required for this sink module.\n \"\"\"\n )\n parser.add_argument(\n \"--onion-url\",\n default=os.environ.get(\n \"DMINER_SINK_DREAMMARKET_ONION_URL\", \"http://lchudifyeqm4ldjj.onion\"\n ),\n help=\"\"\"\n Specifies the onion URL to use for this marketplace. It is also able to\n be specified as an environment variable: DMINER_SINK_DREAMMARKET_ONION_URL.\n This is required for this sink module. The default is: %(default)s.\n \"\"\"\n )\n\n url_category_exclusive_group = parser.add_mutually_exclusive_group()\n url_category_exclusive_group.add_argument(\n \"--url-file\",\n default=None,\n help=\"\"\"\n Specifies the file to use for defining URLs to be consumed by the\n scraper. If specified, only the URL's in the file will be scraped, and\n the sink will exit after all URL's from the file have been exhausted.\n \"\"\"\n )\n url_category_exclusive_group.add_argument(\n \"--category\",\n default=\"digital_goods.hacking\",\n help=\"\"\"\n Specifies the category to pull URLS from for consumption by the\n scraper. If specified, URL's will be pulled dynamically, and the\n category specified will be used to look up where to pull the URLs.\n The default is '%(default)s'.\n \"\"\"\n )\n\n parser.add_argument(\n \"--daemonize\",\n action=\"store_true\",\n help=\"\"\"\n If specified, the scraper will be put into a daemon mode, which will\n repeatedly run, refreshing URLS to scrape based on the CLI options\n provided (either --category or --url-file).\n \"\"\"\n )\n parser.add_argument(\n \"--request-interval\",\n default=15, type=int,\n help=\"\"\"\n The request interval is the maximum amount of time to wait in between\n requests for each page being scraped. 
The actual amount of time in\n between requests is random, ranging between 0 and the interval\n specified. The default is %(default)i seconds.\n \"\"\"\n )\n parser.add_argument(\n \"--request-retries\",\n default=5, type=int,\n help=\"\"\"\n The request retry metric is used to determine how many attempts should\n be made to scrape a particular page before skipping the URL. The\n default is %(default)i seconds.\n \"\"\"\n )\n parser.add_argument(\n \"--request-timeout\",\n default=5, type=int,\n help=\"\"\"\n The request timeout metric is used to determine how long a request\n should persist without a response. The default is %(default)i seconds.\n \"\"\"\n )\n parser.add_argument(\n \"--save-to-directory\",\n default=None,\n help=\"\"\"\n If specified, the sink will attempt to save all scraped HTML files to\n the specified directory.\n \"\"\"\n )\n parser.add_argument(\n \"-v\", \"--verbosity\",\n default=\"info\",\n choices=[\"debug\", \"info\", \"warn\", \"error\"],\n help=\"\"\"\n Controls the verbosity of the ingestion point. Default is %(default)s.\n \"\"\"\n )\n\n # Flag to also perform ingestion\n parser.add_argument(\n \"--ingest\",\n action=\"store_true\",\n help=\"\"\"\n If specified, the sink will pass all scraped HTML files to the Alpha\n Bay ingestion point, with the ingestion point being configured to use\n the specified datstore interface.\n \"\"\"\n )\n\n # Datastore related arguments\n parser.add_argument(\n \"--datastore\",\n default=\"stdout\",\n choices=[\"stdout\", \"elasticsearch\"],\n help=\"\"\"\n Specify the datastore to use during ingestion. The default datastore is\n %(default)s.\n \"\"\"\n )\n parser.add_argument(\n \"--datastore-host\",\n default=\"localhost\",\n help=\"\"\"\n Specify the datastore remote host. The default host is %(default)s.\n \"\"\"\n )\n parser.add_argument(\n \"--datastore-port\",\n default=9200,\n help=\"\"\"\n Specify the datastore remote port. The default port is %(default)s.\n \"\"\"\n )\n parser.set_defaults(func=entry)\n\n\ndef entry(arguments):\n \"\"\"\n The entry point for the dreammarket sink CLI interface. 
This defines the\n logic around the usage of command line arguments and the dreammarket sink in\n order to perform scraping, ingestion, and storage related functions.\n \"\"\"\n logger.setLevel(arguments.verbosity.upper())\n if not arguments.dreammarket_username:\n logger.error(\"This sink requires a username to be specified through CLI or enviornment variable.\")\n raise SystemExit()\n if not arguments.dreammarket_password:\n logger.error(\"This sink requires a password to be specified through CLI or environment variable.\")\n raise SystemExit()\n\n if not arguments.dbc_access_key:\n logger.error(\"This sink requires a deathbycaptcha access key to be specified through CLI or environment variable.\")\n raise SystemExit()\n if not arguments.dbc_secret_key:\n logger.error(\"This sink requires a deathbycaptcha secret key to be specified through CLI or environment variable.\")\n raise SystemExit()\n\n\n display = Display(visible=0, size=(1366, 768))\n display.start()\n sink = DreammarketSink(\n arguments.dreammarket_username, arguments.dreammarket_password,\n arguments.dbc_access_key, arguments.dbc_secret_key,\n url_file=arguments.url_file,\n save_to_directory=arguments.save_to_directory,\n onion_url=arguments.onion_url,\n request_interval=arguments.request_interval,\n request_retries=arguments.request_retries,\n request_timeout=arguments.request_timeout,\n category=arguments.category\n )\n sink.logger = logger\n\n if arguments.ingest:\n if arguments.datastore == \"stdout\":\n store = STDOutInterface()\n\n parser = DreammarketParser(datastore=store)\n parser.parse(scrape_results=sink.scrape())\n\n elif arguments.datastore == \"elasticsearch\":\n store = ElasticsearchInterface(\n host=arguments.datastore_host,\n port=arguments.datastore_port\n )\n\n parser = DreammarketParser(datastore=store)\n parser.parse(\n scrape_results=sink.scrape(\n daemon=arguments.daemonize\n )\n )\n else:\n list(sink.scrape())\n display.stop()\n","sub_path":"dminer/sinks/dreammarket/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"301804778","text":"#!/usr/bin/env python\n'''\nCreated on 27/10/2016\n\n@author: sium\n'''\nfrom __future__ import print_function\n\n\n__author__ = 'sium'\n\n__licence__=\"\"\"\nMIT License\n\nCopyright (c) 2017 Sinan Ugur Umu (SUU) sinanugur@gmail.com\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n\"\"\"\n\n__doc__=\"\"\"Variant caller for HPV project.\n\nUsage:\n hpv-variant-call.py [] (--chromosome | --auto) [--discordant] [--reference ] [--start=] [--end=] [--transformed] [--cpu=]\n hpv-variant-call.py [--chromosome | --auto] [--reference ] [--start=] [--end=]\n hpv-variant-call.py (-h | --help)\n hpv-variant-call.py --version\n\nArguments:\n BAM BAM or SAM File name.\n FASTA Output FASTA file name for soft clipped sequences.\n BED Output tab-seperated BED file name for soft clipped sequences.\n OUTCSV Write regular CSV output into a file, not STDOUT.\n -c , --chromosome The name of the chromosome.\n -r , --reference Reference FASTA file.\n -s , --start Start position [default : 0]\n -e , --end End position\n -j , --cpu The number of CPUs for parallel processing. [default : 1] \n\nOptions:\n -a --auto Autodetect chromosome name (with highest coverage) to be fetched. \n -t --transformed Mapped HPV genomes are transformed.\n -h --help Show this screen.\n --version Show version.\n\n\n\"\"\"\n\n\n#prevent sigpipe error\nfrom signal import signal, SIGPIPE, SIG_DFL\nsignal(SIGPIPE,SIG_DFL)\n#########\n\n\nimport pysam\nfrom collections import Counter\nfrom docopt import docopt\n\nimport sys\nfrom math import floor\n\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom re import search\nfrom re import match\nfrom re import compile\nfrom pathos.multiprocessing import ProcessPool\nfrom functools import reduce\nfrom itertools import repeat\n\n\ndef auto_detect_chromosome_by_coverage(samfile,bam_file):\n hpv_chromosomes = list(filter(lambda x: x.find(\"HPV\") >= 0, samfile.references)) # find HPV chromosomes\n the_list_of_chromosome_counts = list(\n map(lambda chr: [chr, samfile.count(chr)], hpv_chromosomes)) # estimate HPV chromosome coverages\n autodetected_chromosome = reduce(lambda x, y: x if x[1] > y[1] >= 0 else y,\n the_list_of_chromosome_counts) # find the highest coverage\n print(\"The contig with the highest coverage is %s for the BAM file, %s \" % (autodetected_chromosome[0], bam_file),\n file=sys.stderr)\n\n return(autodetected_chromosome[0])\n\n\n\ndef auto_detect_hpv_type_from_file_name(samfile,bam_file):\n\n hpv_name=search('(HPV[0-9]+)',bam_file).group(1)\n hpv_regex = compile(\"\\(\" + hpv_name + \"\\)\")\n\n autodetected_chromosome = list(filter(lambda x: search(hpv_regex,x), samfile.references)) # find HPV chromosome\n \n print(\"The HPV name detected is %s for the BAM file, %s \" % (autodetected_chromosome[0], bam_file),\n file=sys.stderr)\n\n return (autodetected_chromosome[0])\n\n\n\ndef function_position_counter(pileupread,position_counter,quality_counter):\n if not pileupread.is_refskip:\n if not pileupread.is_del:\n base = pileupread.alignment.query_sequence[pileupread.query_position]\n position_counter[base] += 1\n quality_counter[base] += pileupread.alignment.query_qualities[pileupread.query_position]\n else:\n position_counter[\"deletion\"] += 1\n\n else:\n position_counter[\"skip\"] += 1\n\n\n\ndef function_merge_two_dicts(x, y):\n \"\"\"Given two dicts, merge them into a new dict as a shallow copy.\"\"\"\n z = x.copy()\n z.update(y)\n return(z)\n\ndef function_reduce(x,y):\n return((x[0]+y[0],x[1]+y[1]))\n\ndef function_parallel_count(position,bam_file,chromosome):\n samfile = 
pysam.AlignmentFile(bam_file)\n\n\n position_counter = Counter()\n discordant_counter = Counter()\n quality_counter = Counter()\n discordant_quality_counter = Counter()\n\n if arguments['--discordant']:\n for pileupcolumn in samfile.pileup(chromosome, position, position + 1, truncate=True, max_depth=1000000000):\n for pileupread in pileupcolumn.pileups:\n if (pileupread.alignment.reference_name != pileupread.alignment.next_reference_name):\n function_position_counter(pileupread, discordant_counter, discordant_quality_counter)\n else:\n for pileupcolumn in samfile.pileup(chromosome, position, position + 1, truncate=True, max_depth=1000000000):\n for pileupread in pileupcolumn.pileups:\n function_position_counter(pileupread, position_counter, quality_counter)\n\n samfile.close()\n return({position:(position_counter,quality_counter,discordant_counter,discordant_quality_counter)})\n\n\ndef hpv_variant_table_create(bam_file,chromosome,reference_filename,start,end,csv1):\n\n samfile = pysam.AlignmentFile(bam_file)\n\n if arguments['--auto']:\n\n try:\n chromosome = auto_detect_hpv_type_from_file_name(samfile,bam_file)\n except:\n chromosome = auto_detect_chromosome_by_coverage(samfile, bam_file)\n\n if reference_filename is None:\n sequence = None\n\n else:\n\n\n for record in SeqIO.parse(reference_filename,\"fasta\"):\n if record.id == chromosome:\n sequence=str(record.seq)\n break\n\n\n\n\n start= int(0 if start is None else start) #start position of the fetched location\n end= int(samfile.lengths[samfile.references.index(chromosome)]) if end is None else int(end) #calculate the end by using the chromosome name\n length=int(samfile.lengths[samfile.references.index(chromosome)])\n\n second_half=length - floor(length/2) +1\n first_half=floor(length/2 -1)\n\n function_transformed_position = lambda position: int(\n position + 1 + first_half) if position + 1 <= second_half else int(position + 1 - second_half)\n\n\n\n print(\"chr\\tposition\\treference\\tcoverage\\tA\\tG\\tC\\tT\\tdeletion\\tskip\\tqA\\tqG\\tqC\\tqT\",\n file= csv1 if csv1 else sys.stdout)\n\n\n\n samfile.close()\n with ProcessPool(int(arguments['--cpu'])) as pool:\n res = pool.map(function_parallel_count, range(start,end),repeat(bam_file),repeat(chromosome))\n\n\n results=reduce(function_merge_two_dicts,res)\n\n for position in range(start,end):\n\n if not arguments['--transformed']: # is this a shifted genome, no\n pos = position + 1\n else:\n pos = function_transformed_position(position)\n\n if arguments['--discordant']:\n print_variant_csv_files(results[position][2],results[position][3],chromosome,sequence,position,pos,csv1 if csv1 else sys.stdout)\n else:\n print_variant_csv_files(results[position][0],results[position][1],chromosome,sequence,position,pos,csv1 if csv1 else sys.stdout)\n\n\n\ndef print_variant_csv_files(position_counter,quality_counter,chromosome,sequence,position,pos,where_to_print):\n\n\n print(\"{chromosome}\\t{position}\\t{reference}\\t{coverage}\\t{A}\\t{G}\\t{C}\\t{T}\\t{deletion}\\t{skip}\\t{qA:.2f}\\t{qG:.2f}\\t{qC:.2f}\\t{qT:.2f}\".format(\n chromosome=chromosome, position=pos,\n reference='NA' if sequence is None else sequence[position],\n coverage=position_counter[\"A\"] + position_counter[\"G\"] + position_counter[\"C\"] + position_counter[\"T\"],\n A=position_counter[\"A\"],\n G=position_counter[\"G\"],\n C=position_counter[\"C\"],\n T=position_counter[\"T\"],\n deletion=position_counter[\"deletion\"],\n skip=position_counter['skip'],\n qA=quality_counter[\"A\"] / (position_counter[\"A\"] 
+0.000000000001),\n qG=quality_counter[\"G\"] / (position_counter[\"G\"] +0.000000000001),\n qC=quality_counter[\"C\"] / (position_counter[\"C\"] +0.000000000001),\n qT=quality_counter[\"T\"] / (position_counter[\"T\"] +0.000000000001)\n ),file=where_to_print)\n\n\ndef fetch_soft_clipped(bam_file,chromosome,start,end,fasta_file,tsv_file):\n\n samfile = pysam.AlignmentFile(bam_file)\n\n if arguments['--auto']:\n\n try:\n chromosomes = list(auto_detect_hpv_type_from_file_name(samfile,bam_file))\n except:\n chromosomes = list(auto_detect_chromosome_by_coverage(samfile, bam_file))\n\n elif chromosome is None:\n chromosomes = samfile.references\n else:\n chromosomes = list(chromosome)\n\n cigarsoft = compile(\"([1-9][0-9]+)S\")\n\n with open(fasta_file,\"w\") as fasta,open(tsv_file,\"w\") as tsv:\n for chromosome in chromosomes:\n start = int(0 if start is None else start) # start position of the fetched location\n end = int(samfile.lengths[samfile.references.index(chromosome)]) if end is None else int(\n end) # calculate the end by using the chromosome name\n\n for read in samfile.fetch(chromosome,start,end):\n if not read.is_unmapped and search(cigarsoft,read.cigarstring):\n #seq_position=0\n #read_aligned_pairs=read.get_aligned_pairs()\n #for i in read.cigartuples:\n #if i[0] == 4 and i[1] >= 10: #detect soft clipped, 4 is for soft clip\n\n\n if match(cigarsoft, read.cigarstring): #if soft clipping at the beginning\n size=int(match(cigarsoft, read.cigarstring).group(1))\n sequence=read.seq[0:size]\n else: #if soft clipping at the end\n size = int(search(cigarsoft, read.cigarstring).group(1))\n sequence = read.seq[-size:]\n\n if read.is_reverse:\n sequence=str(Seq(sequence).reverse_complement()) #take reverse complement if on opposite strand\n\n\n print (\">{read_id}\\n{sequence}\".format(read_id=read.query_name,sequence=sequence),file=fasta)\n feat_start = read.reference_start if match(cigarsoft,read.cigarstring) else read.reference_end\n\n print (\"{ref_id}\\t{feat_start}\\t{feat_end}\\t{name}\\t{score}\\t{strand}\".format(ref_id=read.reference_name,\n feat_start=feat_start,\n feat_end=feat_start+size,\n name=read.query_name,score=1,strand=\".\"),file=tsv)\n\n #break\n #elif i[0] != 3: #3 is for Ns\n #elif i[0] != 3: # 3 is for Ns\n # seq_position=seq_position + i[1]\n\n\n\n else:\n pass\n\n\n\n\ndef main():\n\n if arguments['']:\n fetch_soft_clipped(arguments[''],arguments['--chromosome'],arguments['--start'],arguments['--end'],arguments[''],arguments[''])\n else:\n\n if arguments['']:\n with open(arguments[\"\"], \"w\") as csv1:\n hpv_variant_table_create(arguments[''], arguments['--chromosome'], arguments['--reference'],\n arguments['--start'], arguments['--end'], csv1)\n else:\n hpv_variant_table_create(arguments[''], arguments['--chromosome'], arguments['--reference'],\n arguments['--start'], arguments['--end'], csv1=None)\n\n\nif __name__ == '__main__':\n arguments = docopt(__doc__, version='0.95')\n main()\n","sub_path":"bin/hpv-variant-call.py","file_name":"hpv-variant-call.py","file_ext":"py","file_size_in_byte":12869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"208032690","text":"\n\n#\n# Central place to keep all \"compile time constants\"\n#\n\nyard_through_prob = 0.0 # prob of a train going straight through a yard\nyard_departure_prob = 1 # prob that a yard will start a departure in a given dispatch\nsection_through_prob = 0.25 # prob of train going straight through a dispatcher-controlled section\nmax_train_speed = 50 
# highest possible train speed\ndefault_cruise_speed = 30 # default train running speed\ndefault_slowing_speed = 20 # default speed when slowing to stop\ndefault_stopping_speed = 10 # default speed just prior to stopping\ndefault_normal_acceleration = 8 # mph/sec (urgh)\ndefault_normal_deceleration = 12 # mph/sec (urgh)\ndefault_max_speed = 40 # mph (full size)\nscale_factor = 1/20.5 # correction for scale\nmph2fps = 22.0/15.0 # convert MPH to ft/sec\nspeed_fudge = 0.5 # fydge factor for speed\ndispatch_interval = 0.05 # seconds between real-life dispatcher runs\nsensor_active_time = 2.0 # time for a sensor to remain active once triggered\ndefault_time_dilution = 100 # time scaling factor for simulation\ntrain_length_fudge = 1.2 # over estimate of train length\ndefault_mean_wait = 15.0 # default mean train wait time\ndefault_max_wait = 40.0 # default max train wait time\nstopping_margin = 2.0 # distance before stop point to slow to stopping speed\nmin_speed_change_interval = 0.5 # minimum interval between loco speed changes\ndefault_sensor_offset = 1 # default sensor offset if present\nweibull_shape = 1.7 # shape parameter for Weibull distribution\nspeed_estop = -1 # emergency stop command\nnce_response_wait_time = 0.2 # time to wait for response from NCE controller\nlog_section_status = False # log status for all sections every 10S\nlog_train_status = False # log status for all trains every 10S\n\ndef _set_constant(args) :\n s = [ a.strip() for a in args.split('=')]\n if len(s) != 2 :\n raise ValueError(\"constant definition must have form 'name=value', not '%s'\" % (a,))\n try :\n c = globals()[s[1]]\n except KeyError :\n raise ValueError(\"unknown constant '%s'\" % (s[1],))\n try :\n v = type(c)(s[2])\n except ValueError :\n raise ValueError(\"illegal format '%s' for value for constant '%s'\" \\\n % (s[2], s[1]))\n c = v\n\ndef load_string(args) :\n for a in args :\n _set_constant(a)\n\ndef load_file(f) :\n for line in f :\n l = line.strip()\n if not l.empty() and l[0] != '#' :\n _set_constant(l)\n","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"581834510","text":"# we still need a way to classify what each part actually is (operator/number/function), operator levels. 
and correctly\n# differentiate between unary + and - operations from subtraction and addition\n\nfrom precedence import operators, functions # get operator and function dictionaries\nfrom expressionSplitter import is_number # get made function to check for numbers\n\n\n# process identification of parts\ndef is_which(arr):\n which = [] # list of what parts are in list\n precedence = [] # list of what operator level parts are in list (if applicable)\n part = 0 # index for parts list\n\n # parse through parts list to identify type and precedence\n while part < len(arr):\n # check is part is a number value\n if is_number(arr[part]):\n which.append(int) # add int as identifier in which list\n precedence.append(int) # add int as identifier in precedence list (ignorable)\n\n # check is part is an operator\n elif arr[part] in operators:\n which.append('op') # add 'op' as identifier in which list\n\n '''conditions to determine if '-','+' is level 2 or 5'''\n # expression begins with unary -\n if part == 0 and arr[part] == '-':\n if is_number(arr[part + 1]): # op, int\n precedence.append(2)\n elif arr[part + 1] in operators: # op, op\n precedence.append(2)\n # if unary - situation case is comma, op\n elif arr[part] == '-' and which[part - 1] == 'comma':\n precedence.append(2)\n # if unary - situation case is op,op,int\n elif arr[part] == '-' and which[part - 1] == 'op' and is_number(arr[part + 1]):\n if arr[part - 1] == ')': # op before is parentheses, likely to be level 5 subtraction\n precedence.append(5)\n else:\n precedence.append(2) # else level 2\n # if subtraction - situation case is int,op,int\n elif arr[part] == '-' and is_number(arr[part - 1]) and is_number(arr[part + 1]):\n precedence.append(5)\n\n # expression begins with unary +\n elif part == 0 and arr[part] == '+':\n if is_number(arr[part + 1]): # op, int\n precedence.append(2)\n elif arr[part + 1] in operators: # op, op\n precedence.append(2)\n # if unary + situation case is comma, op\n elif arr[part] == '+' and which[part - 1] == 'comma':\n precedence.append(2)\n # if unary + situation case is op,op,int\n elif arr[part] == '+' and which[part - 1] == 'op' and is_number(arr[part + 1]):\n if arr[part - 1] == ')': # op before is parentheses, likely to be level 5 addition\n precedence.append(5)\n else:\n precedence.append(2) # else level 2\n # if addition + situation case is int,op,int\n elif arr[part] == '+' and is_number(arr[part - 1]) and is_number(arr[part + 1]):\n precedence.append(5)\n\n # find level for non-confusing other operators\n else:\n precedence.append(operators[arr[part]])\n\n # if part is a function\n elif arr[part] in functions:\n which.append('func') # add 'func' as identifier in which list\n precedence.append('func') # add 'func' as identifier in precedence list (ignorable)\n # if part is a comma\n elif arr[part] == ',':\n which.append('comma') # add 'comma' as identifier in which list\n precedence.append('comma') # add 'comma' as identifier in precedence list (ignorable)\n # unsupported\n else:\n which.append(ValueError) # error, no clue what it is\n precedence.append(ValueError) # error, no clue what it is\n\n # move onto next part\n part += 1\n\n # returns 2 lists: which that tells what each part is, precedence that tells what the operator level is\n return which, precedence\n","sub_path":"classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":4166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"508542716","text":"from django.conf.urls 
import patterns, include, url\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom kiteclub.views import HomeView, Index6View, PrivacyView, LocalizationView, SitemapView\nfrom events.views import EventsView, SingleEventView, AboutView, HistoryView, ClubEventsView, FaqsView\nfrom blog.views import ArticlesView, SingleArticleView\nfrom lessons.views import LessonsView, SingleLessonView\nfrom video.views import VideosView, SingleVideoView\nfrom contacts.views import MailView\nfrom gallery.views import GalleryView\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^$', HomeView.as_view()),\n url(r'^mail/$', MailView.as_view()),\n url(r'^mail/feedback/$', MailView.as_view()),\n url(r'^about/$', AboutView.as_view()),\n url(r'^faqs/$', FaqsView.as_view()),\n url(r'^history/$', HistoryView.as_view()),\n url(r'^index6/$', Index6View.as_view()),\n url(r'^events/$', EventsView.as_view()),\n url(r'^club-events/$', ClubEventsView.as_view()),\n url(r'^events/([0-9a-zA-Z-]+)$', SingleEventView.as_view()),\n url(r'^articles/$', ArticlesView.as_view()),\n url(r'^articles/([0-9a-zA-Z-]+)$', SingleArticleView.as_view()),\n url(r'^lessons/$', LessonsView.as_view()),\n url(r'^lessons/([0-9a-zA-Z-]+)$', SingleLessonView.as_view()),\n url(r'^videos/$', VideosView.as_view()),\n url(r'^videos/([0-9a-zA-Z-]+)$', SingleVideoView.as_view()),\n url(r'^gallery/$', GalleryView.as_view()),\n url(r'^privacy/$', PrivacyView.as_view()),\n url(r'^sitemap.xml$', SitemapView.as_view()),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^i18n/(\\w+)/', LocalizationView.as_view()),\n url(r'^ckeditor/', include('ckeditor.urls')),\n\n)\n\nif settings.DEBUG:\n urlpatterns += patterns('',\n url(r'^media/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),\n url(r'^static/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.STATIC_ROOT, 'show_indexes': True}),\n )\n","sub_path":"kiteclub/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"563517923","text":"### Question 4\n\nexpenditures = [\n {'Company': 'Apple', 'Date': '01 Jan 2010', 'Purpose': 'payroll', 'Cost':95}, \n {'Company': 'Microsoft', 'Date': '01 Feb 2010', 'Purpose': 'payroll', 'Cost':1000}, \n {'Company': 'Microsoft', 'Date': '16 June 2010', 'Purpose': 'philanthropy', 'Cost':250}, \n {'Company': 'Google', 'Date': '11 Dec 2010', 'Purpose': 'legal', 'Cost':110}, \n {'Company': 'Apple', 'Date': '30 Aug 2010', 'Purpose': 'building renovations', 'Cost':10000}, \n {'Company': 'Google', 'Date': '29 Dec 2010', 'Purpose': 'equipment', 'Cost':25}, \n {'Company': 'Microsoft', 'Date': '01 Feb 2010', 'Purpose': 'payroll', 'Cost':1000}, \n {'Company': 'Oracle', 'Date': '01 Jan 2010', 'Purpose': 'payroll', 'Cost':30}\n]\n\ndef question1(expenditures, company):\n total = 0\n for entry in expenditures:\n if entry[\"Company\"] == company:\n total += entry[\"Cost\"] \n return total\n\ndef question2(expenditures, company):\n total = 0\n for entry in expenditures:\n if entry[\"Company\"] == company and entry[\"Purpose\"] == 'payroll':\n total += entry[\"Cost\"] \n return total\n\ndef question3(expenditures):\n companies = set()\n for entry in expenditures:\n companies.add(entry['Company'])\n return companies\n \ndef question4(expenditures):\n companies = question3(expenditures)\n\n minimum_company = \"\"\n minimum_cost = 0\n for company in companies:\n cost = 
question1(expenditures, company)\n # Here, to avoid using MAX Ruth did not cover, we just \n # use \"\" to denote: we haven't seen any companies yet.\n # minimum_company == \"\" will be true the very first\n # time \n if minimum_company == \"\" or cost < minimum_cost:\n minimum_cost = cost\n minimum_company = company\n\n return minimum_company\n\ndef question5(expenditures):\n # Please check the solution online\n pass\n\n\n\nquestion4(expenditures)\n\n\n\n\n","sub_path":"cse160/sections/04/questions.py","file_name":"questions.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"206833285","text":"#This file extract the publication year in each study. The years are saved in the file YearsPublications.txt.\r\n# Script created in Python V3.8\r\nimport urllib3\r\nimport codecs\r\nimport json\r\nimport time\r\ncountpapers=0 #Variable to count papers\r\n#Block to open DOIs\r\nf=open('DOIs.txt','r') #Abrir archivo DOI.txt en caso contrario crear archivo con DOI\r\nDOIS=f.readlines()\r\nf.close()\r\n#Block for CrossRef Query\r\nhttp = urllib3.PoolManager()\r\ndest = codecs.open('YearsPublications.txt',\"w\") #txt file with years\r\n#This file will be used in order to plot the information\r\n\r\nfor p in DOIS:\r\n try:\r\n countpapers=countpapers+1\r\n r = http.request('GET', 'https://api.crossref.org/works/'+p)\r\n data= json.loads(r.data.decode('utf-8'))\r\n print (str(countpapers)+\":\"+str(data))\r\n D = (data['message']['published-print']) #Get Data for years. Data are encoding in UTF through JSON\r\n Year=D['date-parts'][0]\r\n dest.write(str(Year[0]))\r\n dest.write(\"\\n\")\r\n print (Year[0])\r\n time.sleep(10) #Ten seconds between requests\r\n except:\r\n time.sleep(10) # Ten seconds between requests\r\n dest.write(\"Error\")\r\n print(\"Error\")\r\n dest.write(\"\\n\")\r\n\r\ndest.close() #Close File of publications by years\r\n","sub_path":"Scripts/ExtractYears.py","file_name":"ExtractYears.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"650496566","text":"#!/home/cimatori/installed/anaconda/bin/python\n\"\"\"\nCompute statistics of plus/minus increments as in Zhou & Xia 2002\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as ppl\n\nimport h5py\n\nfrom MyLib import Statistics, hist2plot\n\n# Load paramters\nimport ConfigPM\nreload(ConfigPM)\nfrom ConfigPM import *\n\nppl.close('all')\n\nsets = (range(72,108), range(108,144), range(0,36), range(36,72))\n\n# Increment labels\ndeltar = '$\\\\Delta \\\\theta_r$'\nspacing = '$r$ $\\\\mathrm{[m]}$'\n\nF = ppl.figure(figsize=(15,11))\nF.subplots_adjust(left=0.1, bottom=0.1, right=0.98, top=0.97, \\\n wspace=0, hspace=0)\n\nfor nset,Thms in enumerate(sets):\n setName = '{}-{}'.format(Thms[0]+1,Thms[-1]+1)\n TideFile = OutDir+'results/PMIncrements_{}_StepX_{}_day_{}_{}.npz' \\\n .format(setName,StepX,Start,End)\n print ('Load previously computed results for plotting only.')\n Data = np.load(TideFile)\n for k,v in Data.iteritems():\n exec('{}=v'.format(k))\n del Data\n\n # Define colors\n colorsdT = ppl.cm.get_cmap('Dark2',ndTs)(range(ndTs))\n\n ax = F.add_subplot(2,2,nset+1)\n\n pl = []; labs = []\n for p in xrange(nT):\n if namT[p]==\"down\":\n mfcol = colorsT[p]\n else:\n mfcol = 'none'\n s, = ax.plot(dXs, skPlus[:,p], \\\n marker=markPM['Plus'], c=colorsT[p], mec=colorsT[p], \\\n mfc=mfcol, **pStyle)\n pl.append(s)\n 
labs.append('$\\\\Delta_r\\\\theta^+$, {}'.format(namT[p]))\n s, = ax.plot(dz, skPlusZ[p], \\\n marker=markPM['Plus'], c=colorsT[p], mec=colorsT[p], \\\n mfc=mfcol, **p2Style)\n pl.append(s)\n labs.append('$\\\\Delta_z\\\\theta^+$, {}'.format(namT[p]))\n s, = ax.plot(dXs, skMinus[:,p], \\\n marker=markPM['Minus'], c=colorsT[p], mec=colorsT[p], \\\n mfc=mfcol, **pStyle)\n pl.append(s)\n labs.append('$\\\\Delta_r\\\\theta^-$, {}'.format(namT[p]))\n s, = ax.plot(dz, skMinusZ[p], \\\n marker=markPM['Minus'], c=colorsT[p], mec=colorsT[p], \\\n mfc=mfcol, **p2Style)\n pl.append(s)\n labs.append('$\\\\Delta_z\\\\theta^-$, {}'.format(namT[p]))\n \n # Mooring section name\n ax.text(0.85,0.85, setLabels[setName], size='xx-large', transform=ax.transAxes)\n\n ax.set_xscale('log')\n\n ax.set_xlim(0.1,900)\n ax.set_ylim(0,12)\n\n if nset==2:\n ax.legend(pl, labs, numpoints=1, fontsize=16, ncol=2, loc='lower left')\n\n if nset in (0,2):\n ax.set_ylabel('$\\\\mu_3\\\\left(\\\\Delta_r\\\\theta^\\pm\\\\right)$, ' + \\\n '$\\\\mu_3\\\\left(\\\\Delta_z\\\\theta^\\pm\\\\right)$', \\\n fontsize='xx-large')\n else:\n ax.set_yticklabels('')\n if nset>1:\n ax.set_xlabel(spacing, fontsize='xx-large')\n else:\n ax.set_xticklabels('')\n\nF.savefig(OutDir+'figures/Taylor_StepX_{}' \\\n .format(StepX) + \\\n '/Skewness_PMincrm_summary_day_{}_{}.tif' \\\n .format(Start,End), dpi=300)\n","sub_path":"LIS131/TemperatureDissipation/plot_PM_summary.py","file_name":"plot_PM_summary.py","file_ext":"py","file_size_in_byte":3058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"432226178","text":"from django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import JsonResponse\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse\nfrom django.views.generic import TemplateView\nfrom mercurysms import sheets\nfrom mercurysms import twilio\nfrom mercurysms import worker\nfrom mercurysms.forms import SendSMSForm\n\nSHEETS_KEY = getattr(settings, \"SHEETS_KEY\", None)\nSHEETS_GID = getattr(settings, \"SHEETS_GID\", None)\n\nbg_worker = worker.Worker()\n\nclass SendSMSView(LoginRequiredMixin, TemplateView):\n template_name = 'sendSMS.html'\n\n def __init__(self):\n super(SendSMSView, self).__init__()\n self.sheet = sheets.Sheet(SHEETS_KEY, SHEETS_GID)\n bg_worker = worker.Worker()\n\n def get_context_data(self, **kwargs):\n con = super(SendSMSView, self).get_context_data(**kwargs)\n lists = self.sheet.lists\n con.update({'form': SendSMSForm(lists), 'lists': lists,\n 'sheets_url': sheets.SHEETS_URL.format(key=SHEETS_KEY, gid=SHEETS_GID)})\n return con\n\n def post(self, request, *args, **kwargs):\n lists = request.POST.getlist('lists')\n numbers = set()\n for list_ in lists:\n numbers = numbers.union(set(self.sheet.get_list(list_)))\n message = request.POST.get('message')\n bg_worker.start_process(message, list(numbers))\n return redirect('sending')\n\n\n@login_required\ndef succesfully_sent(request):\n nums = bg_worker.err_nums\n cost = bg_worker.cost\n bg_worker.reset()\n context = {'nums': nums, 'cost': cost}\n return render(request, 'sms_sent.html', context=context)\n\n\n@login_required\ndef sending(request):\n return render(request, 'sending.html')\n\n\ndef status(request):\n data = { 'finished': bg_worker.done }\n return 
JsonResponse(data)\n","sub_path":"mercurysms/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"156055738","text":"from lxml.html import fromstring\nimport requests\nfrom itertools import cycle\nimport traceback\nimport urllib3\n\n\ndef get_proxies():\n url = 'https://hidemy.name/api/proxylist.txt?country=US&maxtime=1700&type=hs&out=plain&lang=en&utf&code=709444465162232'\n response = requests.get(url)\n parser = response.text\n print(parser)\n # proxies = set()\n # for i in parser:\n # proxy = i\n # print(i)\n # proxies.add(proxy)\n # return proxies\n return parser\n\n\nif __name__ == \"__main__\":\n thing = get_proxies()\n proxies = thing.split()\n print(proxies)\n # If you are copy pasting proxy ips, put in the list below\n # proxies = ['121.129.127.209:80', '124.41.215.238:45169', '185.93.3.123:8080', '194.182.64.67:3128', '106.0.38.174:8080', '163.172.175.210:3128', '13.92.196.150:8080']\n # proxies = get_proxies()\n\n proxy_pool = cycle(proxies)\n\n url = 'https://httpbin.org/ip'\n for i in range(1, len(proxies) - 1):\n # Get a proxy from the pool\n proxy = next(proxy_pool)\n print(\"Request #%d\" % i)\n try:\n response = requests.get(\n url, proxies={\"https\": \"http://\" + proxy, \"http\": \"http://\" + proxy})\n print(response.json())\n except:\n # Most free proxies will often get connection errors. You will have retry the entire request using another proxy to work.\n # We will just skip retries as its beyond the scope of this tutorial and we are only downloading a single url\n print(\"Skipping. Connnection error\")\n","sub_path":"read-file.py","file_name":"read-file.py","file_ext":"py","file_size_in_byte":1557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"107387702","text":"from __future__ import print_function\n\nimport glob\nimport math\nimport os\n\nfrom IPython import display\nfrom matplotlib import cm\nfrom matplotlib import gridspec\nfrom matplotlib import pyplot as plt\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom sklearn import metrics\nimport tensorflow as tf\nfrom tensorflow.python.data import Dataset\n\n\ntf.logging.set_verbosity(tf.logging.ERROR)\npd.options.display.max_rows = 10\npd.options.display.float_format = '{:.1f}'.format\n\nmnist_dataframe = pd.read_csv(\n \"mnist_train_small.csv\",\n sep=\",\",\n header=None\n)\n\n# Use just the first 10000 records for training/validation\nmnist_dataframe = mnist_dataframe.head(10000)\nmnist_dataframe = mnist_dataframe.reindex(np.random.permutation(mnist_dataframe.index))\n# print(mnist_dataframe.head())\n\n\ndef parse_labels_and_features(dataset):\n \"\"\"\n Extracts labels and features\n This is a good place to scale or transform the features if needed\n :param dataset: A Pandas 'Dataframe', containing the label on the first column\n and monochrome pixel values on the remaining columns, in row major order\n :return: A 'tuple' ‘(labels, features)’\n labels: A Pandas 'Series' features: A Pandas 'Dataframe'\n \"\"\"\n labels = dataset[0]\n # Dataframe.loc index ranges are inclusive at both ends\n features = dataset.loc[:, 1:784]\n\n # Scale the data to [0, 1] by dividing out the max value, 255\n features = features / 255\n\n return labels, features\n\n\ntraining_targets, training_examples = parse_labels_and_features(mnist_dataframe[:7500])\n# print(training_examples.describe())\n\nvalidation_targets, 
validation_examples = parse_labels_and_features(mnist_dataframe[7500:10000])\n# print(validation_examples.describe())\n\n\nrand_example = np.random.choice(training_examples.index)\n_, ax = plt.subplots()\nax.matshow(training_examples.loc[rand_example].values.reshape(28, 28))\nax.set_title(\"Label: %i\" % training_targets.loc[rand_example])\nax.grid(False)\nplt.show()\n\n\ndef construct_feature_columns():\n \"\"\"\n Construct the TensorFlow Feature Columns\n :return: A set of feature columns\n \"\"\"\n # There are 784 pixels in each image\n return set([tf.feature_column.numeric_column('pixels', shape=784)])\n\n\ndef create_training_input_fn(features, labels, batch_size, num_epochs=None, shffle=True):\n \"\"\"\n A custom input_fn for sending MNIST data to the estimator for training\n\n :param features: The training features\n :param labels: The training labels\n :param batch_size: Batch size to use during training\n :param num_epochs:\n :param shffle:\n :return: A function that returns batches of training features and labels\n during training\n \"\"\"\n def _input_fn(num_epochs=None, shuffle=True):\n # Input pipelines are reset with each call to .train(). To ensure model\n # gets a good model sampling of data, even when number of steps is small,\n # we shuffle all the data before creating the Dataset object\n idx = np.random.permutation(features.index)\n raw_features = {\"pixels\": features.reindex(idx)}\n raw_targets = np.array(labels[idx])\n\n ds = Dataset.from_tensor_slices((raw_features, raw_targets))\n ds = ds.batch(batch_size).repeat(num_epochs)\n\n if(shuffle):\n ds = ds.shuffle(10000)\n\n feature_batch, label_batch = ds.make_one_shot_iterator().get_next()\n\n return feature_batch, label_batch\n\n return _input_fn\n\n\ndef create_predict_input_fn(features, labels, batch_size):\n \"\"\"\n A custom input_fn for sending mnist data to the estimator for predictions\n :param features: The features to base predictions on\n :param labels: The labels of the prediction examples\n :param batch_size:\n :return: A function that returns features and labels for predictions\n \"\"\"\n\n def _input_fn():\n raw_features = {\"pixels\": features.values}\n raw_targets = np.array(labels)\n\n ds = Dataset.from_tensor_slices((raw_features, raw_targets))\n ds = ds.batch(batch_size)\n\n feature_batch, label_batch = ds.make_one_shot_iterator().get_next()\n\n return feature_batch, label_batch\n\n return _input_fn\n\n\ndef train_linear_classification_model(\n learning_rate,\n steps,\n batch_size,\n training_examples,\n training_targets,\n validation_examples,\n validation_targets\n):\n \"\"\"\n Trains a linear classification model for the MNIST digits dataset\n\n In addition to training, this function also prints training progress information,\n a plot of the training and validation loss over time, and a confusion matrix\n :param learning_rate: An 'int', the learning rate to use\n :param steps: A non-zero 'int', the total number of training steps\n A training step consists of a forward and backward pass using a single batch\n :param batch_size:\n :param training_examples: Training features\n :param training_targets: Training labels\n :param validation_examples: Validation features\n :param validation_targets: Validation labels\n\n :return: The trained 'LinearClassifier' object\n \"\"\"\n\n periods = 10\n steps_per_period = steps / periods\n\n # Create the input functions\n predict_training_input_fn = create_predict_input_fn(training_examples, training_targets, batch_size)\n predict_validation_input_fn = 
create_predict_input_fn(validation_examples, validation_targets, batch_size)\n training_input_fn = create_training_input_fn(training_examples, training_targets, batch_size)\n\n # Create a LinearClassifier object\n my_optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)\n my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)\n classifier = tf.estimator.LinearClassifier(\n feature_columns=construct_feature_columns(),\n n_classes=10,\n optimizer=my_optimizer,\n config=tf.estimator.RunConfig(keep_checkpoint_max=1)\n )\n\n # Train the model, but do so inside a loop so that we can periodically assess\n # loss metrics\n print(\"Training model...\")\n print(\"LogLoss error (on validation data):\")\n training_errors = []\n validation_errors = []\n for period in range(0, periods):\n # Train the model, starting from the prior state\n classifier.train(\n input_fn=training_input_fn,\n steps=steps_per_period\n )\n\n # Take a break and compute probabilities\n\n\n\n\n\n\n\n","sub_path":"TensorFlow/tfBoy/multi-class_classification_of_handwritten_digits.py","file_name":"multi-class_classification_of_handwritten_digits.py","file_ext":"py","file_size_in_byte":6457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"506452679","text":"## How to define difficulty ?\n## Factors considered\n## Home : 50% increase over base case\n## Away : 50% decrease over base case\n\n\nimport sys\nfrom BeautifulSoup import BeautifulSoup\nimport urllib2\nimport re\n\nclass Team:\n\n\thomeDiffiPercent = 0.5\n\tawayDiffiPercent = -0.5\n\n\tdef __init__(self, name, baseDiffi):\n\t\tself.name = name\n\t\tself.baseDiffi = baseDiffi\n\t\tself.homeDiffi = baseDiffi*(1+0.5)\n\t\tself.awayDiffi = baseDiffi*(1-0.5)\n\ndef getDiffiDict(prev_table_file):\n\tdiffiDict = {}\n\tnumTeams = 20\n\tfor line in prev_table_file:\n\t\tif line == '\\n':\n\t\t\treturn diffiDict\n\n\t\tparts = line.split('\\n')[0].split('.')\n\t\tteamName = parts[1]\n\t\tteamFinish = int(parts[0])\n\n\t\tbaseDiffi = numTeams + 1 - teamFinish\n\t\tdiffiDict[teamName] = Team(teamName,baseDiffi)\n\treturn diffiDict\n\nclass TeamPosition:\n\tdef __init__(self,name, position, played, won, draw, lost, goalsScored, goalsGiven, points):\n\t\t\tself.name = name\n\t\t\tself.position = position\n\t\t\tself.played = played\n\t\t\tself.won = won\n\t\t\tself.draw = draw\n\t\t\tself.lost = lost\n\t\t\tself.goalsScored = goalsScored\n\t\t\tself.goalsGiven = goalsGiven\n\t\t\tself.points = points\n\ndef getTeamPosition(line):\n\n\t\tnamePart = line[0:26]\n\t\tstatPart = line[26:len(line)]\n\n\t\tposition,name = namePart.split('.')\n\n\t\tstatPart1,statPart2 = statPart.split('-')\n\t\tplayed,won,draw,lost,goalsScored = statPart1.split()\n\t\tgoalsGiven, points= statPart2.split()\n\n\t\treturn TeamPosition(name.strip(), position.strip(), played.strip(), won.strip(), draw.strip(), lost.strip(), goalsScored.strip(), goalsGiven.strip(), points.strip())\n\ndef getCurrentDifficultyAndTable(diffi_dict):\n\t\tcurrentDifficultyDict = {}\n\n\t\turl = 'http://www.rsssf.com/tablese/eng2014.html'\n\t\tresponse = urllib2.urlopen(url)\n\t\thtml = response.read()\n\n\t\tsoup = BeautifulSoup(html)\n\t\tcontent = soup.pre.contents[0]\n\n\t\tlines = content.split('\\n')\n\t\ttable = []\n\n\t\ttableFilled = 0\n\t\tfor line in lines:\n\t\t\tline = line.strip()\n\t\t\tif(line.startswith('Table:') or len(line) < 1 or line.startswith('-') or line.startswith('Round') or line.startswith('[')):\n\t\t\t\tif(len(table) > 0 and 
line.startswith('Round')):\n\t\t\t\t\ttableFilled = 1\n\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tif (re.search('^[1-9]',line) is not None):\n\t\t\t\t\t#print(line)\n\t\t\t\t\tif tableFilled == 0:\n\t\t\t\t\t\ttable.append(getTeamPosition(line))\n\t\t\t\telse:\n\t\t\t\t\thomeTeamName = line[0:14]\n\t\t\t\t\thomeTeamName = homeTeamName.strip()\n\n\t\t\t\t\tawayTeamName = line.split('-')[1][2:]\n\t\t\t\t\tawayTeamName = awayTeamName.strip()\n\n\t\t\t\t\tkeys = diffi_dict.keys()\n\t\t\t\t\tawayTeam = None\n\t\t\t\t\thomeTeam = None\n\t\t\t\t\tfor key in keys:\n\t\t\t\t\t\tif key.startswith(homeTeamName):\n\t\t\t\t\t\t\thomeTeam = diffi_dict[key]\n\t\t\t\t\t\tif key.startswith(awayTeamName):\n\t\t\t\t\t\t\tawayTeam = diffi_dict[key]\n\n\t\t\t\t\tif homeTeam.name in currentDifficultyDict:\n\t\t\t\t\t\tcurrentDifficultyDict[homeTeam.name] = currentDifficultyDict[homeTeam.name] + awayTeam.awayDiffi\n\t\t\t\t\telse:\n\t\t\t\t\t\tcurrentDifficultyDict[homeTeam.name] = awayTeam.awayDiffi\n\n\t\t\t\t\tif awayTeam.name in currentDifficultyDict:\n\t\t\t\t\t\tcurrentDifficultyDict[awayTeam.name] = currentDifficultyDict[awayTeam.name] + homeTeam.homeDiffi\n\t\t\t\t\telse:\n\t\t\t\t\t\tcurrentDifficultyDict[awayTeam.name] = homeTeam.homeDiffi\n\n\t\treturn currentDifficultyDict,table\n\n\n\ndef main():\n\tprev_table_file = open(sys.argv[1])\n\n\tdiffi_dict = getDiffiDict(prev_table_file)\n\n\tcurrentDiffi,table = getCurrentDifficultyAndTable(diffi_dict)\n\n\tfinal_table = ()\n\tperfIndex_file = open('perf_index.txt','w+')\n\tperfIndex_file.write('Position,Name,Points,PerformanceIndex\\n')\n\tfor teamPos in table:\n\t\tteamName = teamPos.name\n\t\tpoints = teamPos.points\n\t\tplayed = teamPos.played\n\t\tdifficulty = currentDiffi[teamName]\n\t\tperfIndex = float(points)*float(difficulty)/float(played)\n\t\tperfIndex = \"{:4.1f}\".format(perfIndex)\n\t\tfinal_table = final_table + ((teamPos.position, teamName, points, perfIndex),)\n\t\tperfIndex_file.write(str(teamPos.position)+','+ str(teamName)+','+ str(points)+','+ str(perfIndex)+'\\n')\n\nif __name__ == '__main__':\n main()\n","sub_path":"pl.py","file_name":"pl.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"26385218","text":"import torch\r\nfrom torchvision import transforms\r\nfrom torchvision import datasets\r\nfrom torch.utils.data import DataLoader\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport matplotlib.pyplot as plt\r\n\r\n# prepare dataset\r\n\r\nbatch_size = 64\r\ntransform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\r\n\r\ntrain_dataset = datasets.MNIST(root='../dataset/mnist/', train=True, download=True, transform=transform)\r\ntrain_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)\r\ntest_dataset = datasets.MNIST(root='../dataset/mnist/', train=False, download=True, transform=transform)\r\n\r\ntest_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)\r\n\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\ninput = [\r\n 3,4,6,5,7,\r\n 2,4,6,8,2,\r\n 1,6,7,8,4,\r\n 9,7,4,6,2,\r\n 3,7,5,4,1\r\n]\r\ninput = torch.Tensor(input).view(1,1,5,5)\r\nconv_layer = torch.nn.Conv2d(1,1,kernel_size=3,stride=2,padding=1,bias=False)\r\nkernel = torch.Tensor([1,2,3,4,5,6,7,8,9]).view(1,1,3,3)\r\n\r\nconv_layer.weight.data = kernel.data\r\n\r\noutput = conv_layer(input)\r\nprint(output)\r\n\r\n\r\nclass 
Net(torch.nn.Module):\r\n def __init__(self):\r\n super(Net, self).__init__()\r\n self.conv1 = torch.nn.Conv2d(1,10,kernel_size=5)\r\n self.conv2 = torch.nn.Conv2d(10,20,kernel_size=5)\r\n self.pooling = torch.nn.MaxPool2d(2)\r\n self.fc = torch.nn.Linear(320,10)\r\n def forward(self, x):\r\n batch_size = x.size(0)\r\n x = F.relu(self.pooling(self.conv1(x)))\r\n x = F.relu(self.pooling(self.conv2(x)))\r\n x = x.view(batch_size , - 1)\r\n x = self.fc(x)\r\n return x\r\nmodel = Net()\r\nmodel.to(device)\r\n\r\ncriterion = torch.nn.CrossEntropyLoss()\r\noptimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.5)\r\n\r\ndef train(epoch):\r\n running_loss = 0.0\r\n for batch_idx,data in enumerate(train_loader,0):\r\n input,target = data\r\n inputs,target = input.to(device),target.to(device)\r\n optimizer.zero_grad()\r\n\r\n outputs=model(inputs)\r\n loss = criterion(outputs,target)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n\r\n running_loss += loss.item()\r\n if batch_idx % 300 == 299:\r\n print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, running_loss / 300))\r\n running_loss = 0.0\r\n\r\n\r\ndef test():\r\n correct = 0\r\n total = 0\r\n with torch.no_grad():\r\n for data in test_loader:\r\n images, labels = data\r\n images, labels = images.to(device), labels.to(device)\r\n outputs = model(images)\r\n _, predicted = torch.max(outputs.data, dim=1)\r\n total += labels.size(0)\r\n correct += (predicted == labels).sum().item()\r\n print('accuracy on test set: %d %% ' % (100 * correct / total))\r\n return correct / total\r\n\r\n\r\nif __name__ == '__main__':\r\n epoch_list = []\r\n acc_list = []\r\n\r\n for epoch in range(10):\r\n train(epoch)\r\n acc = test()\r\n epoch_list.append(epoch)\r\n acc_list.append(acc)\r\n\r\n plt.plot(epoch_list, acc_list)\r\n plt.ylabel('accuracy')\r\n plt.xlabel('epoch')\r\n plt.show()\r\n","sub_path":"chart_10_1.py","file_name":"chart_10_1.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"273267712","text":"import sys\nimport matplotlib\nimport math\nimport scipy\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom numpy import array\nimport glob\nfrom collections import namedtuple\nimport matplotlib.ticker as mticker\n\n# Excitation frequency and amplitude\nfa = 1.0e5\ndpa = 1.0e6\n\n# Number of Fubini harmonics in the outer loop\nNF = 20\n\n# Number Bessel summands\nNB = 20\n\n# Number of time steps per simulated time and number of simulated periods\nNtimesteps = 400\nNperiods = 2\n\n# Position divided by the shock formation distance\n# In the current implementation, the Fubini solution is singular at sigma = 0!\nsigma = 0.5\n\n# Duration of one excitation period\nTa = 1.0/fa\n\n# Vectors of dimensionless time and pressure\nfatau = []\npbydpa = []\n\n# Time loop\nfor i in range(0, Ntimesteps):\n \n # calculating time and dimensionless time\n tau = Ta*float(Nperiods)/float(Ntimesteps-1)*float(i)\n fatau.append(fa*tau)\n omegatau = 2.0*math.pi*fatau[i]\n \n # Loop over the harmonics of the Fubini solution\n # sumFT is the sum over the Fubini terms (outer loop)\n sumFT = 0.0\n for n in range(1, NF+1):\n \n coeff = 2.0/(float(n)*sigma)\n invcoeff = 1.0/coeff\n \n # Loop over the Bessel summands\n # sumBT is the sum of the Bessel summands\n sumBT = 0.0\n for m in range(0, NB+1):\n # Computing the factorials of the Bessel function\n facm = float(math.factorial(m))\n facmplusn = float(math.factorial(m+n))\n \n # Computing the Bessel summand and the 
sum of Bessel summands\n BT = float((-1)**m)/float(facm*facmplusn)*invcoeff**(2*m+n)\n sumBT = sumBT + BT\n \n FT = coeff*math.sin(float(n)*omegatau)*sumBT\n sumFT = sumFT + FT\n \n pbydpa.append(dpa*sumFT/dpa)\n\n# Plot results\nplt.plot(fatau, pbydpa)\nplt.show()","sub_path":"Fubini.py","file_name":"Fubini.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"400376053","text":"import socket\nimport sys\nimport cv2\nimport pickle\nimport numpy as np\nimport struct ## new\nimport zlib\nimport tensorflow as tf\nfrom align_custom import AlignCustom\nfrom face_feature import FaceFeature\nfrom mtcnn_detect import MTCNNDetect\nfrom tf_graph import FaceRecGraph\nimport argparse\nimport sys\nimport json\nimport time\nfrom threading import *\nimport multiprocessing\nfrom multiprocessing import Pipe\nimport mss\nimport time\nfrom FaceRecog import *\nglobal server_socket\nglobal FrameCounter\nglobal frameList\nglobal devices\nglobal noc\n\n\ndef add(a,b):\n\tc=a+b\n\tprint(c)\n\treturn c\n\ndef createSocket():\n\tglobal server_socket\n\tglobal FrameCounter\n\tglobal aligner\n\tglobal extract_feature\n\tglobal face_detect\n\tglobal devices\n\tglobal noc\n\tdevices=[]\n\tFrameCounter,aligner,extract_feature,face_detect=startRec()\n\tFrameCounter=0\n\tnoc=1000\n\tserver_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\t#server_socket.settimeout(0.1)\n\tprint('Socket created')\n\n\tserver_socket.bind(('',9000))\n\tprint('Socket bind complete')\n\tserver_socket.listen(noc)\n\tprint('Socket now listening')\n\treturn server_socket\n\t\ndef start(socket):\n\t\trpf=Thread(target=recPiFrame(socket))\n\t\trpf.start()\ndef connectDevice(server_socket):\n\tglobal devices\n\tconCount=0\n\ttry:\n\t\t\n\t\tprint(\"accepting\")\n\t\tconn,add=server_socket.accept()\n\t\ttime.sleep(0.1)\n\t\tprint(\"new device connected | address : \",add[0],\" \",add[1])\n\t\tif conn is not None:\n\t\t\tdevices.append(conn)\n\t\t\t\n\t\t\ttime.sleep(0.5)\n\texcept Exception as e:\n\t\tprint(e,\"error\")\n\t\ttime.sleep(0.1)\n\ttime.sleep(0.5)\n\ndef recPiFrame(server_socket):\n\tglobal frameList\n\tglobal aligner\n\tglobal extract_feature\n\tglobal face_detect\n\tglobal FrameCounter\n\tglobal devices\n\tframeList=[]\t\n\td=0\n\tconnectDevice(server_socket)\n\t\n\tconn1=devices[0]\n\tprint(\"|--- Conection has been established with pi ---|\")\n\tp_output, p_input = Pipe()\n\tsvf=Thread(target=sendVideoFrame(server_socket))\n\tsvf.start()\n\ttime.sleep(0.01)\n\tdata = b\"\"\n\tpayload_size = struct.calcsize(\">L\")\n\ttime.sleep(1)\n\tprint(\"payload_size: {}\".format(payload_size))\n\twhile True:\n\t\tprint(\"recv:\")\n\t\ttime.sleep(0.01)\n\t\twhile len(data) < payload_size:\n\t\t\tdata += conn1.recv(4096)\n\t\tpacked_msg_size = data[:payload_size]\n\t\tdata = data[payload_size:]\n\t\tmsg_size = struct.unpack(\">L\", packed_msg_size)[0]\n\t\twhile len(data) < msg_size:\n\t\t\tdata += conn1.recv(4096)\n\t\tframe_data = data[:msg_size]\n\t\tdata = data[msg_size:]\n\t\tframe=pickle.loads(frame_data, fix_imports=True, encoding=\"bytes\")\n\t\tframe = cv2.imdecode(frame, cv2.IMREAD_COLOR)\n\t\tframeList,FrameCounter,frame=camera_recog(frame,frameList,FrameCounter,aligner,extract_feature,face_detect)\n\t\ttime.sleep(0.01)\n\t\t\n\t\tcv2.waitKey(1)\n\n\ndef sendVideoFrame(server_socket):\n\tglobal FrameCounter\n\tglobal frameList\n\tglobal devices\n\tprint(\"waiting for devices to connect\")\n\ttime.sleep(1.0)\n\timg_counter = 
0\n\tconnectDevice(server_socket)\n\tencode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]\n\tprint('sending')\n\tcmdRecThread=Thread(target=RecCommand)\n\tcmdRecThread.start()\n\tcmdRecThread.join()\n\tfail=0\n\n\twhile True:\n\t\ttry:\n\t\t\tcountDevices=1\n\t\t\twhile (countDevices 3:\n\t\t\t\t\tframe =frameList[FrameCounter-2]\n\t\t\t\t\tcv2.imshow('fcontroller'+str(len(frame)),frame)\n\t\t\t\t\tresult, frame = cv2.imencode('.jpg', frame, encode_param)\n\t\t\t\t\t#data = zlib.compress(pickle.dumps(frame, 0))\n\t\t\t\t\tdata = pickle.dumps(frame, 0)\n\t\t\t\t\tsize = len(data)\n\n\t\t\t\t\t#print(\"frames:\",\"{}: {}\".format(img_counter, size))\n\t\t\t\t\t\n\t\t\t\t\tconn2=devices[countDevices]\n\t\t\t\t\tds=conn2.send(frame)\n\t\t\t\t\tif(ds==0):\n\t\t\t\t\t\tconnectDevice(server_socket)\n\t\t\t\t\t\n\t\t\t\t\ttime.sleep(0.05)\n\t\t\t\t\tcountDevices+=1\n\t\t\t\t\t\n\t\t\t\t\timg_counter += 1\n\t\t\t\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\t\t\t\tbreak\t\n\t\texcept Exception as e:\n\t\t\tpass\n\t\t\n\t\t\ndef RecCommand():\n\ttime.sleep(1)\n\twhile True:\t\n\t\tglobal devices\n\t\tcd=1\n\t\twhile cd= 1\n\t\tassert minDelay >= 0\n\n\t\tself.quantity = quantity\n\t\tself.minDelay = minDelay\n\t\tself.canUnhook = canUnhook\n\t\tself.name = name\n\n\tdef toXml(self, xmlDoc):\n\t\t\"\"\"\n\t\tExports the Machine instance to an XML tree node and returns the node\n\t\tinstance. xmlDoc to used to create the XML tree node element.\n\t\t\"\"\"\n\t\tnode = xmlDoc.createElement(\"machine\")\n\t\tnode.setAttribute(\"name\", self.name)\n\t\tnode.setAttribute(\"quantity\", str(self.quantity))\n\t\tnode.setAttribute(\"minDelay\", str(self.minDelay))\n\t\tnode.setAttribute(\"canUnhook\", str(self.canUnhook))\n\t\treturn node\n\n\t@staticmethod\n\tdef fromXml(element):\n\t\t\"\"\"\n\t\tCreates a Machine instance from XML node tree element and returns it.\n\t\t\"\"\"\n\t\treturn Machine(\n\t\t\tname = element.getAttribute(\"name\"),\n\t\t\tquantity = int(element.getAttribute(\"quantity\")),\n\t\t\tminDelay = int(element.getAttribute(\"minDelay\")),\n\t\t\tcanUnhook = strToBool(element.getAttribute(\"canUnhook\"))\n\t\t)\n\nclass Plant(object):\n\t\"\"\"\n\tProvides the implementation of a Plant (factory) with a list of Machine \n\tinstances.\n\t\"\"\"\n\tdef __init__(self):\n\t\t\"\"\"\n\t\tmachines is a list of ordered Machine instances (by sequence in Plant).\n\t\tminProcTime is the minimum (constant) processing time for any Order \n\t\tgoing through the Plant. This is the summation of all the times between\n\t\tevery two Machine instances in the Plant.\n\t\t\"\"\"\n\t\tobject.__init__(self)\n\t\tself.machines = []\n\n\tdef toXml(self):\n\t\t\"\"\"\n\t\tCreates an XML tree node from the Plant instance and returns it.\n\t\t\"\"\"\n\t\tdomImp = minidom.getDOMImplementation()\n\t\txmlDoc = domImp.createDocument(None, \"plant\", None)\n\n\t\tfor m in self.machines:\n\t\t\txmlDoc.documentElement.appendChild(m.toXml(xmlDoc))\n\n\t\treturn xmlDoc.documentElement\n\n\tdef toXmlFile(self, filename):\n\t\t\"\"\"\n\t\tSaves the Plant instance to an XML file.\n\t\t\"\"\"\n\t\tfile = open(filename, \"w\")\n\t\tfile.write(self.toXml().toprettyxml())\n\t\tfile.close()\n\n\t@staticmethod\n\tdef fromXml(xmlDoc):\n\t\t\"\"\"\n\t\tA static method that loads a Plant instance (and returns it) from \n\t\tan XML document. 
xmlDoc is the document instance.\n\t\t\"\"\"\n\t\tplant = Plant()\n\t\tfor e in xmlDoc.getElementsByTagName(\"machine\"):\n\t\t\tplant.addMachine(Machine.fromXml(e))\n\t\treturn plant\n\n\t@staticmethod\n\tdef fromXmlFile(filename):\n\t\t\"\"\"\n\t\tA static methods that loads a Plant instance (and returns it) from \n\t\tan XML file (str filename).\n\t\t\"\"\"\n\t\tfile = open(filename, \"r\")\n\t\tdoc = minidom.parse(file)\n\t\tplant = Plant.fromXml(doc)\n\t\tfile.close()\n\t\treturn plant\n\n\tdef addMachine(self, machine):\n\t\t\"\"\"\n\t\tAdd a Machine instance to the Plant. If the Machine instance or its\n\t\tname is already in the list of machines, an Exception will be thrown.\n\t\tAfter adding a Machine instance, minProcTime is updated.\n\t\t\"\"\"\n\t\tassert machine not in self.machines\n\n\t\tfor m in self.machines:\n\t\t\tif m.name == machine.name:\n\t\t\t\traise Exception(\"Machine name already in plant\")\n\t\tself.machines.append(machine)\n","sub_path":"Projects/PlantMaker/archive/20100420/plant.py","file_name":"plant.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"9260589","text":"#!/usr/bin/env python3\nimport argparse\nimport logging\nimport gc\nimport objgraph\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.python.lib.io import file_io\nfrom tensorflow.python.client import device_lib\n\n\nimport numpy as np\n\nfrom trainer.ellington_library import EllingtonLibrary, Track\nfrom trainer.audio import Audio\nfrom trainer.generator import LibraryIterator, TrackIterator\nfrom trainer.model import model_gen\n\nclass CustomCallback(keras.callbacks.Callback):\n def __init__(self, jobd):\n self.step = 0\n self.jobd = jobd\n self.metric_cache = {}\n\n def on_batch_end(self, batch, logs={}):\n self.step = self.step + 1\n for k in self.params['metrics']:\n if k in logs:\n self.metric_cache[k] = self.metric_cache.get(k, 0) + logs[k]\n\n metrics_log = ''\n for (k, v) in self.metric_cache.items():\n if abs(v) > 1e-3:\n metrics_log += ' - %s: %.4f' % (k, v)\n else:\n metrics_log += ' - %s: %.4e' % (k, v)\n\n print('step: {}/{} ::{}'.format(self.step,\n self.params['steps'],\n metrics_log))\n self.metric_cache.clear()\n\n gc.collect() \n\n def on_epoch_end(self, epoch, logs={}): \n print(\"Saving model\")\n\n # Save the model locally\n self.model.save('model.h5')\n\n # Save the model to the Cloud Storage bucket's jobs directory\n print(\"Saving to : \" + self.jobd)\n with file_io.FileIO('model.h5', mode='rb') as input_f:\n with file_io.FileIO(self.jobd + '-model.h5', mode='w+') as output_f:\n output_f.write(input_f.read())\n\n\ndef main(data_dir=\"data/smnp/\", ellington_lib=\"data/example.el\", job_dir=\"logs\"):\n # Start logging\n logging.basicConfig(\n format='%(asctime)s %(levelname)s %(module)s %(lineno)d : %(message)s', level=logging.DEBUG)\n\n # List the available tensorflow devices\n print(device_lib.list_local_devices()) \n\n # Set up the data input etc.\n train_lib = EllingtonLibrary.from_file(ellington_lib)\n valid_lib = EllingtonLibrary.from_file(ellington_lib)\n\n # Set up the generators to yield training data\n training_gen = LibraryIterator(\n train_lib, data_dir, samples=128, batchsize=512, start=30, end=150, iterations=1)\n validation_gen = LibraryIterator(\n valid_lib, data_dir, samples=4, batchsize=64, start=30, end=200, iterations=1)\n\n # Fix an input size for our model\n input_time_dim = 1720\n input_freq_dim = 256\n\n # Create the model, 
print info\n model = model_gen(input_time_dim, input_freq_dim)\n print(model.summary())\n\n # Compile the model\n sgd = keras.optimizers.SGD(\n lr=1e-4, decay=1e-6, momentum=0.9, nesterov=True)\n model.compile(optimizer=sgd,\n loss='mse',\n metrics=['mae', 'msle', 'mape'])\n\n # Set up callbacks - one for tensorboard\n tfcallback = keras.callbacks.TensorBoard(log_dir=job_dir + \"/tensorboard/\",\n histogram_freq=0,\n write_grads=True,\n write_graph=False,\n write_images=False,\n batch_size=32)\n # And another for our custom callback that saves the model.\n bcallback = CustomCallback(job_dir)\n\n # One for a progress bar\n prog_bar = keras.callbacks.ProgbarLogger(count_mode='steps')\n\n # Fit the model using all of the above!\n model.fit_generator(\n generator = training_gen.batch(), \n steps_per_epoch = training_gen.len(), \n epochs = 1000, \n verbose = 2, \n callbacks = [tfcallback, bcallback, prog_bar], \n validation_data = validation_gen.batch(), \n validation_steps = validation_gen.len(),\n use_multiprocessing=True \n )\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--data-dir', required=True, help='Path to training data, in the form of compressed numpy arrays')\n parser.add_argument('--ellington-lib', required=True, help='The ellington library from which to read track names and BPMs')\n parser.add_argument('--job-dir', required=True, help='The directory to export the model, and store temp files')\n args = parser.parse_args()\n arguments = args.__dict__\n main(**arguments)\n","sub_path":"bellson/bellson.py","file_name":"bellson.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"201939965","text":"import tensorflow as tf\n\ndef create_sparse_map(Vals, starts, lens, mb, mtl, ith, prefix):\n '''\n Create sparse tensor given an array and 2 index vectors.\n input:\n Vals: 1-D tensor (mb, ), dense,\n starts: 1-D tensor (mb, ) starting position\n lens: 1-D tensor (mb, ) size\n mb: minibatch size, scalar\n mtl: max text length, a scalar\n return:\n st: sparse tensor, [mb by mtl]\n '''\n \n sp_shape = [mb, mtl]\n value_list, idx_list = [], []\n\n for i in xrange(mb):\n l = tf.reshape(tf.slice(lens, [i], [1]), [], name='%s_reshape_i%d_mb%d' % (prefix, ith, i))\n s = tf.slice(starts, [i], [1], name='%s_slice_s_i%d_mb%d'% (prefix, ith, i))\n val = tf.slice(Vals, s, [l], name='%s_slice_v_i%d_mb%d' % (prefix, ith, i))\n value_list.append(val)\n col1 = tf.fill([l, 1], i, name='%s_fill_1_i%d_mb%d' % (prefix, ith, i))\n col2 = tf.reshape(tf.range(0, l), [l,1], name='%s_fill_2_i%d_mb%d' % (prefix, ith, i))\n idx_list.append(tf.concat(1, [col1, col2])) # l1 * 2 \n\n values = tf.concat(0, value_list, name='%s_concat_v_i%d' % (prefix, ith))\n indices = tf.concat(0, idx_list, name='%s_concat_i_i%d' % (prefix, ith))\n\n indices = tf.to_int64(indices, name='%s_toint64_i%d' % (prefix, ith))\n sp_shape = tf.to_int64(sp_shape, name='%s_toint64_i%d' % (prefix, ith))\n st = tf.SparseTensor(indices, values, sp_shape)\n return st\n","sub_path":"deprecated/sparse_map.py","file_name":"sparse_map.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"174355747","text":"# -*- coding: utf-8 -*-\n\n\"\"\" How to create endpoints into REST service \"\"\"\n\nfrom .. 
import get_logger\nfrom ..meta import Meta\nfrom confs.config import ALL_API_URL\n\nlogger = get_logger(__name__)\n\n\nclass Endpoints(object):\n \"\"\" Handling endpoints creation\"\"\"\n\n rest_api = None\n\n def __init__(self, api):\n super(Endpoints, self).__init__()\n self.rest_api = api\n\n def create_single(self, resource, endpoints, endkey):\n \"\"\" Adding a single restpoint from a Resource Class \"\"\"\n\n urls = []\n\n for endpoint in endpoints:\n address = ALL_API_URL + '/' + endpoint\n logger.info(\"Mapping '%s' res to '%s'\",\n resource.__name__, address)\n # Normal endpoint, e.g. /api/foo\n urls.append(address)\n # Special endpoint, e.g. /api/foo/:endkey\n if endkey is not None:\n urls.append(address + '/<' + endkey + '>')\n\n # Create the restful resource with it\n self.rest_api.add_resource(resource, *urls)\n\n def create_many(self, resources):\n \"\"\" Automatic creation from an array of resources \"\"\"\n # For each RESTful resource i receive\n for resource in resources:\n endpoint, endkey = resource().get_endpoint()\n self.create_single(resource, [endpoint], endkey)\n\n def many_from_module(self, module):\n \"\"\" Automatic creation of endpoint from specified resources \"\"\"\n\n resources = Meta().get_new_classes_from_module(module)\n # Init restful plugin\n if len(resources) > 0:\n self.create_many(resources)\n\n def services_startup(self, models, secured=False):\n \"\"\"\n DEPRECATED\n\n BUT MAY STILL BE USEFULL\n\n A special case for RethinkDB and other main services?\n\n This is where you tell the app what to do with requests.\n Note: For this resources make sure you create the table!\n \"\"\"\n from .services.rethink import create_rdbjson_resources\n\n for name, content in create_rdbjson_resources(models, secured).items():\n (rclass, rname) = content\n # print rname, rclass.__dict__\n\n # Add resource from ORM class\n address = ALL_API_URL + '/' + rname\n self.rest_api.add_resource(\n rclass,\n address,\n address + '/')\n # Warning: due to restful plugin system,\n # methods get and get(value) require 2 different resources.\n # This is why we provide two times the same resource\n\n logger.info(\"Resource '\" + rname + \"' [\" + name + \"]: loaded\")\n","sub_path":"restapi/resources/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"337346904","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport json\nfrom atm.core import auth,main\n\nuser_data={'account_id':None,\n 'authentication':False,\n 'account_data':None}\nprint('登录')\nauth.login(user_data)\nwith open('product') as data:\n product_data = json.load(data)\n\nwhile True:\n for name,values in product_data.items():\n print(name,values)\n choose=input('请输入要购买的物品或输入q退出')\n if choose=='q':\n break\n main.handle(user_data, 'consume',product_data[choose])\n","sub_path":"shopping/Shopping.py","file_name":"Shopping.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"60415884","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as anm\nfrom math import pi, cos, sin, tan\n\n\n\nimport invkinema\n\n\n# 同時変換行列\n\nl1 = 1\nl2 = 1\nl3 = 1\nl4 = 1\n\ndef D1(q):\n return np.array([\n [cos(q[0,0]), -sin(q[0,0]), 0],\n [sin(q[0,0]), cos(q[0,0]), 0],\n [0, 0, 1],\n ])\n\ndef D2(q):\n return np.array([\n [1, 0, l1],\n [0, 1, 0],\n [0, 0, 1],\n ])\n\ndef 
D3(q):\n return np.array([\n [cos(q[1,0]), -sin(q[1,0]), 0],\n [sin(q[1,0]), cos(q[1,0]), 0],\n [0, 0, 1],\n ])\n\ndef D4(q):\n return np.array([\n [1, 0, l2],\n [0, 1, 0],\n [0, 0, 1],\n ])\n\ndef D5(q):\n theta = 270/180*pi\n return np.array([\n [cos(theta), -sin(theta), 0],\n [sin(theta), cos(theta), 0],\n [0, 0, 1],\n ])\n\ndef D6(q):\n return np.array([\n [1, 0, l3],\n [0, 1, 0],\n [0, 0, 1],\n ])\n\ndef D7(q):\n return np.array([\n [cos(q[2,0]), -sin(q[2,0]), 0],\n [sin(q[2,0]), cos(q[2,0]), 0],\n [0, 0, 1],\n ])\n\ndef D8(q):\n return np.array([\n [1, 0, l4],\n [0, 1, 0],\n [0, 0, 1],\n ])\n\nD_func_all = [D1, D2, D3, D4, D5, D6, D7, D8]\n\n\n# 図示\nclass Hoge:\n \n \n def __init__(self,):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n # global q\n # q = np.array([[60, -60, 60]]).T * pi/180\n\n # D_all = [Di(q) for Di in D_func_all]\n\n # for j, D in enumerate(D_all):\n # if j == 0:\n # D_w_all = [D]\n # else:\n # D_w_all.append(D_w_all[j-1] @ D)\n\n # x = []\n # y = []\n # for D in D_w_all:\n # x.append(D[0, 2])\n # y.append(D[1, 2])\n\n\n\n # for j, D_w in enumerate(D_w_all):\n # ax.scatter(\n # D_w[0, 2], D_w[1, 2], label = str(j+1),\n # )\n\n ax.legend()\n ax.grid(True)\n\n\n ax.set_xlim(-3, 3)\n ax.set_ylim(-3, 3)\n\n\n time_template = 'time = %s [epoch]'\n ax.text(-0.5, -1, time_template % 0, size = 10)\n\n\n self.q = [np.random.rand()*2*pi for k in range(3)]\n self.q = np.array([self.q]).T\n\n def update(i):\n \n ax.cla()\n print(i)\n \n xd = np.array([[2.8, 0.01*i-1, pi/2]]).T\n \n \n if i == 0:\n\n self.q = invkinema.inv_kinema(xd)\n else:\n self.q = invkinema.inv_kinema(xd, self.q)\n\n \n D_all = [Di(self.q) for Di in D_func_all]\n\n D_w_all = []\n for j, D in enumerate(D_all):\n if j == 0:\n D_w_all.append(D)\n else:\n D_w_all.append(D_w_all[j-1] @ D)\n\n x = []\n y = []\n for D in D_w_all:\n x.append(D[0, 2])\n y.append(D[1, 2])\n\n\n ax.plot(x, y, label='leg')\n\n for j, D_w in enumerate(D_w_all):\n ax.scatter(\n D_w[0, 2], D_w[1, 2], label = str(j+1),\n )\n\n ax.legend()\n ax.grid(True)\n \n ax.text(-0.5, -1, time_template % i, size = 10)\n \n ax.set_aspect('equal')\n ax.set_xlim(-3, 3)\n ax.set_ylim(-3, 3)\n\n return\n\n ani = anm.FuncAnimation(\n fig = fig,\n func = update,\n frames = 100,\n )\n\n ani.save('hoge.gif')\n\n plt.show()\n\n\nif __name__ == '__main__':\n hoge = Hoge()","sub_path":"leg_anime.py","file_name":"leg_anime.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"399878503","text":"import numpy\nimport matplotlib.pyplot as plt\n\n#Create the arrays that represents the values of the x and y axis:\n\nx = [1,2,3,5,6,7,8,9,10,12,13,14,15,16,18,19,21,22]\ny = [100,90,80,60,60,55,60,65,70,70,75,76,78,79,90,99,99,100]\n\n#NumPy has a method that lets us make a polynomial model:\n\nmymodel = numpy.poly1d(numpy.polyfit(x, y, 3))\n\n#Then specify how the line will display, we start at position 1, and end at position 22:\n\nmyline = numpy.linspace(1, 22, 100)\n\n#Draw the original scatter plot:\n\nplt.scatter(x, y)\n\n#Draw the line of polynomial regression:\n\nplt.plot(myline, mymodel(myline))\n\n#Display the diagram:\n\nplt.show()","sub_path":"dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"194061422","text":"# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the 
\"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nsys.path.append(r'./efficientdet-pytorch')\nimport torch\nfrom effdet.config import get_efficientdet_config\nfrom effdet.efficientdet import EfficientDet\nimport argparse\n\nparser = argparse.ArgumentParser(description='pth to onnx')\n\n\nparser.add_argument('--batch_size', type=int, default=1, metavar='N',\n help='batch size 1/4/8/16/32')\nparser.add_argument('--checkpoint', type=str, default='d0.pth', metavar='N',\n help='pytorch checkpoint path ')\nparser.add_argument('--out', type=str, default='d7.onnx', metavar='N',\n help='export onnx model')\n\n\nargs = parser.parse_args()\nconfig = get_efficientdet_config(model_name='tf_efficientdet_d7')\nmodel = EfficientDet(config=config,pretrained_backbone=False)\nmodel_path = args.checkpoint\nmodel.load_state_dict(torch.load(model_path,map_location=torch.device('cpu')))\nmodel.eval()\nexample = torch.randn(args.batch_size, 3, 1536, 1536)\nexport_onnx_file = args.out\ntorch.onnx.export(model, example, export_onnx_file, do_constant_folding=True, verbose=True, opset_version=11)","sub_path":"ACL_PyTorch/contrib/cv/detection/EfficientDetD7/EfficientDetD7_pth2onnx.py","file_name":"EfficientDetD7_pth2onnx.py","file_ext":"py","file_size_in_byte":1671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"581531114","text":"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. 
You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"\nAn example for computing correlation matrix.\nRun with:\n bin/spark-submit examples/src/main/python/ml/correlation_example.py\n\"\"\"\nfrom __future__ import print_function\n\n# $example on$\nfrom pyspark.ml.linalg import Vectors\nfrom pyspark.ml.stat import Correlation\n# $example off$\nfrom pyspark.sql import SparkSession\nimport time\nimport sys\n\n\npartition = float(sys.argv[1])\nnum_parts = int(sys.argv[2])\ninput_file = sys.argv[3]\n\nif __name__ == \"__main__\":\n spark = SparkSession \\\n .builder \\\n .appName(\"CorrelationExample\") \\\n .getOrCreate()\n\n # $example on$\n df = spark.read.format(\"libsvm\").load(input_file)\n\n df = df.sample(False, partition).coalesce(num_parts)\n\n start = time.time()\n r1 = Correlation.corr(df, \"features\").head()\n end = time.time()\n run_time = end - start\n print(\"time: \" + str(run_time))\n print(\"Pearson correlation matrix:\\n\" + str(r1[0]))\n\n\n # $example off$\n\n spark.stop()\n","sub_path":"spark-mllib/pearson.py","file_name":"pearson.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"529075403","text":"#!/usr/bin/env python\n\n\"\"\"\n ros_tcp_comm tcp_comm Node\n\n raw Authors: Nicholas McCullough and Joseph Neidigh\n\n author: Haijun Wang\n\n This node sends and receive messages across a lan or wireless network via a TCP connection.\n\n\n Currently this node receives location data from the turtlesim node. 
To\n send different data change the params inside the 'tcp_comm.launch' file.\n\n\"\"\"\n\nimport socket\nimport sys\nimport struct\nimport zlib\nimport rospy\nimport rostopic\nimport time\nfrom geometry_msgs.msg import Twist\nfrom std_msgs.msg import Empty\nfrom sensor_msgs.msg import Imu\nfrom xunjian_nav.msg import Encoder, Ultrasound\nfrom tf.transformations import euler_from_quaternion\n\nHEADER=0xFA\nIMU_ID=0x10\nENC_ID=0x11\nULTRA_ID=0x12\nVEL_ID=0x13\nRESET_ID=0x14\nMAX_PACK_LEN=41\nQ30=1073741824.0\nconnect_flag = True\n\nclass Tcp_comm():\n\tdef __init__(self):\n\t\trospy.init_node('tcp_comm')\n\n\t\tRECEIVER_IP = rospy.get_param('~ip')\n\t\tPORT = rospy.get_param('~port_number')\n\n\t\tself.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\t#socket connection\n\t\ttry:\n\t\t\tself.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)#reuse addr\n\t\t\tself.sock.bind((\"\", PORT))\n\t\t\tself.sock.listen(1)\n\t\t\trospy.loginfo('waiting for connection')\n\t\t\tself.sock, addr = self.sock.accept()#block here and waite for connection\n\t\t\trospy.loginfo(\"connectted\")\n\t\texcept Exception as e:\n\t\t\tself.sock.close()\n\t\t\trospy.loginfo(\"CONNECTION ERROR\")\n\t\t\trospy.loginfo(e)\n\t\t\tsys.exit()\n\n\t\trospy.Subscriber(\"smoother_cmd_vel\", Twist, self.vel_callback, queue_size=1)\n\t\trospy.Subscriber(\"encoder_reset\", Empty, self.rst_callback, queue_size=1)\n\n\t\tenc_pub = rospy.Publisher(\"encoder\", Encoder, queue_size=10)\n\t\tut_pub = rospy.Publisher(\"range_dist\", Ultrasound, queue_size=10)\n\t\timu_pub = rospy.Publisher(\"imu\",Imu,queue_size=10)\n\n\t\tenc_msg = Encoder()\n\t\tut_msg = Ultrasound()\n\t\timu_msg = Imu()\n\t\tglobal connect_flag\n\t\twhile True:#receive data\n\t\t\tconnect_flag = True\n\t\t\ttry:\n\t\t\t\tself.sock.settimeout(5)#timeout 5s\n\t\t\t\tdata=self.recv_msg()\n\t\t\t\tself.sock.settimeout(None)\n\t\t\t\tnow = rospy.Time.now()\n\t\t\t\tif data is not None:\n\t\t\t\t\tif data[0]==ENC_ID:\n\t\t\t\t\t\trospy.loginfo('recv enc data,%d,%d,%d,%d',data[1],data[2],data[3],data[4])\n\t\t\t\t\t\tenc_msg.leftEncoder=data[1]\n\t\t\t\t\t\tenc_msg.rightEncoder=data[2]\n\t\t\t\t\t\tenc_msg.vx = data[3]\n\t\t\t\t\t\tenc_msg.w = data[4]\n\t\t\t\t\t\tenc_pub.publish(enc_msg)\n\t\t\t\t\telif data[0]==ULTRA_ID:\n\t\t\t\t\t\trospy.loginfo('recv ultra data')\n\t\t\t\t\t\tut_msg.ultra_1=data[1]\n\t\t\t\t\t\tut_msg.ultra_2=data[2]\n\t\t\t\t\t\tut_msg.ultra_3=data[3]\n\t\t\t\t\t\tut_msg.ultra_4=data[4]\n\t\t\t\t\t\tut_msg.ultra_5=data[5]\n\t\t\t\t\t\tut_msg.ultra_6=data[6]\n\t\t\t\t\t\tut_pub.publish(ut_msg)\n\t\t\t\t\telif data[0]==IMU_ID:\n\t\t\t\t\t\trospy.loginfo('recv imu data')\n\t\t\t\t\t\timu_msg.header.frame_id = \"imu\"\n\t\t\t\t\t\timu_msg.header.stamp = 
now\n\t\t\t\t\t\timu_msg.angular_velocity.x=data[1]/65536.0*4000.0\n\t\t\t\t\t\timu_msg.angular_velocity.y=data[2]/65536.0*4000.0\n\t\t\t\t\t\timu_msg.angular_velocity.z=data[3]/65536.0*4000.0\n\t\t\t\t\t\timu_msg.linear_acceleration.x=data[4]/65536.0*4.0\n\t\t\t\t\t\timu_msg.linear_acceleration.y=data[5]/65536.0*4.0\n\t\t\t\t\t\timu_msg.linear_acceleration.z=data[6]/65536.0*4.0\n\t\t\t\t\t\timu_msg.orientation.w=data[7]/Q30\n\t\t\t\t\t\timu_msg.orientation.x=data[8]/Q30\n\t\t\t\t\t\timu_msg.orientation.y=data[9]/Q30\n\t\t\t\t\t\timu_msg.orientation.z=data[10]/Q30\n\n\t\t\t\t\t\timu_pub.publish(imu_msg)\n\t\t\t\t\t\t#(roll,pitch,yaw)=euler_from_quaternion([imu_msg.orientation.x,imu_msg.orientation.y,imu_msg.orientation.z,imu_msg.orientation.w])\n\t\t\t\t\t\t#rospy.loginfo((roll*57.3,pitch*57.3,yaw*57.3))\n\t\t\t\t\telse:\n\t\t\t\t\t\trospy.loginfo(\"error: unrecognized header,0x%x\",data[0])\n\t\t\t\telse:\n\t\t\t\t\trospy.loginfo(\"error: data loss or connection lost\")\n\n\t\t\texcept socket.timeout:\n\t\t\t\tconnect_flag = False\n\t\t\t\tself.sock.shutdown(2)\n\t\t\t\tself.sock.close()\n\t\t\t\trospy.loginfo(\"socket timeout\")\n\t\t\t\trospy.loginfo(\"waiting for reconnection\")\n\t\t\t\tself.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\t\t\tself.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\t\t\t\tself.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)\n\t\t\t\tself.sock.bind((\"\", PORT))\n\t\t\t\tself.sock.listen(1)\n\t\t\t\tself.sock, addr = self.sock.accept()\n\t\t\t\trospy.loginfo(\"connectted\")\n\t\t\t\tconnect_flag = True\n\t\tself.sock.close()\n\t\trospy.spin()\n\n\tdef vel_callback(self, msg):\n\t\tglobal connect_flag\n\t\tif connect_flag:\n\t\t\tdata = [(int)(msg.linear.x*1000), (int)(msg.angular.z*1000)]#extract data from msg\n\t\t\trospy.loginfo('set vel:%d,%d',data[0],data[1])\n\t\t\tpack = self.msg_pack(VEL_ID, data)#pack the data\n\t\t\t#print repr(pack)\n\t\t\ttry:\n\t\t\t\tself.sock.sendall(pack)\n\t\t\texcept Exception as e:\n\t\t\t\tconnect_flag=False\n\t\t\t\trospy.loginfo(\"SENDER ERROR\")\n\t\t\t\trospy.loginfo(e)\n\n\tdef rst_callback(self, msg):\n\t\tglobal connect_flag\n\t\tif connect_flag:\n\t\t\tdata = [0x01]#reset encoder\n\t\t\tpack = self.msg_pack(RESET_ID, data)\n\t\t\ttry:\n\t\t\t\tself.sock.sendall(pack)\n\t\t\t\trospy.loginfo('reset encoder')\n\t\t\texcept Exception as e:\n\t\t\t\tconnect_flag = False\n\t\t\t\trospy.loginfo(\"SENDER ERROR\")\n\t\t\t\trospy.loginfo(e)\n\n\tdef msg_pack(self, idd, data):\n\t\tpack = struct.pack('>B',HEADER)#pack header\n\t\tpack = pack+struct.pack('>B',len(data)*4+1)\t#pack len\n\t\tpack = pack + struct.pack('>B',idd) #pack header\n\t\t# pack data one by one\n\t\tfor item in data:\n\t\t\tpack=pack+struct.pack('>i',item)\n\t\tpack = pack + struct.pack('>B',0xAA)\n\t\treturn pack\n\n\tdef recv_msg(self):\n\t\t#Read message length and unpack it into an integer\n\t\theader = self.recvall(1)#first 1 data is the header\n\t\tif not header:\n\t\t\treturn None\n\t\traw_len= self.recvall(1)#next is len\n\t\tif not raw_len:\n\t\t\treturn None\n\t\tdata_len = struct.unpack('>B',raw_len)[0]#len is unsigned byte\n\t\t# Read the message data\n\t\t#rospy.loginfo('recv pack_len:%d',data_len)\n\t\tif data_len<1 or data_len>MAX_PACK_LEN:\n\t\t\tself.recvall(4096)\n\t\t\treturn None\n\t\tpack = self.recvall(data_len)\n\t\tif not pack:\n\t\t\treturn None\n\t\tdata = struct.unpack('>B'+str((data_len-1)/4)+'i',pack)#a byte header + (lenbyte-1)/4 int\n\t\tchk= self.recvall(1)#last is chk\n\t\treturn 
data\n\n\tdef recvall(self, n):\n\t\t# Helper function to recv n bytes or return None if EOF is hit\n\t\tdata = ''\n\t\twhile len(data) < n:\n\t\t\tpacket = self.sock.recv(n - len(data))\n\t\t\tif not packet:\n\t\t\t\treturn None\n\t\t\tdata += packet\n\t\treturn data\n\nif __name__ == \"__main__\":\n\tTcp_comm()\n","sub_path":"src/xunjian_nav/scripts/tcp_comm.py","file_name":"tcp_comm.py","file_ext":"py","file_size_in_byte":6177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"639243463","text":"import pickle\nimport requests\nimport socket, os\n\nclass Registration(object):\n\n def __init__(self):\n self.hash = ''\n self.code = ''\n self.qr = ''\n\n def register(self, port):\n\n ipaddr = \"\"\n try:\n gw = os.popen(\"ip -4 route show default\").read().split()\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n s.connect((gw[2], 0))\n ipaddr = s.getsockname()[0]\n except IndexError:\n ipaddr = socket.gethostbyname(socket.gethostname())\n\n #url = 'http://localhost:5000/register/%s/%d' % (ipaddr, port)\n url = 'http://130.240.5.87:5000/register/%s/%d' % (ipaddr, port)\n r = requests.post(url)\n status_code = r.status_code\n if status_code != 200:\n print('Something went wrong, got ' + str(status_code))\n return -1\n else:\n response = pickle.loads(r.content) # unbox the response content\n self.hash = response['hash'] # not really needed\n self.code = response['code'] # should be displayed\n self.qr = response['qr']\n\n self.saveQR()\n return\n\n def saveQR(self):\n img = self.qr.make_image()\n img.save('static/media/qr.png') # can also use .jpeg, .bmp\n","sub_path":"screen/registration.py","file_name":"registration.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"639947743","text":"import sys\nimport readline\nimport queue\n\n\n\ndef reader():\n filename0 = \"example.txt\"\n filename1 = \"user1.txt\"\n filename2 = \"user2.txt\"\n filename3 = \"user10.txt\"\n filename4 = \"user45.txt\"\n\n alist = [line.rstrip().split(' ')[1] for line in open(filename3)]\n\n userDic = {}\n\n transNum = 0\n for i in alist:\n transNum += 1\n\n iList = i.split(',')\n\n newData = ''\n newData = ','.join([str(transNum),i])\n\n\n if iList[0] == \"DUMPLOG\":\n userDic.update({iList[0]:[newData]})\n elif iList[1] not in userDic:\n userDic.update({iList[1]:[newData]})\n else:\n userDic.get(iList[1]).append(newData)\n\n\n spliter = 0\n loadcount = 1\n for v in userDic.values():\n if spliter == 0:\n filename = 'workload'+str(loadcount)+'.txt'\n f = open(filename, 'w')\n loadcount += 1\n\n for item in v:\n f.write(item+'\\n')\n\n spliter += 1\n\n if spliter == 3:\n spliter = 0\n f.close()\n\n\nif __name__ == '__main__':\n reader()\n","sub_path":"jayDev/update03-06/workload/workload.py","file_name":"workload.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"621164466","text":"'''\nLet d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).\nIf d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and each of a and b are called amicable numbers.\n\nFor example,\nthe proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110; therefore\nd(220) = 284.\nThe proper divisors of 284 are 1, 2, 4, 71 and 142; so\nd(284) = 220.\n\nEvaluate the sum of all the amicable numbers under 10000.\n'''\n\nfrom 
math import sqrt\ndef sum_of_all_divisors(num):\n '''return the sum of all proper divisors less than n'''\n sum_divs = 0\n divisor = 1\n while divisor <= sqrt(num):\n if(num%divisor == 0):\n sum_divs+=divisor\n if(divisor != num/divisor):\n sum_divs+=num/divisor\n divisor+=1\n\n # remove itself as a valid divisor\n sum_divs-=num\n\n return int(sum_divs)\n\n\namicables = []\nnum = 1\n\n# find all amicables under 10 000\nwhile num < 10000:\n # check if amicable\n pot_amicable = sum_of_all_divisors(num)\n # validate number\n if (sum_of_all_divisors(pot_amicable) == num and pot_amicable != num):\n # add to cart if not alreaddy present\n if num not in amicables:\n amicables.append(pot_amicable)\n amicables.append(num)\n num+=1\n\n# Evaluate the sum of all the amicable numbers under 10 000\nsum = 0\nprint(amicables)\nfor am in amicables:\n sum+=am\n\nprint(sum)\n","sub_path":"20-29/p_21.py","file_name":"p_21.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"235435700","text":"import cv2\nimport pafy\n\nmouth_cascade = cv2.CascadeClassifier('./data/haarcascade_mcs_mouth.xml')\n\nlip_mask = cv2.imread('./data/lip5.png')\nh_mask, w_mask = lip_mask.shape[:2]\n\nurl = 'https://www.youtube.com/watch?v=B0abXq6bff4'\nvideo = pafy.new(url)\nbest= video.getbest(preftype='webm')\ncap=cv2.VideoCapture(best.url)\nframe_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n\nout = cv2.VideoWriter('lip5_mask_final.MP4', 0x7634706d, 25, frame_size, isColor=True)\n\nif mouth_cascade.empty():\n\traise IOError('Unable to load the mouth cascade classifier xml file')\n\nwhile True:\n ret, frame = cap.read()\n\n if not ret:\n break\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n mouth_rects = mouth_cascade.detectMultiScale(gray, 1.3,11)\n\n for (x,y,w,h) in mouth_rects:\n w = int(w*2.1)\n h = int(h*0.7)\n x = int(x - 0.10*w)\n y = int(y - 0.15*h)\n # cv2.rectangle(frame, (x,y), (x+w,y+h), (0,255,0), 3)\n\n frame_roi = frame[y:y + h, x:x + w] # 얼굴에 해당하는 부분 자르고\n face_mask_small = cv2.resize(lip_mask, (w, h), interpolation=cv2.INTER_AREA)\n # 마스크를 크기에 맞게 자르고\n gray_mask = cv2.cvtColor(face_mask_small, cv2.COLOR_BGR2GRAY)\n\n ret, mask = cv2.threshold(gray_mask, 150, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY_INV)\n mask_inv = cv2.bitwise_not(mask)\n masked_face = cv2.bitwise_and(face_mask_small, face_mask_small, mask=mask)\n masked_frame = cv2.bitwise_and(frame_roi, frame_roi, mask=mask_inv)\n frame[y:y + h, x:x + w] = cv2.add(masked_face, masked_frame)\n\n break\n\n cv2.imshow('Mouth Detector', frame)\n out.write(frame)\n\n c = cv2.waitKey(1)\n if c == 27:\n break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"hw/4th/hw4_2.py","file_name":"hw4_2.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"507978245","text":"#Write a class vector representing a vector of n dimension. 
overload the + & * operators which calculates the sum & the dot product of them.\n\n#Override the __len__() method on vector of above problem to display the dimension of vector\n\nclass Vector:\n def __init__(self,vec):\n self.vec= vec\n\n # def __str__(self):\n # return f\"{self.vec}\"\n\n def __str__(self):\n str_1= \"\"\n index= 0\n for i in self.vec:\n str_1 += f\" {i}a{index} +\"\n index +=1\n return str_1[:-1]\n\n def __add__(self,vec2):\n newList= []\n for i in range(len(self.vec)):\n newList.append(self.vec[i] + vec2.vec[i])\n return Vector(newList) \n def __mul__(self,vec2):\n sum= 0\n for i in range(len(self.vec)):\n sum +=(self.vec[i] * vec2.vec[i])\n return sum \n\n def __len__(self):\n return len(self.vec) \n\n\nv1= Vector([1,4,6])\nv2= Vector([2,5,6])\nprint(v1+v2) \nprint(v1*v2) \nprint(len(v1))\nprint(len(v2)) ","sub_path":"oops/practiseOops06.py","file_name":"practiseOops06.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"637954183","text":"\"\"\"\nProvides an abstraction of Cassandra's data model to allow for easy\nmanipulation of data inside Cassandra.\n\n.. seealso:: :mod:`pycassa.columnfamilymap`\n\"\"\"\n\nfrom pycassa.cassandra.ttypes import Column, ColumnOrSuperColumn,\\\n ColumnParent, ColumnPath, ConsistencyLevel, NotFoundException,\\\n SlicePredicate, SliceRange, SuperColumn, KeyRange,\\\n IndexExpression, IndexClause, CounterColumn\nimport pycassa.util as util\n\nimport time\nimport sys\nimport uuid\nimport threading\n\nfrom batch import CfMutator\n\n__all__ = ['gm_timestamp', 'ColumnFamily', 'PooledColumnFamily']\n\n_NON_SLICE = 0\n_SLICE_START = 1\n_SLICE_FINISH = 2\n\ndef gm_timestamp():\n \"\"\" Gets the current GMT timestamp in microseconds. \"\"\"\n return int(time.time() * 1e6)\n\n\nclass ColumnFamily(object):\n \"\"\" An abstraction of a Cassandra column family or super column family. \"\"\"\n\n buffer_size = 1024\n \"\"\" When calling :meth:`get_range()` or :meth:`get_indexed_slices()`,\n the intermediate results need to be buffered if we are fetching many\n rows, otherwise the Cassandra server will overallocate memory and fail.\n This is the size of that buffer in number of rows. The default is 1024. \"\"\"\n\n read_consistency_level = ConsistencyLevel.ONE\n \"\"\" The default consistency level for every read operation, such as \n :meth:`get` or :meth:`get_range`. This may be overridden per-operation. This should be\n an instance of :class:`~pycassa.cassandra.ttypes.ConsistencyLevel`.\n The default level is ``ONE``. \"\"\"\n\n write_consistency_level = ConsistencyLevel.ONE\n \"\"\" The default consistency level for every write operation, such as \n :meth:`insert` or :meth:`remove`. This may be overridden per-operation. This should be\n an instance of :class:`.~pycassa.cassandra.ttypes.ConsistencyLevel`.\n The default level is ``ONE``. \"\"\"\n\n timestamp = gm_timestamp\n \"\"\" Each :meth:`insert()` or :meth:`remove` sends a timestamp with every\n column. This attribute is a function that is used to get\n this timestamp when needed. The default function is :meth:`gm_timestamp()`.\"\"\"\n\n dict_class = util.OrderedDict\n \"\"\" Results are returned as dictionaries. :class:`~pycassa.util.OrderedDict` is\n used by default so that order is maintained. A different class, such as\n :class:`dict` may be used setting this. 
\"\"\"\n\n autopack_names = True\n \"\"\" Controls whether column names are automatically converted to or from\n their natural type to the binary string format that Cassandra uses.\n The data type used is controlled by :attr:`column_name_class` for\n column names and :attr:`super_column_name_class` for super column names. \n By default, this is :const:`True`. \"\"\"\n\n autopack_values = True\n \"\"\" Whether column values are automatically converted to or from\n their natural type to the binary string format that Cassandra uses.\n The data type used is controlled by :attr:`default_validation_class`\n and :attr:`column_validators`. \n By default, this is :const:`True`. \"\"\"\n\n autopack_keys = True\n \"\"\" Whether row keys are automatically converted to or from\n their natural type to the binary string format that Cassandra uses.\n The data type used is controlled by :attr:`key_validation_class`. \n By default, this is :const:`True`.\n \"\"\"\n\n column_name_class = None\n \"\"\" The data type of column names, which pycassa will use to determine\n how to pack and unpack them.\n \n This is set automatically by inspecting the column family's\n ``comparator_type``, but it may also be set manually if you want\n autopacking behavior without setting a ``comparator_type``. Options\n include anything in :mod:`~pycassa.system_manager`, such as \"LongType\". \"\"\"\n\n super_column_name_class = None\n \"\"\" Like :attr:`column_name_class`, but for super column names. \"\"\"\n\n default_validation_class = None\n \"\"\" The default data type of column values, which pycassa will use\n to determine how to pack and unpack them.\n \n This is set automatically by inspecting the column family's\n ``default_validation_class``, but it may also be set manually if you want\n autopacking behavior without setting a ``default_validation_class``. Options\n include anything in :mod:`~pycassa.system_manager`, such as \"LongType\". \"\"\"\n\n column_validators = {}\n \"\"\" Like :attr:`default_validation_class`, but is a :class:`dict` mapping\n individual columns to types. \"\"\"\n\n key_validation_class = None\n \"\"\" The data type of row keys, which pycassa will use to determine how\n to pack and unpack them.\n \n This is set automatically by inspecting the column family's\n ``key_validation_class`` (which only exists in Cassandra 0.8 or greater),\n but may be set manually if you want the autopacking behavior without\n setting a ``key_validation_class`` or if you are using Cassandra 0.7. \n Options include anything in :mod:`~pycassa.system_manager`, such as\n \"LongType\".\"\"\"\n\n def __init__(self, pool, column_family, **kwargs):\n \"\"\"\n An abstraction of a Cassandra column family or super column family.\n Operations on this, such as :meth:`get` or :meth:`insert` will get data from or\n insert data into the corresponding Cassandra column family with\n name `column_family`.\n\n `pool` is a :class:`~pycassa.pool.ConnectionPool` that the column\n family will use for all operations. A connection is drawn from the\n pool before each operations and is returned afterwards. 
Note that\n the keyspace to be used is determined by the pool.\n \"\"\"\n\n self.pool = pool\n self._tlocal = threading.local()\n self._tlocal.client = None\n self.column_family = column_family\n self.timestamp = gm_timestamp\n self.load_schema()\n\n recognized_kwargs = [\"buffer_size\", \"read_consistency_level\",\n \"write_consistency_level\", \"timestamp\",\n \"dict_class\", \"buffer_size\", \"autopack_names\",\n \"autopack_values\", \"autopack_keys\"]\n for kw in recognized_kwargs:\n if kw in kwargs:\n setattr(self, kw, kwargs[kw])\n\n def load_schema(self):\n \"\"\"\n Loads the schema definition for this column family from\n Cassandra and updates comparator and validation classes if\n neccessary.\n \"\"\"\n try:\n try:\n self._obtain_connection()\n ksdef = self._tlocal.client.get_keyspace_description(use_dict_for_col_metadata=True)\n self._cfdef = ksdef[self.column_family]\n except KeyError:\n nfe = NotFoundException()\n nfe.why = 'Column family %s not found.' % self.column_family\n raise nfe\n finally:\n self._release_connection()\n\n self.super = self._cfdef.column_type == 'Super'\n self._load_comparator_classes()\n self._load_validation_classes()\n self._load_key_class()\n\n def _load_comparator_classes(self):\n if not self.super:\n self.column_name_class = util.extract_type_name(self._cfdef.comparator_type)\n self.super_column_name_class = None\n else:\n self.column_name_class = util.extract_type_name(self._cfdef.subcomparator_type)\n self.super_column_name_class = util.extract_type_name(self._cfdef.comparator_type)\n\n def _load_validation_classes(self):\n self.default_validation_class = util.extract_type_name(self._cfdef.default_validation_class)\n self.column_validators = {}\n for name, coldef in self._cfdef.column_metadata.items():\n self.column_validators[name] = util.extract_type_name(coldef.validation_class)\n\n def _load_key_class(self):\n if hasattr(self._cfdef, \"key_validation_class\"):\n self.key_validation_class = util.extract_type_name(self._cfdef.key_validation_class)\n else:\n self.key_validation_class = 'BytesType'\n\n def _col_to_dict(self, column, include_timestamp):\n value = self._unpack_value(column.value, column.name)\n if include_timestamp:\n return (value, column.timestamp)\n return value\n\n def _scol_to_dict(self, super_column, include_timestamp):\n ret = self.dict_class()\n for column in super_column.columns:\n ret[self._unpack_name(column.name)] = self._col_to_dict(column, include_timestamp)\n return ret\n\n def _scounter_to_dict(self, counter_super_column):\n ret = self.dict_class()\n for counter in counter_super_column.columns:\n ret[self._unpack_name(counter.name)] = counter.value\n return ret\n\n def _cosc_to_dict(self, list_col_or_super, include_timestamp):\n ret = self.dict_class()\n for cosc in list_col_or_super:\n if cosc.column:\n col = cosc.column\n ret[self._unpack_name(col.name)] = self._col_to_dict(col, include_timestamp)\n elif cosc.counter_column:\n counter = cosc.counter_column\n ret[self._unpack_name(counter.name)] = counter.value\n elif cosc.super_column:\n scol = cosc.super_column\n ret[self._unpack_name(scol.name, True)] = self._scol_to_dict(scol, include_timestamp)\n else:\n scounter = cosc.counter_super_column\n ret[self._unpack_name(scounter.name, True)] = self._scounter_to_dict(scounter)\n return ret\n\n def _column_path(self, super_column=None, column=None):\n return ColumnPath(self.column_family,\n self._pack_name(super_column, is_supercol_name=True),\n self._pack_name(column, False))\n\n def _column_parent(self, 
super_column=None):\n return ColumnParent(column_family=self.column_family,\n super_column=self._pack_name(super_column, is_supercol_name=True))\n\n def _slice_predicate(self, columns, column_start, column_finish,\n column_reversed, column_count, super_column=None):\n is_supercol_name = self.super and super_column is None\n if columns is not None:\n packed_cols = []\n for col in columns:\n packed_cols.append(self._pack_name(col, is_supercol_name=is_supercol_name))\n return SlicePredicate(column_names=packed_cols)\n else:\n if column_start != '':\n column_start = self._pack_name(column_start,\n is_supercol_name=is_supercol_name,\n slice_end=_SLICE_START)\n if column_finish != '':\n column_finish = self._pack_name(column_finish,\n is_supercol_name=is_supercol_name,\n slice_end=_SLICE_FINISH)\n\n sr = SliceRange(start=column_start, finish=column_finish,\n reversed=column_reversed, count=column_count)\n return SlicePredicate(slice_range=sr)\n\n def _pack_name(self, value, is_supercol_name=False, slice_end=_NON_SLICE):\n if not self.autopack_names:\n if value is not None and not (isinstance(value, str) or isinstance(value, unicode)):\n raise TypeError(\"A str or unicode column name was expected, but %s was received instead (%s)\"\n % (value.__class__.__name__, str(value)))\n return value\n if value is None: return\n\n if is_supercol_name:\n d_type = self.super_column_name_class\n else:\n d_type = self.column_name_class\n\n if d_type == 'TimeUUIDType':\n if slice_end:\n value = util.convert_time_to_uuid(value,\n lowest_val=(slice_end == _SLICE_START),\n randomize=False)\n else:\n value = util.convert_time_to_uuid(value,\n randomize=True)\n elif d_type == 'BytesType' and not (isinstance(value, str) or isinstance(value, unicode)):\n raise TypeError(\"A str or unicode column name was expected, but %s was received instead (%s)\"\n % (value.__class__.__name__, str(value)))\n\n return util.pack(value, d_type)\n\n def _unpack_name(self, b, is_supercol_name=False):\n if not self.autopack_names:\n return b\n if b is None: return\n\n if is_supercol_name:\n d_type = self.super_column_name_class\n else:\n d_type = self.column_name_class\n\n return util.unpack(b, d_type)\n\n def _get_data_type_for_col(self, col_name):\n return self.column_validators.get(col_name, self.default_validation_class)\n\n def _pack_value(self, value, col_name):\n if not self.autopack_values:\n if value is not None and not (isinstance(value, str) or isinstance(value, unicode)):\n raise TypeError(\"A str or unicode column value was expected for column '%s', but %s was received instead (%s)\"\n % (str(col_name), value.__class__.__name__, str(value)))\n return value\n\n d_type = self._get_data_type_for_col(col_name)\n if d_type == 'BytesType' and not (isinstance(value, str) or isinstance(value, unicode)):\n raise TypeError(\"A str or unicode column value was expected for column '%s', but %s was received instead (%s)\"\n % (str(col_name), value.__class__.__name__, str(value)))\n\n return util.pack(value, d_type)\n\n def _unpack_value(self, value, col_name):\n if not self.autopack_values:\n return value\n return util.unpack(value, self._get_data_type_for_col(col_name))\n\n def _pack_key(self, key):\n if not self.autopack_keys or not key:\n return key\n return util.pack(key, self.key_validation_class)\n\n def _unpack_key(self, b):\n if not self.autopack_keys:\n return b\n return util.unpack(b, self.key_validation_class)\n\n def _obtain_connection(self):\n self._tlocal.client = self.pool.get()\n\n def _release_connection(self):\n if 
hasattr(self._tlocal, 'client'):\n if self._tlocal.client:\n self._tlocal.client.return_to_pool()\n self._tlocal.client = None\n\n def get(self, key, columns=None, column_start=\"\", column_finish=\"\",\n column_reversed=False, column_count=100, include_timestamp=False,\n super_column=None, read_consistency_level = None):\n \"\"\"\n Fetches all or part of the row with key `key`.\n\n The columns fetched may be limited to a specified list of column names\n using `columns`.\n\n Alternatively, you may fetch a slice of columns or super columns from a row\n using `column_start`, `column_finish`, and `column_count`.\n Setting these will cause columns or super columns to be fetched starting with\n `column_start`, continuing until `column_count` columns or super columns have\n been fetched or `column_finish` is reached. If `column_start` is left as the\n empty string, the slice will begin with the start of the row; leaving\n `column_finish` blank will cause the slice to extend to the end of the row.\n Note that `column_count` defaults to 100, so rows over this size will not be\n completely fetched by default.\n\n If `column_reversed` is ``True``, columns are fetched in reverse sorted order,\n beginning with `column_start`. In this case, if `column_start` is the empty\n string, the slice will begin with the end of the row.\n\n You may fetch all or part of only a single super column by setting `super_column`.\n If this is set, `column_start`, `column_finish`, `column_count`, and `column_reversed`\n will apply to the subcolumns of `super_column`.\n\n To include every column's timestamp in the result set, set `include_timestamp` to\n ``True``. Results will include a ``(value, timestamp)`` tuple for each column.\n\n If this is a standard column family, the return type is of the form\n ``{column_name: column_value}``. If this is a super column family and `super_column`\n is not specified, the results are of the form\n ``{super_column_name: {column_name, column_value}}``. 
If `super_column` is set,\n the super column name will be excluded and the results are of the form\n ``{column_name: column_value}``.\n\n \"\"\"\n\n packed_key = self._pack_key(key)\n single_column = columns is not None and len(columns) == 1\n if (not self.super and single_column) or \\\n (self.super and super_column is not None and single_column):\n super_col_orig = super_column is not None\n column = None\n if self.super and super_column is None:\n super_column = columns[0]\n else:\n column = columns[0]\n cp = self._column_path(super_column, column)\n try:\n self._obtain_connection()\n col_or_super = self._tlocal.client.get(\n packed_key, cp,\n read_consistency_level or self.read_consistency_level)\n finally:\n self._release_connection()\n return self._cosc_to_dict([col_or_super], include_timestamp)\n else:\n cp = self._column_parent(super_column)\n sp = self._slice_predicate(columns, column_start, column_finish,\n column_reversed, column_count, super_column)\n\n try:\n self._obtain_connection()\n list_col_or_super = self._tlocal.client.get_slice(\n packed_key, cp, sp,\n read_consistency_level or self.read_consistency_level)\n finally:\n self._release_connection()\n\n if len(list_col_or_super) == 0:\n raise NotFoundException()\n return self._cosc_to_dict(list_col_or_super, include_timestamp)\n\n def get_indexed_slices(self, index_clause, columns=None, column_start=\"\", column_finish=\"\",\n column_reversed=False, column_count=100, include_timestamp=False,\n read_consistency_level=None, buffer_size=None):\n \"\"\"\n Similar to :meth:`get_range()`, but an :class:`~pycassa.cassandra.ttypes.IndexClause`\n is used instead of a key range.\n\n `index_clause` limits the keys that are returned based on expressions\n that compare the value of a column to a given value. At least one of the\n expressions in the :class:`.IndexClause` must be on an indexed column.\n\n Note that Cassandra does not support secondary indexes or get_indexed_slices()\n for super column families.\n\n .. 
seealso:: :meth:`~pycassa.index.create_index_clause()` and\n :meth:`~pycassa.index.create_index_expression()`\n\n \"\"\"\n\n assert not self.super, \"get_indexed_slices() is not \" \\\n \"supported by super column families\"\n\n cl = read_consistency_level or self.read_consistency_level\n cp = self._column_parent()\n sp = self._slice_predicate(columns, column_start, column_finish,\n column_reversed, column_count)\n\n new_exprs = []\n # Pack the values in the index clause expressions\n for expr in index_clause.expressions:\n name = self._pack_name(expr.column_name)\n value = self._pack_value(expr.value, name)\n new_exprs.append(IndexExpression(name, expr.op, value))\n\n packed_start_key = self._pack_key(index_clause.start_key)\n clause = IndexClause(new_exprs, packed_start_key, index_clause.count)\n\n # Figure out how we will chunk the request\n if buffer_size is None:\n buffer_size = self.buffer_size\n row_count = clause.count\n\n count = 0\n i = 0\n last_key = clause.start_key\n while True:\n if row_count is not None:\n buffer_size = min(row_count - count + 1, buffer_size)\n clause.count = buffer_size\n clause.start_key = last_key\n try:\n self._obtain_connection()\n key_slices = self._tlocal.client.get_indexed_slices(\n cp, clause, sp, cl)\n finally:\n self._release_connection()\n\n if key_slices is None:\n return\n for j, key_slice in enumerate(key_slices):\n # Ignore the first element after the first iteration\n # because it will be a duplicate.\n if j == 0 and i != 0:\n continue\n unpacked_key = self._unpack_key(key_slice.key)\n yield (unpacked_key,\n self._cosc_to_dict(key_slice.columns, include_timestamp))\n\n count += 1\n if row_count is not None and count >= row_count:\n return\n\n if len(key_slices) != buffer_size:\n return\n last_key = key_slices[-1].key\n i += 1\n\n def multiget(self, keys, columns=None, column_start=\"\", column_finish=\"\",\n column_reversed=False, column_count=100, include_timestamp=False,\n super_column=None, read_consistency_level = None, buffer_size=None):\n \"\"\"\n Fetch multiple rows from a Cassandra server.\n\n `keys` should be a list of keys to fetch.\n\n `buffer_size` is the number of rows from the total list to fetch at a time.\n If left as ``None``, the ColumnFamily's :attr:`buffer_size` will be used.\n\n All other parameters are the same as :meth:`get()`, except that a list of keys may\n be passed in.\n\n Results will be returned in the form: ``{key: {column_name: column_value}}``. 
If\n an OrderedDict is used, the rows will have the same order as `keys`.\n\n \"\"\"\n\n packed_keys = map(self._pack_key, keys)\n cp = self._column_parent(super_column)\n sp = self._slice_predicate(columns, column_start, column_finish,\n column_reversed, column_count, super_column)\n consistency = read_consistency_level or self.read_consistency_level\n\n buffer_size = buffer_size or self.buffer_size\n offset = 0\n keymap = {}\n while offset < len(packed_keys):\n try:\n self._obtain_connection()\n new_keymap = self._tlocal.client.multiget_slice(\n packed_keys[offset:offset+buffer_size], cp, sp, consistency)\n finally:\n self._release_connection()\n keymap.update(new_keymap)\n offset += buffer_size\n\n ret = self.dict_class()\n\n # Keep the order of keys\n for key in keys:\n ret[key] = None\n\n empty_keys = []\n for packed_key, columns in keymap.iteritems():\n unpacked_key = self._unpack_key(packed_key)\n if len(columns) > 0:\n ret[unpacked_key] = self._cosc_to_dict(columns, include_timestamp)\n else:\n empty_keys.append(unpacked_key)\n\n for key in empty_keys:\n try:\n del ret[key]\n except KeyError:\n pass\n\n return ret\n\n MAX_COUNT = 2**31-1\n def get_count(self, key, super_column=None, read_consistency_level=None,\n columns=None, column_start=\"\", column_finish=\"\",\n column_reversed=False, max_count=None):\n \"\"\"\n Count the number of columns in the row with key `key`.\n\n You may limit the columns or super columns counted to those in `columns`.\n Additionally, you may limit the columns or super columns counted to\n only those between `column_start` and `column_finish`.\n\n You may also count only the number of subcolumns in a single super column\n using `super_column`. If this is set, `columns`, `column_start`, and\n `column_finish` only apply to the subcolumns of `super_column`.\n\n To put an upper bound on the number of columns that are counted,\n set `max_count`.\n\n \"\"\"\n if max_count is None:\n max_count = self.MAX_COUNT\n\n packed_key = self._pack_key(key)\n cp = self._column_parent(super_column)\n sp = self._slice_predicate(columns, column_start, column_finish,\n column_reversed, max_count, super_column)\n\n try:\n self._obtain_connection()\n ret = self._tlocal.client.get_count(\n packed_key, cp, sp,\n read_consistency_level or self.read_consistency_level)\n finally:\n self._release_connection()\n return ret\n\n def multiget_count(self, keys, super_column=None,\n read_consistency_level=None,\n columns=None, column_start=\"\",\n column_finish=\"\", buffer_size=None,\n column_reversed=False, max_count=None):\n \"\"\"\n Perform a column count in parallel on a set of rows.\n\n The parameters are the same as for :meth:`multiget()`, except that a list\n of keys may be used. 
A dictionary of the form ``{key: int}`` is\n returned.\n\n `buffer_size` is the number of rows from the total list to count at a time.\n If left as ``None``, the ColumnFamily's :attr:`buffer_size` will be used.\n \n To put an upper bound on the number of columns that are counted,\n set `max_count`.\n\n \"\"\"\n if max_count is None:\n max_count = self.MAX_COUNT\n\n packed_keys = map(self._pack_key, keys)\n cp = self._column_parent(super_column)\n sp = self._slice_predicate(columns, column_start, column_finish,\n column_reversed, max_count, super_column)\n consistency = read_consistency_level or self.read_consistency_level\n\n buffer_size = buffer_size or self.buffer_size\n offset = 0\n keymap = {}\n while offset < len(packed_keys):\n try:\n self._obtain_connection()\n new_keymap = self._tlocal.client.multiget_count(\n packed_keys[offset:offset+buffer_size], cp, sp, consistency)\n finally:\n self._release_connection()\n keymap.update(new_keymap)\n offset += buffer_size\n\n ret = self.dict_class()\n\n # Keep the order of keys\n for key in keys:\n ret[key] = None\n\n for packed_key, count in keymap.iteritems():\n ret[self._unpack_key(packed_key)] = count\n\n return ret\n\n def get_range(self, start=\"\", finish=\"\", columns=None, column_start=\"\",\n column_finish=\"\", column_reversed=False, column_count=100,\n row_count=None, include_timestamp=False,\n super_column=None, read_consistency_level=None,\n buffer_size=None):\n \"\"\"\n Get an iterator over rows in a specified key range.\n\n The key range begins with `start` and ends with `finish`. If left\n as empty strings, these extend to the beginning and end, respectively.\n Note that if RandomPartitioner is used, rows are stored in the\n order of the MD5 hash of their keys, so getting a lexicographical range\n of keys is not feasible.\n\n The `row_count` parameter limits the total number of rows that may be\n returned. If left as ``None``, the number of rows that may be returned\n is unlimted (this is the default).\n\n When calling `get_range()`, the intermediate results need to be\n buffered if we are fetching many rows, otherwise the Cassandra\n server will overallocate memory and fail. `buffer_size` is the\n size of that buffer in number of rows. 
If left as ``None``, the\n ColumnFamily's :attr:`buffer_size` attribute will be used.\n\n All other parameters are the same as those of :meth:`get()`.\n\n A generator over ``(key, {column_name: column_value})`` is returned.\n To convert this to a list, use ``list()`` on the result.\n\n \"\"\"\n\n cl = read_consistency_level or self.read_consistency_level\n cp = self._column_parent(super_column)\n sp = self._slice_predicate(columns, column_start, column_finish,\n column_reversed, column_count, super_column)\n\n count = 0\n i = 0\n last_key = self._pack_key(start)\n finish = self._pack_key(finish)\n\n if buffer_size is None:\n buffer_size = self.buffer_size\n while True:\n if row_count is not None:\n buffer_size = min(row_count - count + 1, buffer_size)\n key_range = KeyRange(start_key=last_key, end_key=finish, count=buffer_size)\n try:\n self._obtain_connection()\n key_slices = self._tlocal.client.get_range_slices(\n cp, sp, key_range, cl)\n finally:\n self._release_connection()\n # This may happen if nothing was ever inserted\n if key_slices is None:\n return\n for j, key_slice in enumerate(key_slices):\n # Ignore the first element after the first iteration\n # because it will be a duplicate.\n if j == 0 and i != 0:\n continue\n yield (self._unpack_key(key_slice.key),\n self._cosc_to_dict(key_slice.columns, include_timestamp))\n count += 1\n if row_count is not None and count >= row_count:\n return\n\n if len(key_slices) != buffer_size:\n return\n last_key = key_slices[-1].key\n i += 1\n\n def insert(self, key, columns, timestamp=None, ttl=None,\n write_consistency_level=None):\n \"\"\"\n Insert or update columns in the row with key `key`.\n\n `columns` should be a dictionary of columns or super columns to insert\n or update. If this is a standard column family, `columns` should\n look like ``{column_name: column_value}``. If this is a super\n column family, `columns` should look like\n ``{super_column_name: {sub_column_name: value}}``\n\n A timestamp may be supplied for all inserted columns with `timestamp`.\n\n `ttl` sets the \"time to live\" in number of seconds for the inserted\n columns. 
After this many seconds, Cassandra will mark the columns as\n deleted.\n\n The timestamp Cassandra reports as being used for insert is returned.\n\n \"\"\"\n packed_key = self._pack_key(key)\n if ((not self.super) and len(columns) == 1) or \\\n (self.super and len(columns) == 1 and len(columns.values()[0]) == 1):\n\n if timestamp is None:\n timestamp = self.timestamp()\n\n if self.super:\n super_col = columns.keys()[0]\n cp = self._column_path(super_col)\n columns = columns.values()[0]\n else:\n cp = self._column_path()\n\n colname = self._pack_name(columns.keys()[0], False)\n colval = self._pack_value(columns.values()[0], colname)\n column = Column(colname, colval, timestamp, ttl)\n try:\n self._obtain_connection()\n self._tlocal.client.insert(packed_key, cp, column,\n write_consistency_level or self.write_consistency_level)\n finally:\n self._release_connection()\n return timestamp\n else:\n return self.batch_insert({key: columns}, timestamp=timestamp, ttl=ttl,\n write_consistency_level=write_consistency_level)\n\n def batch_insert(self, rows, timestamp=None, ttl=None, write_consistency_level = None):\n \"\"\"\n Like :meth:`insert()`, but multiple rows may be inserted at once.\n\n The `rows` parameter should be of the form ``{key: {column_name: column_value}}``\n if this is a standard column family or\n ``{key: {super_column_name: {column_name: column_value}}}`` if this is a super\n column family.\n\n \"\"\"\n\n if timestamp == None:\n timestamp = self.timestamp()\n batch = self.batch(write_consistency_level=write_consistency_level)\n for key, columns in rows.iteritems():\n batch.insert(key, columns, timestamp=timestamp, ttl=ttl)\n batch.send()\n return timestamp\n\n def add(self, key, column, value=1, super_column=None, write_consistency_level=None):\n \"\"\"\n Increment or decrement a counter.\n\n `value` should be an integer, either positive or negative, to be added\n to a counter column. By default, `value` is 1.\n\n .. note:: This method is not idempotent. Retrying a failed add may result\n in a double count. You should consider using a separate\n ConnectionPool with retries disabled for column families\n with counters.\n\n .. versionadded:: 1.1.0\n Available in Cassandra 0.8.0 and later.\n\n \"\"\"\n packed_key = self._pack_key(key)\n cp = self._column_parent(super_column)\n column = self._pack_name(column)\n try:\n self._obtain_connection()\n self._tlocal.client.add(packed_key, cp, CounterColumn(column, value),\n write_consistency_level or self.write_consistency_level)\n finally:\n self._release_connection()\n\n\n def remove(self, key, columns=None, super_column=None,\n write_consistency_level=None, timestamp=None, counter=None):\n \"\"\"\n Remove a specified row or a set of columns within the row with key `key`.\n\n A set of columns or super columns to delete may be specified using\n `columns`.\n\n A single super column may be deleted by setting `super_column`. 
If\n `super_column` is specified, `columns` will apply to the subcolumns\n of `super_column`.\n\n If `columns` and `super_column` are both ``None``, the entire row is\n removed.\n\n The timestamp used for remove is returned.\n\n \"\"\"\n\n if timestamp is None:\n timestamp = self.timestamp()\n batch = self.batch(write_consistency_level=write_consistency_level)\n batch.remove(key, columns, super_column, timestamp)\n batch.send()\n return timestamp\n\n def remove_counter(self, key, column, super_column=None, write_consistency_level=None):\n \"\"\"\n Remove a counter at the specified location.\n\n Note that counters have limited support for deletes: if you remove a\n counter, you must wait to issue any following update until the delete\n has reached all the nodes and all of them have been fully compacted.\n\n .. versionadded:: 1.1.0\n Available in Cassandra 0.8.0 and later.\n\n \"\"\"\n packed_key = self._pack_key(key)\n cp = self._column_path(super_column, column)\n consistency = write_consistency_level or self.write_consistency_level\n try:\n self._obtain_connection()\n self._tlocal.client.remove_counter(packed_key, cp, consistency)\n finally:\n self._release_connection()\n\n def batch(self, queue_size=100, write_consistency_level=None):\n \"\"\"\n Create batch mutator for doing multiple insert, update, and remove\n operations using as few roundtrips as possible.\n\n The `queue_size` parameter sets the max number of mutations per request.\n\n A :class:`~pycassa.batch.CfMutator` is returned.\n\n \"\"\"\n\n return CfMutator(self, queue_size,\n write_consistency_level or self.write_consistency_level)\n\n def truncate(self):\n \"\"\"\n Marks the entire ColumnFamily as deleted.\n\n From the user's perspective, a successful call to ``truncate`` will\n result complete data deletion from this column family. 
Internally,\n however, disk space will not be immediatily released, as with all\n deletes in Cassandra, this one only marks the data as deleted.\n\n The operation succeeds only if all hosts in the cluster at available\n and will throw an :exc:`.UnavailableException` if some hosts are\n down.\n\n \"\"\"\n try:\n self._obtain_connection()\n self._tlocal.client.truncate(self.column_family)\n finally:\n self._release_connection()\n\nPooledColumnFamily = ColumnFamily\n","sub_path":"bin/pycassa/columnfamily.py","file_name":"columnfamily.py","file_ext":"py","file_size_in_byte":36913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"648037857","text":"# Implementar un programa que muestre la siguiente secuencia: \n# 1, 2, 3, 4, 5, 4, 3, 2, 1, 0 \n# Para un desafío mayor:** Utilizar un solo *while*, un solo *if* y un solo *else*\n\ni = 1\nj = 4\n\nwhile j >= 0:\n if i <= 5:\n print(i)\n i += 1\n else:\n print(j)\n j -= 1\n","sub_path":"daro/Clase 1/mini-desafios/if-while.py","file_name":"if-while.py","file_ext":"py","file_size_in_byte":296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"165678653","text":"import numpy as np\nimport pandas as pd\nimport hashlib\n\nfrom tqdm import tqdm\n\n\n# verbose function\ndef tqdm_v(gen, verbose=True, **kwargs):\n if verbose:\n return tqdm(gen, **kwargs)\n else:\n return gen\n\n\nclass Preprocessing:\n def __init__(self):\n pass\n\n def get_name(self):\n return \"\"\n\n def apply(self, data):\n return data\n\n\nclass RandomSlice(Preprocessing):\n \"\"\"\n Preprocessing object that extracts a random slice from the data.\n \"\"\"\n def __init__(self, slice_size = -1, slice_start = None):\n self.slice_param = slice_size\n self.start_param = slice_start\n\n def get_name(self):\n \"\"\"Returns the name of the object.\n\n Returns\n -------\n str\n Name of the instanciated RandomSlice object,\n based on its parameters.\n\n \"\"\"\n return \"_rand-slice-{}{}\".format(self.slice_param,\n (\"-\"+str(self.start_param)) if self.start_param is not None else \"\")\n\n def apply(self, data, verbose=False):\n \"\"\"Applies slicing to given data.\n\n Parameters\n ----------\n data : numpy.ndarray\n The data to apply the slicing on.\n verbose : bool\n Whether to print stuff.\n\n Returns\n -------\n numpy.ndarray\n The data, sliced accordingly.\n\n \"\"\"\n n = data.shape[1]\n if verbose:\n print(\"retrieving window\")\n # slice_size\n if self.slice_param == -1 or self.slice_param > n:\n slice_size = n\n elif type(self.slice_param) == float:\n slice_size = int(self.slice_param * n)\n else:\n slice_size = self.slice_param\n # slice_start\n if self.start_param is None:\n slice_start = np.random.randint(0,n - slice_size + 1)\n else:\n slice_start = self.start_param\n # slicing\n return data[:, slice_start:(slice_start + slice_size)]\n\n\nclass HashingWindow(Preprocessing):\n def __init__(self, granularity=1, hashing=None):\n \"\"\"Initializes HashingWindow object.\n\n Parameters\n ----------\n granularity : int\n Size of window to consider as atomic element of a sequence.\n Default, 1, means that we take the data as is.\n 10 means that we slice the data into slices of width 10, and work on hashes of those length-10 slices.\n hashing : type\n Hashing method. 
Must be a hashlib method.\n\n \"\"\"\n self.granularity = granularity\n self.hasher = hashing if hashing is not None else hashlib.md5\n\n def get_name(self):\n \"\"\"Returns the name of the HashingWindow object.\n\n Returns\n -------\n str\n Name of the object, taking into account granularity and hashing method.\n\n \"\"\"\n return \"hash-{}-{}\".format(self.hasher().name, self.granularity)\n\n def apply(self, data, verbose=False):\n # slicing data into evenly sized chunks\n def chunk_sizes():\n l = list()\n curr = 0\n for i in range(0, data.shape[1], self.granularity):\n curr += min(self.granularity, data.shape[1]-i)\n l.append(curr)\n return np.array(l[:-1])\n\n chunks = chunk_sizes()\n result = np.zeros((data.shape[0], len(chunks)+1))\n for j,subarray in tqdm_v(enumerate(np.split(data, chunks, axis=1)),\n verbose, desc=\"hashing\"):\n for i in range(subarray.shape[0]):\n result[i,j] = int(self.hasher((''.join(map(str,subarray[i,:]))).encode('UTF-8')).hexdigest(),16)\n return result\n","sub_path":"Code/common/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"421760806","text":"import os\nimport csv\nimport sys\nimport logging\nimport pymongo\n\nfrom config.settings import config\nfrom etl.transform import coauthorGraph, coauthorMatrix\nfrom etl.transform import collaboratorGraph, collaboratorOrgs\n\nlogging.basicConfig(\n filename=os.path.join(config['LOG_DIR'],'etl.log'),\n format='%(asctime)-15s %(message)s',\n level=logging.INFO)\n\nrab_jobs = [ coauthorGraph, coauthorMatrix,\n collaboratorGraph, collaboratorOrgs ]\n\ndef load_csv(fileName):\n with open(fileName, 'r' ) as f:\n rdr = csv.reader(f)\n data = [ row for row in rdr ]\n return data\n\ndef main():\n extractDir = config['EXTRACT_DIR']\n mongo = pymongo.MongoClient(config['MONGO_URI'], config['MONGO_PORT'])\n mongo_db = mongo.get_database(config['MONGO_DB'])\n auth = mongo_db.authenticate(config['MONGO_USER'], config['MONGO_PASSWORD'])\n\n for job in rab_jobs:\n logging.info(\"Begin: \" + job.__name__)\n viz_coll = mongo_db[ job.collection_name ]\n coll_key = job.key_field\n coll_val = job.value_field\n\n datasets = []\n for input_file in job.input_files:\n data = load_csv(os.path.join(extractDir, input_file) )\n datasets.append(data)\n\n data_generator = job.transform(*datasets)\n for key, timestamp, trans_data in data_generator:\n viz_coll.update_one({ coll_key: key },\n {'$set' : { 'updated': timestamp, coll_key: key,\n coll_val: trans_data } }, upsert=True)\n logging.info(\"Completed: \" + job.__name__)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"rabviz/etl/run_transform.py","file_name":"run_transform.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"118490406","text":"\"\"\"Support for the definition of AIS hostname.\"\"\"\n\nfrom .config_flow import configured_host\n\nDOMAIN = \"ais_host\"\n\n\nasync def async_setup(hass, config):\n \"\"\"Set up if necessary.\"\"\"\n return True\n\n\nasync def async_setup_entry(hass, config_entry):\n \"\"\"Set up ais host as config entry.\"\"\"\n return True\n\n\nasync def async_unload_entry(hass, config_entry):\n \"\"\"Unload a config entry.\"\"\"\n return 
True\n","sub_path":"homeassistant/components/ais_host/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"213313372","text":"#naming game\n\nimport sys\nimport random as r\nimport csv \nfrom itertools import combinations\n#from statistics import mean\n#from timeit import default_timer as timer\n\nfrom fly import Firefly\n\nNUM_SPECIES = 3\nNUM_EACH = 15\nEPOCHS = 500\nMUTATE_PROB = .1\nTRIALS = 10\n\n#given list of fireflies, the epoch number\n#implement original naming game \ndef round_one(fireflies, epoch, A, B):\n for (i, j) in combinations(fireflies, 2):\n same = i.same_species(j)\n if same:\n #both no pattern\n if i.pattern == None and j.pattern == None:\n i.init_pattern()\n j.pattern = i.pattern\n #j has pattern\n elif i.pattern == None:\n i.pattern = j.pattern\n #i has pattern\n elif j.pattern == None:\n j.pattern = i.pattern\n #both have\n elif i.score() != None and j.score() != None:\n #compare lincombo, replicate smaller one\n iscore = (A*i.score()) + (B*i.num_flash())\n jscore = (A*j.score()) + (B*j.num_flash())\n if iscore <= jscore:\n j.pattern = i.pattern[:]\n if r.random() < MUTATE_PROB and epoch < 495:\n j.mutate()\n j.reset_simscore()\n j.last_score = iscore\n else:\n i.pattern = j.pattern[:]\n if r.random() < MUTATE_PROB and epoch < 495:\n i.mutate()\n i.reset_simscore()\n i.last_score = jscore\n\n else:\n #calculate and update similarity score\n if i.pattern == None:\n i.init_pattern()\n if j.pattern == None:\n j.init_pattern()\n distance = i.calc_similarity(j)\n i.update_simscore(distance)\n j.update_simscore(distance)\n\n#printing results\ndef list_flies(flies):\n flies.sort()\n seen = {}\n for f in flies:\n if (str(f.set_start()), f.species) not in seen:\n seen[(str(f.set_start()), f.species)] = f.last_score\n elif f.last_score < seen[(str(f.set_start()), f.species)]:\n seen[(str(f.set_start()), f.species)] = f.last_score\n \n return seen\n\n#write to csv\ndef print_csv(results):\n with open('results.csv', mode = 'w') as file:\n writer = csv.writer(file, delimiter = ',')\n \n for run in results.keys():\n row = [run]\n flies = results[run]\n row += flies\n row += flies.values()\n writer.writerow(row)\n\n\ndef main(args):\n #keep track of all the results\n runs = {}\n\n a = [.2, .25, .3, .35, .4, .45, .5]\n \n for A in a:\n B = 1-A\n for rep in range(TRIALS):\n fireflies = [0] * (NUM_SPECIES * NUM_EACH)\n for i in range(NUM_SPECIES):\n for j in range(NUM_EACH):\n fireflies[j+(NUM_EACH*i)] = Firefly(i)\n\n for epoch in range(EPOCHS):\n r.shuffle(fireflies)\n round_one(fireflies, epoch, A, B)\n \n runs[(A, B, rep)] = list_flies(fireflies)\n\n print_csv(runs)\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n","sub_path":"simplefly/learn_length/simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"555119536","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSchemas of responses.\n\"\"\"\n\nfrom schema import Optional, Schema\n\n\n_cves = {\n 'cve_list': {\n str: {\n 'impact': str,\n 'public_date': str,\n 'synopsis': str,\n 'description': str,\n 'modified_date': str,\n Optional('redhat_url'): str,\n 'cvss3_score': str,\n Optional('secondary_url'): str,\n 'cwe_list': [str],\n }\n }\n}\n\n_errata = {\n 'errata_list': {\n str: {\n 'updated': str,\n 'severity': str,\n 'reference_list': [str],\n 'issued': str,\n 
'description': str,\n 'solution': str,\n 'summary': str,\n 'url': str,\n 'synopsis': str,\n 'cve_list': [str],\n 'bugzilla_list': [str],\n 'package_list': [str],\n 'type': str,\n }\n }\n}\n\n_repos = {\n 'repository_list': {\n str: [\n {\n 'product': str,\n 'releasever': str,\n 'name': str,\n 'url': str,\n 'basearch': str,\n 'revision': str,\n 'label': str,\n }\n ]\n }\n}\n\n_updates_top = {'update_list': {str: dict}}\n_updates_top_repolist = {'repository_list': [str], 'update_list': {str: dict}}\n\n_updates_package = {\n 'available_updates': [\n {\n 'basearch': str,\n 'erratum': str,\n 'releasever': str,\n 'repository': str,\n 'package': str,\n }\n ],\n 'description': str,\n 'summary': str,\n}\n\ncves_schema = Schema(_cves)\nerrata_schema = Schema(_errata)\nrepos_schema = Schema(_repos)\nupdates_top_schema = Schema(_updates_top)\nupdates_top_repolist_schema = Schema(_updates_top_repolist)\nupdates_package_schema = Schema(_updates_package)\n","sub_path":"vmaas/rest/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"650668983","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Open file with MV profiles consumption data\ndf = pd.read_excel('CCH.xlsx')\ndf['DataLectura'] = pd.to_datetime(df['DataLectura'], yearfirst=True)\ndf.index = df['DataLectura']\nreference_id = df['CUPS'].unique()\ndf['weekday'] = df['DataLectura'].dt.weekday\n\n# Sum up all the consumption for the reference weeks for each quarter\nconsumption_hour_march = np.asarray([0 for x in range(169)])\nconsumption_hour_june = np.asarray([0 for x in range(169)])\nconsumption_hour_september = np.asarray([0 for x in range(169)])\nconsumption_hour_december = np.asarray([0 for x in range(169)])\n\ntotal_id = len(reference_id)\nfor i in range(total_id):\n df1 = df.loc[df['CUPS'] == reference_id[i]]\n df_week_march = df1.loc['20180221230000':'20180228']\n df_week_june = df1.loc['201706132300':'20170620']\n df_week_september = df1.loc['201709112300':'20170918']\n df_week_december = df1.loc['201711042300':'20171111'] # get November instead to avoid holidays\n\n # Order from Monday to Sunday\n df2_march = df_week_march.loc[df_week_march['weekday'] == 0]\n df2_june = df_week_june.loc[df_week_june['weekday'] == 0]\n df2_september = df_week_september.loc[df_week_september['weekday'] == 0]\n df2_december = df_week_december.loc[df_week_december['weekday'] == 0]\n for j in range(1, 7):\n df3_march = df_week_march.loc[df_week_march['weekday'] == j]\n df3_june = df_week_june.loc[df_week_june['weekday'] == j]\n df3_september = df_week_september.loc[df_week_september['weekday'] == j]\n df3_december = df_week_december.loc[df_week_december['weekday'] == j]\n\n frame_march = [df2_march, df3_march]\n frame_june = [df2_june, df3_june]\n frame_september = [df2_september, df3_september]\n frame_december = [df2_december, df3_december]\n\n df2_march = pd.concat(frame_march)\n df2_june = pd.concat(frame_june)\n df2_september = pd.concat(frame_september)\n df2_december = pd.concat(frame_december)\n\n consumption_hour_march = df2_march['ActivaImport'] / 1000 + consumption_hour_march # in kW\n consumption_hour_june = df2_june['ActivaImport'] / 1000 + consumption_hour_june\n consumption_hour_september = df2_september['ActivaImport'] / 1000 + consumption_hour_september\n consumption_hour_december = df2_december['ActivaImport'] / 1000 + consumption_hour_december\n\n consumption_hour_march = 
np.asarray(consumption_hour_march)\n consumption_hour_june = np.asarray(consumption_hour_june)\n consumption_hour_september = np.asarray(consumption_hour_september)\n consumption_hour_december = np.asarray(consumption_hour_december)\n\n# Assume the reference week to be the selected one of the year 2016\nnew_index_march = pd.date_range('14/03/2016 0:15', periods=169, freq='H')\nnew_index_june = pd.date_range('06/06/2016 0:15', periods=169, freq='H')\nnew_index_september = pd.date_range('09/12/2016 0:15', periods=169, freq='H')\nnew_index_december = pd.date_range('12/05/2016 0:15', periods=169, freq='H')\n\nconsumption_hour_march = pd.Series(data=consumption_hour_march, index=new_index_march)\nconsumption_hour_june = pd.Series(data=consumption_hour_june, index=new_index_june)\nconsumption_hour_september = pd.Series(data=consumption_hour_september, index=new_index_september)\nconsumption_hour_december = pd.Series(data=consumption_hour_december, index=new_index_december)\n\n# get the energy each 15 min\npower_15min_march = consumption_hour_march.resample('15min').interpolate(method='linear') # still in kW\npower_15min_june = consumption_hour_june.resample('15min').interpolate(method='linear')\npower_15min_september = consumption_hour_september.resample('15min').interpolate(method='linear')\npower_15min_december = consumption_hour_december.resample('15min').interpolate(method='linear')\n\nconsumption_15min_march = power_15min_march * .25 # get the kWh\nconsumption_15min_june = power_15min_june * .25\nconsumption_15min_september = power_15min_september * .25\nconsumption_15min_december = power_15min_december * .25\n\nconsumption_15min_march.drop(consumption_15min_march.tail(1).index, inplace=True)\nconsumption_15min_june.drop(consumption_15min_june.tail(1).index, inplace=True)\nconsumption_15min_september.drop(consumption_15min_september.tail(1).index, inplace=True)\nconsumption_15min_december.drop(consumption_15min_december.tail(1).index, inplace=True)\n\n\ndf_print = pd.DataFrame({\n 'final consumption march': consumption_15min_march,\n 'final consumption june': consumption_15min_june,\n 'final consumption september': consumption_15min_september,\n 'final consumption december': consumption_15min_december\n})\ndf_print.to_csv('MV_demand.csv', index_label='time')\n\n\nplt.plot(consumption_hour_june)\nplt.plot(power_15min_june)\nplt.show()\n","sub_path":"MV_consumption.py","file_name":"MV_consumption.py","file_ext":"py","file_size_in_byte":4809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"492429965","text":"from matscholar_web.constants import valid_search_filters\nfrom matscholar_web.search.logic import (\n search_bar_live_display,\n sum_all_fields_and_buttons_n_submits,\n)\nfrom matscholar_web.tests.util import MatScholarWebBaseTest\n\n\n\"\"\"\nTests for the main search callback logic.\n\"\"\"\n\n\nclass TestSearchLogic(MatScholarWebBaseTest):\n def test_show_search_results(self):\n pass\n\n def test_sum_all_fields_and_buttons_n_submits(self):\n n_fields = len(valid_search_filters)\n all_n_clicks = [0] * n_fields\n self.assertEqual(\n 0, sum_all_fields_and_buttons_n_submits(*all_n_clicks)\n )\n all_n_clicks = [None] * n_fields\n self.assertEqual(\n 0, sum_all_fields_and_buttons_n_submits(*all_n_clicks)\n )\n all_n_clicks = [0] * n_fields\n all_n_clicks[2] = None\n self.assertEqual(\n 0, sum_all_fields_and_buttons_n_submits(*all_n_clicks)\n )\n all_n_clicks[1] = 1\n self.assertEqual(\n 1, 
sum_all_fields_and_buttons_n_submits(*all_n_clicks)\n )\n all_n_clicks = [1] * n_fields\n self.assertEqual(\n n_fields, sum_all_fields_and_buttons_n_submits(*all_n_clicks)\n )\n\n def test_search_bar_live_display(self):\n test_txt_src = \"abcdefghi\"\n ent_txts = [\n test_txt_src[i] for i, _ in enumerate(valid_search_filters)\n ]\n n_clicks = 0\n display = search_bar_live_display(n_clicks, *ent_txts)\n truth = [\n f\"{valid_search_filters[i]}: {test_txt_src[i]}\"\n for i, _ in enumerate(valid_search_filters)\n ]\n truth = \", \".join(truth) + \",\"\n self.assertEqual(truth.strip(), display.strip())\n","sub_path":"matscholar_web/search/tests/test_logic.py","file_name":"test_logic.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"477598630","text":"from app.config import db\nimport time\n\n\nclass UserBooks(db.Model):\n __tablename__ = \"user_books\"\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True, nullable=True)\n user_id = db.Column(db.Integer)\n book_id = db.Column(db.Integer)\n time = db.Column(db.Integer)\n reading_state = db.Column(db.Integer)\n\n reading_state_repr = {\n 0: 'Want to read',\n 1: 'Currently reading',\n 2: 'Read'\n }\n\n @classmethod\n def get_books(cls, user_id):\n \"\"\"Gets list of tags for a given book.\n\n Args:\n user_id ([string]): Goodreads book id provided in the dataset.\n Encountered as `goodreads_book_id`\n\n Returns:\n [List]: List of book_ids\n \"\"\"\n return [\n x.__dict__\n for x in (\n cls.query\n .filter_by(user_id=user_id)\n .order_by(cls.time.desc())\n )\n ]\n\n @classmethod\n def add_entry(cls, user_id, book_id, reading_state):\n try:\n assert reading_state in cls.reading_state_repr.keys()\n\n user_book_pair = (\n cls.query\n .filter_by(user_id=user_id)\n .filter_by(book_id=book_id)\n .first()\n )\n if user_book_pair:\n if user_book_pair.reading_state == reading_state:\n pass\n else:\n user_book_pair.reading_state = reading_state\n user_book_pair.time = int(time.time())\n else:\n ub = UserBooks(\n user_id=user_id,\n book_id=book_id,\n time=int(time.time()),\n reading_state=reading_state\n )\n db.session.add(ub)\n db.session.commit()\n return True\n except Exception as e:\n print(e)\n return False\n\n @classmethod\n def get_reading_state(cls, user_id, book_id):\n user_book_pair = (\n cls.query\n .filter_by(user_id=user_id)\n .filter_by(book_id=book_id)\n .first()\n )\n if user_book_pair:\n return user_book_pair.__dict__.get(\"reading_state\", 0)\n return None\n","sub_path":"app/models/user_books.py","file_name":"user_books.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"595995898","text":"from dataclasses import dataclass\n\nfrom expungeservice.models.charge import ChargeType\nfrom expungeservice.models.charge import ChargeUtil\nfrom expungeservice.models.expungement_result import TypeEligibility, EligibilityStatus\n\n\n@dataclass(frozen=True)\nclass MisdemeanorClassBC(ChargeType):\n type_name: str = \"Misdemeanor Class B or C\"\n expungement_rules: str = \"\"\"Convictions for misdemeanors are generally eligible under ORS 137.225(1)(b).\nExceptions include convictions related to sex, child and elder abuse, and driving, including DUII.\nDismissals for misdemeanors are generally eligible under ORS 137.225(1)(b). 
Exceptions include cases dismissed due to successful completion of DUII diversion.\"\"\"\n severity_level: str = \"Misdemeanor Class B\" # Might be technically inaccurate; but the time analyzer needs this.\n\n def type_eligibility(self, disposition):\n if ChargeUtil.dismissed(disposition):\n raise ValueError(\"Dismissed criminal charges should have been caught by another class.\")\n elif ChargeUtil.convicted(disposition):\n return TypeEligibility(EligibilityStatus.ELIGIBLE, reason=\"Eligible under 137.225(1)(b)\")\n else:\n return TypeEligibility(\n EligibilityStatus.ELIGIBLE,\n reason=\"Always eligible under 137.225(1)(b) for convictions, or 137.225(1)(d) for dismissals\",\n )\n","sub_path":"src/backend/expungeservice/models/charge_types/misdemeanor_class_bc.py","file_name":"misdemeanor_class_bc.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"294301104","text":"from dynamic_scraper.spiders.django_spider import DjangoSpider\nfrom open_races.models import RacesWebsite, Race, RaceItem\n\n\nclass RaceSpider(DjangoSpider):\n\n name = 'race_spider'\n\n def __init__(self, *args, **kwargs):\n self._set_ref_object(RacesWebsite, **kwargs)\n self.scraper = self.ref_object.scraper\n self.scrape_url = self.ref_object.url\n self.scheduler_runtime = self.ref_object.scraper_runtime\n self.scraped_obj_class = Race\n self.scraped_obj_item_class = RaceItem\n super(RaceSpider, self).__init__(self, *args, **kwargs)","sub_path":"open_races/scraper/spiders.py","file_name":"spiders.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"377027980","text":"import logging\nimport functools\nfrom typing import Callable, Optional\nimport ray\nfrom airflow.operators.python import task\nfrom ray_provider.hooks.ray_client import RayClientHook\nfrom ray_provider.xcom.ray_backend import RayBackend, get_or_create_kv_store\n\nlog = logging.getLogger(__name__)\n\n\ndef ray_wrapped(f, ray_conn_id=\"ray_default\", eager=False):\n @functools.wraps(f)\n def wrapper(*args, **kwargs) -> \"ray.ObjectRef\":\n log.info(\"[wrapper] Got executor.\")\n executor = get_or_create_kv_store(\n identifier=RayBackend.store_identifier, allow_new=True\n )\n log.info(f\"[wrapper] Launching task (with {args}, {kwargs}.\")\n ret_str = executor.execute(f, args=args, kwargs=kwargs, eager=eager)\n log.info(\"[wrapper] Remote task finished\")\n return ret_str\n\n return wrapper\n\n\ndef ray_task(\n python_callable: Optional[Callable] = None,\n ray_conn_id: str = \"ray_default\",\n ray_worker_pool: str = \"ray_worker_pool\",\n eager: bool = False,\n):\n \"\"\"Wraps a function to be executed on the Ray cluster.\n\n The return values of the function will be cached on the Ray object store.\n Downstream tasks must be ray tasks too, as the dependencies will be\n fetched from the object store. 
The RayBackend will need to be set up in your\n    Dockerfile to use this decorator.\n\n    Use as a task decorator: ::\n\n        from ray_provider.decorators import ray_task\n\n        def ray_example_dag():\n\n            @ray_task(\"ray_conn_id\")\n            def sum_cols(df: pd.DataFrame) -> pd.DataFrame:\n                return pd.DataFrame(df.sum()).T\n\n    :param python_callable: Function to be invoked on the Ray cluster.\n    :type python_callable: Optional[Callable]\n    :param ray_conn_id: Http connection id for connection to ray.\n    :type ray_conn_id: str\n    :param ray_worker_pool: The pool that controls the\n        number of parallel clients created to access the Ray cluster.\n    :type ray_worker_pool: Optional[str]\n    :param eager: Whether to run the function on the\n        coordinator process (on the Ray cluster) or to\n        send the function to a remote task. You should\n        set this to False normally.\n    :type eager: Optional[bool]\n    \"\"\"\n\n    @functools.wraps(python_callable)\n    def wrapper(f):\n\n        return task(\n            ray_wrapped(f, ray_conn_id, eager=eager),\n            pool=ray_worker_pool,\n        )\n\n    return wrapper\n","sub_path":"ray_provider/decorators/ray_decorators.py","file_name":"ray_decorators.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"}
+{"seq_id":"14301656","text":"from skimage.measure import compare_ssim as ssim\nfrom skimage import io\nfrom skimage.viewer import ImageViewer\nimport threading\nimport pickle\nimport viewFilteredImages as vf\n\ndef compare_images(im1path, im2path, compare_func, size=(512, 144)):\n    im1 = io.imread(fname=im1path)\n    im2 = io.imread(fname=im2path)\n\n    if (im1.size != size) or (im2.size != size):\n        im1.resize(size)\n        im2.resize(size)\n    return compare_func(im1, im2)\n\n\ndef compare_image_to_group(im1path, grp, compare_func):\n    comp_dic = {} # type: dict\n    for img in grp:\n        comp_dic[img] = compare_images(im1path, img, compare_func)\n    return comp_dic\n\n\ndef dict_sum(dictionary):\n    _sum = 0 # type: float\n    for key in dictionary:\n        _sum = float(dictionary[key]) + _sum\n    return _sum\n\n\ndef show_image_from_path(image_path):\n    image = io.imread(image_path)\n    viewer = ImageViewer(image)\n    viewer.show()\n\n\nclass CompareImageToGroupThread(threading.Thread):\n    def __init__(self, thread_id, name, im, target_group, comp_func):\n        threading.Thread.__init__(self)\n        self.thread_id = thread_id\n        self.name = name\n        self.im = im\n        self.target_group = target_group\n        self.comp_func = comp_func\n        self.comp_dic = 0\n\n    def run(self):\n        threadLimiter.acquire()\n        try:\n            print(\"Starting \" + self.name + \"\\n\")\n            self.comp_dic = compare_image_to_group(self.im, self.target_group, self.comp_func)\n            print(\"Exiting \" + self.name + \"\\n\")\n            dicSums[self.im] = dict_sum(self.comp_dic)\n            #print(\"\\n-----------------------------------\\n\")\n            with open(sys.argv[1] + '/images.dictionary', 'wb+') as dicSumsFile:\n                pickle.dump(dicSums, dicSumsFile)\n        finally:\n            threadLimiter.release()\n\n\ndef compare_group_to_group(group1, target_group, compare_func):\n    i = 0\n    for im in group1:\n        CompareImageToGroupThread(i, im, im, target_group, compare_func).start()\n        i = i + 1\n\n\nif __name__ == '__main__':\n    import sys\n    import os\n    import glob\n\n    maximumNumberOfThreads = 20\n    threadLimiter = threading.BoundedSemaphore(maximumNumberOfThreads)\n    dicSums = {}\n    imagesToCompare = glob.glob(sys.argv[1] + '/*.jpg')\n    try:\n        dicSums = vf.get_pickle(sys.argv[1]+'/images.dictionary')\n        filteredImages = set(dicSums.keys())\n        imagesToCompare = list(set(imagesToCompare) - 
filteredImages)\n except Exception :\n print(Exception)\n print('[!] file doesn\\'t exist : a new one will be created!')\n\n groupOfImages = list(map(lambda x: os.path.join(sys.argv[2], x), os.listdir(sys.argv[2])))\n print('\\n[!]filtering patient : ',sys.argv[1])\n compare_group_to_group(imagesToCompare, groupOfImages, ssim)","sub_path":"tools/imagetools/filterImages.py","file_name":"filterImages.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"591749989","text":"# -*- coding: utf-8 -*-\nimport urllib.request, urllib.error\nimport datetime\nfrom bs4 import BeautifulSoup\n\nclass SAXO:\n\n NAME = 'SAXOBANK'\n targetList = []\n kijunbi = None\n posList = {}\n soup = None\n index = {\n \"PLN/JPY\":\"106\",\n \"TRY/JPY\":\"114\",\n \"ZAR/JPY\":\"147\",\n }\n tables = {}\n\n def __init__(self, targetList,diff_day):\n\n try:\n self.targetList = targetList\n self.kijunbi = datetime.datetime.today() - datetime.timedelta(days=diff_day)\n yyyy = \"{0:%Y}\".format(self.kijunbi) \n m = \"{}\".format(self.kijunbi.month)\n for targetPair in self.targetList:\n url = \"http://saxobank.co.jp/swaplist/\" + self.index[targetPair] + \"/\" + yyyy + \"/\" + m\n html = urllib.request.urlopen(url)\n # BeautifulSoupで扱えるようにパースします\n self.soup = BeautifulSoup(html, \"html.parser\")\n table = self.soup.find(\"table\",id=\"tbl-swap-points\")\n self.tables[targetPair] = table\n except:\n pass\n\n\n def getSwapList(self):\n returnPrice = {}\n try:\n nowTime = \"{0:%m月%d日}\".format(self.kijunbi) \n for targetPair in self.targetList:\n tbody = self.tables[targetPair].find(\"tbody\")\n trs = tbody.find_all(\"tr\")\n bid_swap = '-'\n ask_swap = '-'\n for tr in trs:\n # 対象日の行\n if(nowTime in tr.find_all(\"td\")[0].text):\n # 売SWAP取得\n bid_swap = tr.find_all(\"td\")[1].text\n # 買SWAP取得\n ask_swap = tr.find_all(\"td\")[2].text\n returnPrice[targetPair] = {\"bid_swap\" : bid_swap,\"ask_swap\":ask_swap}\n except:\n pass\n return returnPrice\n\n def getSwap(self, target):\n bid_swap = '-'\n ask_swap = '-'\n returnPrice = {}\n returnPrice[target] = {\"bid_swap\":bid_swap,\"ask_swap\":ask_swap}\n try:\n nowTime = \"{0:%m月%d日}\".format(self.kijunbi) \n if target not in self.tables:return returnPrice\n tbody = self.tables[target].find(\"tbody\")\n trs = tbody.find_all(\"tr\")\n bid_swap = '-'\n ask_swap = '-'\n for tr in trs:\n # 対象日の行\n if(nowTime in tr.find_all(\"td\")[0].text):\n # 売SWAP取得\n bid_swap = tr.find_all(\"td\")[1].text\n # 買SWAP取得\n ask_swap = tr.find_all(\"td\")[2].text\n returnPrice[target] = {\"bid_swap\" : bid_swap,\"ask_swap\":ask_swap}\n except:\n pass\n return returnPrice\n\n\nif __name__ == '__main__':\n saxo = SAXO(['PLN/JPY','TRY/JPY'],diff_day=2)\n print(\"======================================\")\n print(saxo.getSwapList())\n print(saxo.getSwap('PLN/JPY'))\n print(\"======================================\")\n\n","sub_path":"exchange/saxo.py","file_name":"saxo.py","file_ext":"py","file_size_in_byte":3068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"458888247","text":"import json\nimport datetime\nfrom ast import literal_eval\n\ndef utc_diff(zone):\n m = zone%100\n h = zone//100\n return h *3600 + m * 60\n\ndef date_format(date):\n arr = date.split()\n date = \" \".join(arr[1:-1])\n zone = int(arr[-1])\n date_obj = datetime.datetime.strptime(date, '%d %b %Y %H:%M:%S')\n return date_obj, zone\n\ndef seconds_between(date1, date2):\n arr = []\n date1 = 
date1.replace(\"_\",\" \")\n date2 = date2.replace(\"_\",\" \")\n date_obj1, zone1 = date_format(date1)\n date_obj2, zone2 = date_format(date2)\n date_diff = int((date_obj1 - date_obj2).total_seconds())\n zone_diff = utc_diff(zone1) - utc_diff(zone2)\n arr.append(str(abs(date_diff - zone_diff)))\n \n return (json.dumps(arr))\n\n","sub_path":"Task B/Seconds_between.py","file_name":"Seconds_between.py","file_ext":"py","file_size_in_byte":735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"406828102","text":"# Autor: Diana Patricia Aguilar Martínez, A01745778\r\n# Descripcion: Este programa lee los lados de dos rectangulos y calcula el area, perimetro y dice cual es mas grande o si son iguales\r\n# A partir de aquí escribe tu programa\r\n\r\ndef calcularPerimetro(base, altura):\r\n perimetro= (2*base) + (2*altura)\r\n return perimetro\r\n\r\n\r\ndef calcularArea(base, altura):\r\n area= base * altura\r\n\r\n return area\r\n\r\n\r\ndef main():\r\n base1= int(input(\"inserte la base del primer rectangulo: \"))\r\n altura1= int(input(\"inserte la altura del primer rectangulo: \"))\r\n\r\n base2 = int(input(\"inserte la base del segundo rectangulo: \"))\r\n altura2 = int(input(\"inserte la altura del segundo rectangulo: \"))\r\n\r\n perimetro1= calcularPerimetro(base1, altura1)\r\n perimetro2= calcularPerimetro(base2, altura2)\r\n\r\n area1= calcularArea(base1, altura1)\r\n area2= calcularArea(base2,altura2)\r\n\r\n print (\"el perimetro del primer rectangulo es: \", perimetro1)\r\n print (\"el perimetro del segundo rectangulo es: \", perimetro2)\r\n\r\n print (\"el área del primer rectangulo es: \" , area1)\r\n print (\"el área del segundo rectangulo es: \", area2)\r\n\r\n if area1 > area2:\r\n print (\"el rectangulo mayor es el primero\")\r\n\r\n elif area2 > area1:\r\n print(\"el rectangulo mayor es el segundo\")\r\n\r\n else:\r\n print(\"los rectangulos son iguales\")\r\n\r\n \r\n\r\nmain()","sub_path":"rectángulosCompleto.py","file_name":"rectángulosCompleto.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"232997054","text":"#1.4 Given a string, write a function to check if it is a permutation of a palin- drome.\n# A palindrome is a word or phrase that is the same forwards and backwards. 
\n# A permutation is a rearrangement of letters.The palindrome does not need to be limited to just dictionary words.\n\n\ndef isPalondrome(str1, str2):\n\tif len(str1) != len(str2):\n\t\treturn False\n\n\tchar_odd_count = 0\n\tif len(str1)%2 > 0: #is odd\n\t\tcountEvens = int(len(str1))\n\t\tstr1 = sorted(str1)\n\t\tfor char in range(len(str1)): \n\t\t\tprint (\"char:\", str1[char], str1[char+1], char)\n\t\t\t\n\t\t\t\n\n\t\t\tif str1[char + char_odd_count] != str1[char+1 + char_odd_count]:\n\t\t\t\tchar_odd_count += 1\n\t\t\t\tchar += 1\n\n\t\t\t# if (char_odd_count > 1):\n\t\t\t\t# return False\n\n\t\treturn True\n\n\n\telse: #is even\n\t\treturn True\n\n\ndef isPalondromeMap(str1, str2):\n\tmapStr1 = {}\n\tcount_odd = 0\n\tfor char in str1:\n\t\tmapStr1[char] = 0\n\n\tfor char in str1:\t\n\t\tmapStr1[char] += 1\n\t\tif mapStr1[char] > 2:\n\t\t\tprint (mapStr1)\n\t\t\treturn False\n\n\tfor char in str1:\n\t\tif mapStr1[char] == 1:\n\t\t\tcount_odd += 1\n\t\t\tif count_odd > 1:\n\t\t\t\tprint (mapStr1)\n\t\t\t\treturn False\n\n\tprint (mapStr1)\n\treturn True\n\n\n\n\n\n\t\n\n\t# for char in str1:\n\t# \tif mapStr1[char] == 1: #Check for more than one odd number\n\t# \t\tif count_odd:\n\t# \t\t\tprint (mapStr1)\n\t# \t\t\treturn False\n\t# \t\tcount_odd = True\n\t\t\t\n\n\tprint (mapStr1)\n\treturn True\t\n\n\n\n#Start\n\n\n# print(isPalondrome(\"holahola1\", \"oalh1halo\"))\nprint(isPalondromeMap(\"holahola111\", \"oalh1halo\"))\nprint(isPalondromeMap(\"oalh1halo\", \"oalh1halo\"))\nprint(isPalondromeMap(\"hola2hola1\", \"oalh1halo\"))\n\n\n","sub_path":"CTCI_book/CH1 - Arrays and strings/1.4.py","file_name":"1.4.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"14812294","text":"from functools import wraps\nimport time\n\nimport datetime\nimport json\n\nfrom ubcmhn.controllers.base import BaseController\nfrom ubcmhn.util.hnwrappers import TrFieldsByHnItem, ItemTranslationStatus\n\ndef strip_objid(mdict : dict):\n if mdict.get(\"_id\"):\n del mdict[\"_id\"]\n return mdict\n\nclass DataPresentationController(BaseController):\n\n def GetLanguages(self):\n # TODO: this should be dynamic, but since this is a toy project and we're late ;-)\n return (self.config.SRCLANG, self.config.DSTLANG1, self.config.DSTLANG2)\n\n def monkey_patch_translation(self, _hnitem, language):\n hnitem = dict(_hnitem)\n if not (language == self.config.SRCLANG):\n # Different language, monkey-patch translated text in\n\n trstatus = ItemTranslationStatus(hnitem, language)\n orig, trf, ubj = TrFieldsByHnItem(hnitem, language)\n\n unbabeljobstatus = \"N/A\"\n if hnitem.get(ubj):\n unbabeljobstatus = hnitem[ubj]['status']\n\n realtitle = hnitem.get(trf) if hnitem.get(trf) else hnitem.get(orig)\n\n itemstatus = \"\"\n\n if trstatus != \"translated\":\n itemstatus = \" (LANG:{} STATUS:{} UB_JOBSTATUS:{})\".format(\n language,\n trstatus,\n unbabeljobstatus)\n\n t = \"{0}{1}\".format(\n realtitle,\n itemstatus)\n\n hnitem[orig] = t\n\n return hnitem\n\n\n def GetIndexPageData(self, language='en') -> bool:\n\n vsl = self.db.get_visiblestorylist()\n\n itemlist = vsl['itemlist']\n\n rv = []\n\n for itemid in itemlist:\n hnitem = self.db.get_itembyhnid(itemid)\n processed_hnitem = self.monkey_patch_translation(hnitem, language)\n rv.append(strip_objid(processed_hnitem))\n\n return rv\n\n def GetStoryCommentData(self, itemid, language='en'):\n\n print(\"itemid -> {}\".format(itemid))\n\n hnstory = self.db.get_itembyhnid(itemid)\n 
if not hnstory:\n return\n if hnstory['item_type'] != 'story':\n return\n\n trhnstory = self.monkey_patch_translation(hnstory, language)\n\n def getallkids_aslist(rootitem : dict, depth=0):\n rv = []\n\n kids = rootitem.get(\"kids\")\n if not kids:\n return []\n\n for kidid in kids:\n kiditem = self.db.get_itembyhnid(kidid)\n\n if not kiditem:\n self.l().info(\"Skipping BLANK HNITEMID={}\".format(kidid))\n continue\n\n self.l().info(\"Processing HNITEMID={}\".format(kidid))\n\n # Monkey patch the language\n tr_kiditem = self.monkey_patch_translation(kiditem, language)\n\n # Assemble and add (comment depth, comment data)\n kiddata = (depth, tr_kiditem)\n rv.append(kiddata)\n\n # Recursively do the same thing to the kids' kids....\n kids_kids = kiditem.get(\"kids\")\n if kids_kids:\n newdepth = depth +1\n rv = rv + getallkids_aslist(kiditem, depth=newdepth)\n\n return rv\n\n return (trhnstory, getallkids_aslist(hnstory))","sub_path":"ubcmhn/controllers/datapresentation/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"572855929","text":"# Variables\nshell = None\nfolders = None\nconfig = None\nscm = None\n\n# Functions\ninit = None\nactions = None\n\n\ndef parseJinja(string):\n\tparsed = string.\\\n\t\t\treplace('{{shared}}',folders['shared']).\\\n\t\t\treplace('{{new}}',folders['new']).\\\n\t\t\treplace('{{code}}',folders['code']).\\\n\t\t\treplace('{{shared_run}}',folders['shared_run']).\\\n\t\t\treplace('{{shared_env}}',folders['shared_env']).\\\n\t\t\treplace('{{releases}}',folders['releases']).\\\n\t\t\treplace('{{current}}', folders['current'])\n\treturn parsed","sub_path":"lib/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"148070976","text":"# -*- coding: utf-8 -*-\n# ---------------------------------------------------------------------\n# CollectionCache model\n# ---------------------------------------------------------------------\n# Copyright (C) 2007-2013 The NOC Project\n# See LICENSE for details\n# ---------------------------------------------------------------------\n\n# NOC modules\nfrom noc.lib.nosql import (Document, StringField, UUIDField)\n\n\nclass CollectionCache(Document):\n meta = {\n \"collection\": \"noc.collectioncache\",\n \"strict\": False,\n \"auto_create_index\": False,\n \"indexes\": [\"collection\"]\n }\n\n collection = StringField()\n uuid = UUIDField(unique=True, binary=True)\n\n def unicode(self):\n return \"%s:%s\" % (self.collection, self.uuid)\n\n @classmethod\n def merge(cls, collection, uuids):\n \"\"\"\n Merge UUIDs to cache\n \"\"\"\n current = set(o.uuid for o in CollectionCache.objects.filter(\n collection=collection))\n for u in uuids - current:\n CollectionCache(collection=collection, uuid=u).save()\n","sub_path":"main/models/collectioncache.py","file_name":"collectioncache.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"343792964","text":"import nnets.hidden_layer as nhl\n\nimport theano\nimport theano.tensor as T\n\nimport numpy\n\n\n\nclass rbm_layer:\n\n def __init__(self,x,n_visible,n_hidden,rng,\n W = None,hbias = None,vbias = None,theano_rng = None):\n if theano_rng is None:theano_rng = RandomStreams(rng.randint(2**30))\n self.theano_rng = theano_rng\n\n #sigmoid_layer = 
nhl.hidden_layer(x,n_in,n_hidden,rng,'sigm')\n if W is None:\n Warray = numpy.asarray(rng.uniform(\n low=-4 * numpy.sqrt(6. / (n_hidden + n_visible)),\n high=4 * numpy.sqrt(6. / (n_hidden + n_visible)),\n size=(n_visible,n_hidden)),dtype=theano.config.floatX)\n W = theano.shared(value = Warray,name = 'W',borrow = True)\n if hbias is None:\n hbiasarray = numpy.zeros(n_hidden,dtype = theano.config.floatX)\n hbias = theano.shared(value = hbiasarray,name = 'hbias',borrow = True)\n if vbias is None:\n vbiasarray = numpy.zeros(n_visible,dtype = theano.config.floatX)\n vbias = theano.shared(value = vbiasarray,name = 'vbias',borrow = True)\n\n self.n_in = n_in\n self.n_out = n_out\n self.n_visible = n_visible\n self.n_hidden = n_hidden\n self.x = x\n self.y = sigmoid_layer.y\n self.params = [W,hbias,vbias]\n self.W = W\n self.hbias = hbias\n self.vbias = vbias\n\n\n # propagate the visible units activation upwards to the hidden units\n def propup(self,vis):\n pre_sigmoid_activation = T.dot(vis,self.W) + self.hbias\n return pre_sigmoid_activation,T.nnet.sigmoid(pre_sigmoid_activation)\n\n\n # propagates the hidden units activation downwards to the visible units\n def propdown(self,hid):\n pre_sigmoid_activation = T.dot(hid,self.W.T) + self.vbias\n return pre_sigmoid_activation,T.nnet.sigmoid(pre_sigmoid_activation)\n\n\n # infer state of hidden units given visible units \n def sample_h_given_v(self,v0_sample):\n # compute the activation of the hidden units given a sample of the visibles\n pre_sigmoid_h1,h1_mean = self.propup(v0_sample)\n h1_sample = self.theano_rng.binomial(\n size = h1_mean.shape,n = 1,p = h1_mean,dtype = theano.config.floatX)\n return pre_sigmoid_h1,h1_mean,h1_sample\n\n\n # infers state of visible units given hidden units \n def sample_v_given_h(self,h0_sample):\n # compute the activation of the visible given the hidden sample\n pre_sigmoid_v1,v1_mean = self.propdown(h0_sample)\n v1_sample = self.theano_rng.binomial(\n size = v1_mean.shape,n = 1,p = v1_mean,dtype = theano.config.floatX)\n return pre_sigmoid_v1,v1_mean,v1_sample\n\n\n # implements one step of Gibbs sampling, starting from the hidden state\n def gibbs_hvh(self,h0_sample):\n pre_sigmoid_v1,v1_mean,v1_sample = self.sample_v_given_h(h0_sample)\n pre_sigmoid_h1,h1_mean,h1_sample = self.sample_h_given_v(v1_sample)\n return pre_sigmoid_v1,v1_mean,v1_sample,pre_sigmoid_h1,h1_mean,h1_sample\n\n\n # implements one step of Gibbs sampling, starting from the visible state\n def gibbs_vhv(self, v0_sample):\n pre_sigmoid_h1,h1_mean,h1_sample = self.sample_h_given_v(v0_sample)\n pre_sigmoid_v1,v1_mean,v1_sample = self.sample_v_given_h(h1_sample)\n return pre_sigmoid_h1,h1_mean,h1_sample,pre_sigmoid_v1,v1_mean,v1_sample\n\n\n # compute the free energy \n def free_energy(self,v_sample):\n wx_b = T.dot(v_sample,self.W) + self.hbias\n vbias_term = T.dot(v_sample,self.vbias)\n hidden_term = T.sum(T.log(1 + T.exp(wx_b)),axis = 1)\n return -hidden_term - vbias_term\n\n\n # stochastic approximation to the pseudo-likelihood\n def get_pseudo_likelihood_cost(self, updates):\n # index of bit i in expression p(x_i | x_{\\i})\n bit_i_idx = theano.shared(value = 0,name = 'bit_i_idx')\n # binarize the input image by rounding to nearest integer\n xi = T.round(self.x)\n # calculate free energy for the given bit configuration\n fe_xi = self.free_energy(xi)\n # flip bit x_i of matrix xi and preserve all other bits x_{\\i}\n # Equivalent to xi[:,bit_i_idx] = 1-xi[:, bit_i_idx], \n # but assigns the result to xi_flip, instead of working in place on 
xi.\n xi_flip = T.set_subtensor(xi[:,bit_i_idx],1 - xi[:,bit_i_idx])\n # calculate free energy with bit flipped\n fe_xi_flip = self.free_energy(xi_flip)\n # equivalent to e^(-FE(x_i)) / (e^(-FE(x_i)) + e^(-FE(x_{\\i})))\n cost = T.mean(self.n_visible*T.log(T.nnet.sigmoid(fe_xi_flip - fe_xi)))\n # increment bit_i_idx % number as part of updates\n updates[bit_i_idx] = (bit_i_idx + 1) % self.n_visible\n return cost\n\n\n # approximation to the reconstruction error\n def get_reconstruction_cost(self,updates,pre_sigmoid_nv):\n cross_entropy = T.mean(T.sum(\n self.x*T.log(T.nnet.sigmoid(pre_sigmoid_nv)) +\n (1 - self.x)*T.log(1 - T.nnet.sigmoid(pre_sigmoid_nv)),axis = 1))\n return cross_entropy\n\n\n # implement one step of CD-k or PCD-k\n # persistent: None for CD. For PCD, shared variable containing old state of Gibbs chain. \n # This must be a shared variable of size (batch size, number of hidden units).\n # k: number of Gibbs steps to do in CD-k/PCD-k\n def get_cost_updates(self,lr = 0.1,persistent = None,k = 1):\n # compute positive phase\n pre_sigmoid_ph,ph_mean,ph_sample = self.sample_h_given_v(self.x)\n # decide how to initialize persistent chain:\n # for CD, we use the newly generate hidden sample\n # for PCD, we initialize from the old state of the chain\n if persistent is None:chain_start = ph_sample\n else:chain_start = persistent\n # perform actual negative phase\n # in order to implement CD-k/PCD-k we need to scan over the\n # function that implements one gibbs step k times.\n # scan will return the entire Gibbs chain\n ([pre_sigmoid_nvs,nv_means,nv_samples,\n pre_sigmoid_nhs,nh_means,nh_samples],updates) = theano.scan(\n self.gibbs_hvh,n_steps = k,name = 'gibbs_hvh',\n outputs_info = [None,None,None,None,None,chain_start])\n # determine gradients on RBM parameters\n # NOTE: that we only need the sample at the end of the chain\n chain_end = nv_samples[-1]\n cost = T.mean(self.free_energy(self.x)) - T.mean(self.free_energy(chain_end))\n gparams = T.grad(cost,self.params,consider_constant = [chain_end])\n for gparam, param in zip(gparams, self.params):\n updates[param] = param - gparam*T.cast(lr,dtype = theano.config.floatX)\n # pseudo-likelihood is a better proxy for PCD\n # reconstruction cross-entropy is a better proxy for CD\n if persistent:\n # NOTE: that this works only if persistent is a shared variable\n updates[persistent] = nh_samples[-1]\n monitoring_cost = self.get_pseudo_likelihood_cost(updates)\n else:monitoring_cost = self.get_reconstruction_cost(updates,pre_sigmoid_nvs[-1])\n return monitoring_cost,updates\n\n\n\n\n","sub_path":"src/nnets/rbm_layer.py","file_name":"rbm_layer.py","file_ext":"py","file_size_in_byte":7201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"620499438","text":"import requests\nimport re\nfrom bs4 import BeautifulSoup\n\ndef get_law(url, headers):\n html = requests.get(url, headers = headers).content.decode('utf-8')\n soup = BeautifulSoup(html, 'html.parser')\n\n views = []\n view_list = soup.find_all('h2', 'headline')\n for view in view_list:\n view_title = view.find('a').get_text()\n view_a = view.find('a')\n view_link = view_a['href']\n views.append([view_title, view_link])\n\n news = []\n new_list = soup.find_all('p', 'pis-title')\n for new in new_list:\n new_title = new.find('a').get_text()\n new_a = new.find('a')\n new_link = new_a['href']\n news.append([new_title, new_link])\n \n yield{\n 'view': views,\n 'new': news\n }\n \n\ndef main():\n url = 'http://conflictoflaws.net'\n 
headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3722.400 QQBrowser/10.5.3738.400'\n }\n \n law = get_law(url, headers)\n for _ in law:\n print(_)\n\n\nif __name__ == '__main__':\n main()","sub_path":"每日邮件(自写完结)/law.py","file_name":"law.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"435429560","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 30 15:58:28 2019\n\n@author: Bahij\n\"\"\"\n\n#data preprocessing\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\ndataset = pd.read_csv('Churn_Modelling.csv')\nX = dataset.iloc[:, 3:13].values\ny = dataset.iloc[:, 13].values\n\n# Encoding categorical data\nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder_X_1 = LabelEncoder()\nX[:, 1] = labelencoder_X_1.fit_transform(X[:, 1])\nlabelencoder_X_2 = LabelEncoder()\nX[:, 2] = labelencoder_X_2.fit_transform(X[:, 2])\n\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Value Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\n#The ANN\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\n\nmodel = Sequential()\n\n#add input and first hidden layer with dropout\nmodel.add(Dense(activation = 'relu', units = 6, kernel_initializer = 'uniform', input_dim = 10))\nmodel.add(Dropout(rate = 0.1))\n# add second hidden layer\nmodel.add(Dense(activation = 'relu', units = 6, kernel_initializer = 'uniform'))\nmodel.add(Dropout(rate = 0.1))\n#add output layer\nmodel.add(Dense(activation = 'sigmoid', units = 1, kernel_initializer = 'uniform'))\n\n#compile the ann\nmodel.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\n# fit ann to training set\nmodel.fit(X_train, y_train, batch_size = 40, epochs = 80)\n\n#part 3\n# Predicting the Test set results\ny_pred = model.predict(X_test)\ny_pred = (y_pred > 0.5)\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\nnewpred = model.predict(sc.transform(np.array([[600.0,0,1,40,3,60000,2,1,1,50000]])))\nnewpred = (newpred > 0.5)\nprint(newpred)\n","sub_path":"DeepLearning/ArtificialNeuralNetwork/ann.py","file_name":"ann.py","file_ext":"py","file_size_in_byte":1911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"509291108","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*- vim60:fdm=marker\n#\n# Copyright: 2016, Maximiliano Curia \n#\n# License: ISC\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n# .\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND ISC DISCLAIMS ALL WARRANTIES WITH\n# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY\n# AND FITNESS. 
IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,\n# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM\n# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE\n# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR\n# PERFORMANCE OF THIS SOFTWARE.\n\n''' Miscelaneous data types '''\n\nimport re\n\nfrom collections import namedtuple\n\nReSub = namedtuple('ReSub', ('re', 'repl'))\nUNKNOWN = 'Unknown'\nUNKNOWN_COPYRIGHTED = 'UnknownCopyrighted'\n\n\nclass License(object):\n\n licenses = {}\n\n def __init__(self, name):\n self.name = name\n self.stored = None\n\n def __str__(self):\n if self.stored:\n return self.stored.dump().rstrip('\\n')\n return ('License: {name}\\n'\n 'Comment: Add the corresponding license text here'.format(\n name=self.name))\n\n @staticmethod\n def get(name):\n if name not in License.licenses:\n License.licenses[name] = License(name)\n\n return License.licenses[name]\n\n\nclass YearRange(object):\n\n def __init__(self, low=0, high=0):\n low_value = int(low)\n high_value = int(high)\n if low_value > high_value:\n low_value, high_value = high_value, low_value\n self.low = low_value\n self.high = high_value\n\n def __in__(self, year):\n value = int(year)\n return self.low <= value <= self.high\n\n def add(self, year):\n value = int(year)\n if not value:\n return\n if not self.low or value < self.low:\n self.low = value\n if not self.high or self.high < value:\n self.high = value\n return self\n\n def newer(self, other):\n if self.high and other.high:\n return other.high > self.high\n return not self.high and other.high\n\n def merge(self, other):\n self.add(other.low)\n self.add(other.high)\n return self\n\n def __str__(self):\n if not self.low:\n return ''\n if self.low == self.high:\n return str(self.low)\n return str(self.low) + '-' + str(self.high)\n\n\nclass CopyrightHolder(object):\n\n year_re = re.compile(r'\\s*(?:[\\s:([]*)?(?P\\d{2,})[]:\\s]*'\n r'(?:[-~=–—][\\s:[]*(?P\\d{1,})[]:\\s]*)?[,/)]*')\n holder_re = re.compile(r'\\s*(?:by\\s*)?(?P\\S.*?\\S)[\\s\"\\*,;/]*$', re.I)\n\n name_email_re = re.compile(\n r'(?P\\S.*?\\S)?(?(name)(?:\\s|(?=[<(])|$)|)\\s*'\n r'[<(/\\\\]*(?P[^\\s<>]+?@[^\\s<>@]+?)?(?(email)[)<>/\\\\]*|)$')\n\n email_subs = [\n ReSub(re.compile(r''), r''),\n ReSub(re.compile(r'%20'), r' '),\n ReSub(re.compile(r'<?'), r'<'),\n ReSub(re.compile(r'>?'), r'>'),\n ReSub(re.compile(r'@'), r'@'),\n ReSub(re.compile(r'“?'), r'\"'),\n ReSub(re.compile(r'\\(c\\)$', re.I), r''),\n # Expensive fix for \") at the end of the string\n ReSub(re.compile(r'((?P\\()?(?(paren).*?|))(?(paren)|\\)+)?$'), r'\\1'),\n ReSub(re.compile(r'\\s+\\(?(where|at|@)\\)?\\s+', re.I), r'@'),\n ReSub(re.compile(r'\\(at\\)', re.I), r'@'),\n ReSub(re.compile(r'\\s+\\(?do?[tm]\\)?\\s+', re.I), r'.'),\n # Ugly fix for >mail@example.com<\n ReSub(re.compile(r'(?:^|(?<=\\s))\\s*\\>\\s*(?=\\w(?:\\w|[.-])*@)'), r'<'),\n ReSub(re.compile(r'\\<\\s*$'), r'>'),\n ReSub(re.compile(r'(?:^|(?<=\\s))\\s*((?!\\<)\\w(?:\\w|[.-])*@'\n r'?:\\w(?:\\w|-)+(?:\\.\\w(?:\\w|-)+)+(?))\\s*(?:(?=\\s)|$)'),\n r'<\\1>'),\n ]\n\n name_cruft_subs = [\n ReSub(re.compile(r'', re.IGNORECASE), r''),\n ReSub(re.compile(r'^>', re.IGNORECASE), r''),\n ReSub(re.compile(r'<$', re.IGNORECASE), r''),\n ReSub(re.compile(r'\\\\[nt]$', re.IGNORECASE), r''),\n ReSub(re.compile(r'^\\(\\s*c\\s*\\)\\s*', re.IGNORECASE), r''),\n ]\n\n def __init__(self, name, email, years):\n self.name = name\n self.email = email\n self.years = years\n\n def merge(self, other):\n 
if other.name and self.years.newer(other.years):\n self.name = other.name\n self.years.merge(other.years)\n return self\n\n @property\n def person(self):\n result = self.name\n if self.name and self.email:\n result += ' '\n if self.email:\n result += '<{}>'.format(self.email)\n return result\n\n def __str__(self):\n result = str(self.years)\n result += ', ' if result else ''\n result += self.person\n return result\n\n def __repr__(self):\n return str(self)\n\n @staticmethod\n def _get_year(text):\n year = int(text)\n if year < 50:\n year += 2000\n if year < 100:\n year += 1900\n return year\n\n @staticmethod\n def get_name_email(text):\n # De-cruft email\n for sub in CopyrightHolder.email_subs:\n text = sub.re.sub(sub.repl, text)\n\n match = CopyrightHolder.name_email_re.match(text)\n if not match:\n return None, None\n match_dict = match.groupdict()\n name = match_dict.get('name', '')\n if name is None:\n name = ''\n name = name.strip(r''',.;*'\"@-–—[]{} ''')\n for sub in CopyrightHolder.name_cruft_subs:\n name = sub.re.sub(sub.repl, name)\n email = match_dict.get('email', None)\n return name, email\n\n @staticmethod\n def from_copyright(copyright_):\n\n def get_years(text, years):\n start = len(text)\n end = 0\n year_match = CopyrightHolder.year_re.search(text)\n while year_match:\n match_dict = year_match.groupdict()\n low = CopyrightHolder._get_year(match_dict['lo'])\n years.add(low)\n if match_dict.get('hi', None):\n high = CopyrightHolder._get_year(match_dict['hi'])\n if high < low:\n # 2001-4 -> '200' + '4'\n # new_high = \\\n # match_dict['lo'][:- len(match_dict['hi'])] + \\\n # match_dict['hi']\n high = CopyrightHolder._get_year(match_dict['hi'])\n years.add(high)\n if start > year_match.start(0):\n start = year_match.start(0)\n end = year_match.end(0)\n year_match = CopyrightHolder.year_re.match(text, end)\n return start, end\n\n years = YearRange()\n start, end = get_years(copyright_, years)\n if start < end:\n copyright_ = copyright_[:start] + copyright_[end:]\n\n match = CopyrightHolder.holder_re.match(copyright_)\n if match:\n holder = match.group('holder')\n name, email = CopyrightHolder.get_name_email(holder)\n if not name and not email:\n return\n return CopyrightHolder(name, email, years)\n","sub_path":"decopy/datatypes.py","file_name":"datatypes.py","file_ext":"py","file_size_in_byte":7539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"58103955","text":"'''\nCreated on Nov 22, 2017\n\n@author: Monica\n'''\nfrom validator.validatorCommands import validatorCommands,\\\n validatorCommandsException\nfrom repository.repository import repositoryExceptions\nfrom validator.validatorRent import validatorRentExceptions, validatorRent\nfrom validator.validatorMovie import validatorMovie, validatorMovieExceptions\nfrom validator.validatorCustomer import validatorCustomer,\\\n validatorCustomerExceptions\n\nclass rentsMenu:\n def __init__(self,srvRent):\n self.__serviceRent=srvRent\n self.valid=validatorCommands()\n self.validM=validatorMovie()\n self.validC=validatorCustomer()\n self.validR=validatorRent()\n def addRent(self):\n '''\n rents a movie to a customer\n '''\n idR=input(\"Give the rent id:\")\n idM=input(\"Give the movie id:\")\n idC=input(\"Give the customer id:\")\n try:\n try:\n self.validM.validateId(idM)\n self.validC.validateId(idC)\n self.validR.validate(idR)\n idR=int(idR)\n idM=int(idM)\n idC=int(idC)\n \n r=self.__serviceRent.createRent(idR,idM,idC)\n print(\" Rent \" + str(r.getId()) + \" added 
succesfully!\\n\")\n\n except validatorRentExceptions as ex:\n print(ex)\n except validatorMovieExceptions as ex:\n print (ex)\n except validatorCustomerExceptions as ex:\n print(ex)\n \n except repositoryExceptions as ex:\n print(ex)\n except validatorRentExceptions as ex:\n print(ex)\n except validatorCustomerExceptions as ex:\n print(ex)\n except validatorMovieExceptions as ex:\n print(ex)\n \n def removeAllRents(self):\n '''removes all rents'''\n try:\n self.__serviceRent.removeAll()\n print(\"Rents removed succesfully!\\n\")\n except repositoryExceptions as ex:\n print(ex)\n except validatorRentExceptions as ex:\n print(ex)\n \n def findById(self):\n '''finds a rent by id'''\n idR=input(\"Give the id:\")\n try:\n try:\n self.validR.validate(idR)\n idR=int(idR)\n except validatorRentExceptions as ex:\n print(ex)\n r=self.__serviceRent.find(\"id\",idR)\n if len(r)==0:\n print(\"No rent found!\\n\")\n else:\n print(\"Rent found succesfully ! \"+r.__str__())\n except repositoryExceptions as ex:\n print(ex)\n except validatorRentExceptions as ex:\n print(ex)\n \n def findByMovieId(self):\n '''finds a rent by a movie id'''\n idM=input(\"Give the movie id:\")\n try:\n try:\n self.validM.validateId(idM)\n idM=int(idM)\n except validatorMovieExceptions as ex:\n print(ex)\n r=self.__serviceRent.find(\"idM\",idM)\n if len(r)==0:\n print(\"No rent found!\\n\")\n else:\n print(\"Rent found succesfully ! \"+r.__str__())\n except repositoryExceptions as ex:\n print(ex)\n except validatorRentExceptions as ex:\n print(ex)\n \n def findByCustomerId(self):\n '''finds a rent by a customer id'''\n idC=input(\"Give the customer id:\")\n try:\n try:\n self.validC.validateId(idC)\n idC=int(idC)\n except validatorCustomerExceptions as ex:\n print(ex)\n r=self.__serviceRent.find(\"idC\",idC)\n if len(r)==0:\n print(\"No rent found!\\n\")\n else:\n print(\"Rent found succesfully ! 
\"+r.__str__())\n except repositoryExceptions as ex:\n print(ex)\n except validatorRentExceptions as ex:\n print(ex)\n \n def sortById(self):\n '''sorts the rents by id'''\n if self.__serviceRent.getNrRents()==0:\n print(\"The list has no rents!\\n\")\n else:\n rents=self.__serviceRent.sortBy(\"id\")\n for i in rents:\n print(i.__str__())\n print(\"List sorted succesfully!\\n\")\n \n def showARent(self):\n '''shows a rent with an given id'''\n idR=input(\"Give the rent id:\")\n try:\n self.validR.validate(idR)\n idR=int(idR)\n for i in self.__serviceRent.getAll():\n if int(i.getId())==idR:\n print(i.__str__())\n except repositoryExceptions as ex:\n print(ex)\n except validatorRentExceptions as ex:\n print(ex)\n \n def showAllRents(self):\n '''shows all the rents'''\n printed=False\n for i in self.__serviceRent.getAll():\n print(i.__str__())\n printed=True\n if printed==False:\n print(\"There are't rents yet!\\n\")\n \n def populateRandom(self,limit):\n '''\n populate the repository with random elements\n '''\n while limit!=0:\n try:\n limit=self.__serviceRent.populateRandom(limit) \n print(\"The repository was populated successfully!\\n\")\n except repositoryExceptions:\n print(\"The repository was populated successfully but with less rents!\\n\")\n break\n \n def show(self):\n while True:\n print(\"===Rents menu===\")\n print(\"1.Add\")\n print(\"2.Find\")\n print(\"3.Show\")\n print(\"4.Sort by id\")\n print(\"5.Populate random\")\n print(\"0.Exit\")\n print()\n cmd=input(\"Give command:\")\n try:\n self.valid.validate(cmd,5)\n com=int(cmd)\n if com==0:\n return\n elif com==1:\n self.addRent()\n elif com==2:\n print(\"--Find menu--\")\n print(\"1.By id\")\n print(\"2.By movie id\")\n print(\"3.By customer id\")\n print(\"0.Exit\")\n print()\n cmd=input(\"Give command:\")\n try:\n self.valid.validate(cmd,3)\n com=int(cmd)\n if com==0:\n break\n elif com==1:\n self.findById()\n elif com==2:\n self.findByMovieId()\n elif com==3:\n self.findByCustomerId()\n except validatorCommandsException as ex:\n print(ex)\n elif com==3:\n print(\"--Show menu--\")\n print(\"1.Show a rent\")\n print(\"2.Show all rents\")\n print(\"0.Exit\")\n print()\n cmd=input(\"Give command:\")\n try:\n self.valid.validate(cmd,2)\n com=int(cmd)\n if com==0:\n break\n elif com==1:\n self.showARent()\n elif com==2:\n self.showAllRents()\n except validatorCommandsException as ex:\n print(ex)\n elif com==4:\n self.sortById()\n elif com==5:\n try:\n limit=int(input(\"Give the number of rents:\"))\n self.populateRandom(limit)\n except ValueError:\n print(\"The limit can't be string!\\n\") \n \n except validatorCommandsException as ex:\n print(ex)\n","sub_path":"UI/rentsMenu.py","file_name":"rentsMenu.py","file_ext":"py","file_size_in_byte":8059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"600734390","text":"##Joseph Napolitano written 10/8/17 determine population of bears\r\nimport math\r\ndef find_pop(bears,berries,tourists):\r\n bears_next = berries/(50*(bears+1))+bears*0.60- (math.log(1+tourists,10)*0.1)\r\n berries_next = (berries*1.5) - (bears+1)*(berries/14)-(math.log(1+tourists,10)*0.05)\r\n return (int(bears_next),berries_next)\r\nibears = input ('Number of bears => ')\r\nprint (ibears)\r\nbears_new = int(ibears)\r\niberry_area = input ('Size of berry area => ')\r\nprint (iberry_area)\r\niberry_area = float(iberry_area)\r\nprint('Year\\tBears\\tBerries\\tTourists')\r\ny = 1\r\nif 4>bears_new or bears_new>15:\r\n t = 0\r\nelif 10 < bears_new and bears_new < 
15:\r\n t = 10000*(10)+(bears_new-10)*20000\r\nelif bears_new<=10:\r\n t=10000*bears_new\r\nprint (y, bears_new, iberry_area, t,sep='\\t')\r\nberries =[]\r\nbears = []\r\ntours = []\r\nbears.append(bears_new)\r\nberries.append(iberry_area)\r\ntours.append(t)\r\nwhile y<10:\r\n (bears_new,iberry_area) = find_pop(bears_new,iberry_area,t)\r\n if iberry_area<0:\r\n iberry_area=0.0\r\n if bears_new<0:\r\n bears_new=0 \r\n if 4>bears_new or bears_new>15:\r\n t = 0\r\n elif 10 < bears_new and bears_new < 15:\r\n t = 10000*(10)+(bears_new-10)*20000\r\n elif bears_new<=10:\r\n t=10000*bears_new\r\n bears.append(bears_new)\r\n berries.append(iberry_area)\r\n tours.append(t)\r\n y += 1\r\n print (y,bears_new,round(iberry_area,1),t,sep='\\t')\r\nprint()\r\nprint ('Min:' ,min(bears), round(min(berries),1),min(tours),sep='\\t')\r\nprint ('Max:' ,max(bears),round(max(berries),1),max(tours),sep='\\t')\r\n\r\n","sub_path":"cs1/hw3/hw3Part2.py","file_name":"hw3Part2.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"5923349","text":"\"\"\"\nCreate an exe file\n\n\n\"\"\"\n\n\"\"\"\nThis is module 'urllib', you can access the web by python\nNOTE THAT YOU SHOULD USE API\nESPECIALLY ON LARGE WEBSITES WITH LARGE AMOUNTS\nOF DATA\n\"\"\"\n\n# you have to do it this way for py 3\nimport urllib.request\n\n# performs a get request tot he website\n# THIS IS A GET\n# x = urllib.request.urlopen('http://www.google.com')\n# print(x.read())\n\nimport urllib.parse\n\n# THIS IS A POST\nurl = 'http://pythonprogramming.net'\n# values that we are looking for\nvalues = {'s': 'basic', 'submit': 'search'}\n\n# use encoding for adding values, since it\n# will handle the questions marks, spaces ect...\ndata = urllib.parse.urlencode(values)\n# what kind of ecoding do we want to use\ndata = data.encode('utf-8')\n# the actual request\nreq = urllib.request.Request(url, data)\n# actually respond\nresp = urllib.request.urlopen(req)\nrespData = resp.read()\n\nprint(respData)\n\n# this will fail\n# since google will not allow bots\ntry:\n x = urllib.request.urlopen('http://www.google.com/search?q=test')\n print(x.read())\nexcept Exception as e:\n print(str(e))\n\n# you can get around this by:\ntry:\n url = 'http://www.google.com/search?q=test'\n # header contian diferent things about you that\n # that websites use to identify you\n headers = {}\n # this is user agent\n # i just went and copied one form the web\n # it was easy, and will trick the website\n headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.135 Safari/537.36 Edge/12.246'\n req = urllib.request.Request(url, headers=headers)\n resp = urllib.request.urlopen(req)\n respData = resp.read()\n\n # add the data to the file\n saveFile = open('withHeaders.txt', 'w')\n saveFile.write(str(respData))\n saveFile.close()\nexcept Exception as e:\n print(str(e))\n","sub_path":"test_file.py","file_name":"test_file.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"473185740","text":"\"\"\"Platform for Garmin Connect integration.\"\"\"\nfrom __future__ import annotations\n\nimport logging\n\nfrom homeassistant.components.sensor import SensorEntity\nfrom homeassistant.config_entries import ConfigEntry\nfrom homeassistant.const import ATTR_ATTRIBUTION, CONF_ID, DEVICE_CLASS_TIMESTAMP\nfrom homeassistant.core import 
HomeAssistant\nfrom homeassistant.helpers.entity import DeviceInfo\nfrom homeassistant.helpers.update_coordinator import (\n CoordinatorEntity,\n DataUpdateCoordinator,\n)\n\nfrom .alarm_util import calculate_next_active_alarms\nfrom .const import (\n ATTRIBUTION,\n DATA_COORDINATOR,\n DOMAIN as GARMIN_DOMAIN,\n GARMIN_ENTITY_LIST,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nasync def async_setup_entry(\n hass: HomeAssistant, entry: ConfigEntry, async_add_entities\n) -> None:\n \"\"\"Set up Garmin Connect sensor based on a config entry.\"\"\"\n coordinator: DataUpdateCoordinator = hass.data[GARMIN_DOMAIN][entry.entry_id][\n DATA_COORDINATOR\n ]\n unique_id = entry.data[CONF_ID]\n\n entities = []\n for (\n sensor_type,\n (name, unit, icon, device_class, enabled_by_default),\n ) in GARMIN_ENTITY_LIST.items():\n\n _LOGGER.debug(\n \"Registering entity: %s, %s, %s, %s, %s, %s\",\n sensor_type,\n name,\n unit,\n icon,\n device_class,\n enabled_by_default,\n )\n entities.append(\n GarminConnectSensor(\n coordinator,\n unique_id,\n sensor_type,\n name,\n unit,\n icon,\n device_class,\n enabled_by_default,\n )\n )\n\n async_add_entities(entities)\n\n\nclass GarminConnectSensor(CoordinatorEntity, SensorEntity):\n \"\"\"Representation of a Garmin Connect Sensor.\"\"\"\n\n def __init__(\n self,\n coordinator,\n unique_id,\n sensor_type,\n name,\n unit,\n icon,\n device_class,\n enabled_default: bool = True,\n ):\n \"\"\"Initialize a Garmin Connect sensor.\"\"\"\n super().__init__(coordinator)\n\n self._unique_id = unique_id\n self._type = sensor_type\n self._name = name\n self._unit = unit\n self._icon = icon\n self._device_class = device_class\n self._enabled_default = enabled_default\n\n @property\n def name(self):\n \"\"\"Return the name of the sensor.\"\"\"\n return self._name\n\n @property\n def icon(self):\n \"\"\"Return the icon to use in the frontend.\"\"\"\n return self._icon\n\n @property\n def state(self):\n \"\"\"Return the state of the sensor.\"\"\"\n if not self.coordinator.data or not self.coordinator.data[self._type]:\n return None\n\n value = self.coordinator.data[self._type]\n if \"Duration\" in self._type or \"Seconds\" in self._type:\n value = value // 60\n elif \"Mass\" in self._type or self._type == \"weight\":\n value = value / 1000\n elif self._type == \"nextAlarm\":\n active_alarms = calculate_next_active_alarms(\n self.coordinator.data[self._type]\n )\n if active_alarms:\n value = active_alarms[0]\n else:\n value = None\n\n if self._device_class == DEVICE_CLASS_TIMESTAMP:\n return value\n\n return round(value, 2)\n\n @property\n def unique_id(self) -> str:\n \"\"\"Return the unique ID for this sensor.\"\"\"\n return f\"{self._unique_id}_{self._type}\"\n\n @property\n def unit_of_measurement(self):\n \"\"\"Return the unit of measurement.\"\"\"\n return self._unit\n\n @property\n def extra_state_attributes(self):\n \"\"\"Return attributes for sensor.\"\"\"\n if not self.coordinator.data:\n return {}\n\n attributes = {\n \"source\": self.coordinator.data[\"source\"],\n \"last_synced\": self.coordinator.data[\"lastSyncTimestampGMT\"],\n ATTR_ATTRIBUTION: ATTRIBUTION,\n }\n if self._type == \"nextAlarm\":\n attributes[\"next_alarms\"] = calculate_next_active_alarms(\n self.coordinator.data[self._type]\n )\n\n return attributes\n\n @property\n def device_info(self) -> DeviceInfo:\n \"\"\"Return device information.\"\"\"\n return {\n \"identifiers\": {(GARMIN_DOMAIN, self._unique_id)},\n \"name\": \"Garmin Connect\",\n \"manufacturer\": \"Garmin Connect\",\n }\n\n @property\n 
def entity_registry_enabled_default(self) -> bool:\n \"\"\"Return if the entity should be enabled when first added to the entity registry.\"\"\"\n return self._enabled_default\n\n @property\n def available(self) -> bool:\n \"\"\"Return True if entity is available.\"\"\"\n return (\n super().available\n and self.coordinator.data\n and self._type in self.coordinator.data\n )\n\n @property\n def device_class(self):\n \"\"\"Return the device class of the sensor.\"\"\"\n return self._device_class\n","sub_path":"custom_components/garmin_connect/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":5099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"289275204","text":"import os\nimport json\nimport shutil\n\ndel_extension = {\n\t'.tmp': '临时文件',\n\t'._mp': '临时文件_mp',\n\t'.log': '日志文件',\n\t'.gid': '临时帮助文件',\n\t'.chk': '磁盘检查文件',\n\t'.old': '临时备份文件',\n\t'.xlk': 'Excel备份文件',\n\t'.bak': '临时备份文件bak'\n}\ndel_userprofile = ['cookies', 'recent', 'Temporary Internet Files', 'Temp']\ndel_windir = ['prefetch', 'temp']\n\n# 获取系统盘\nSYS_DRIVE = os.environ['systemdrive'] + '\\\\'\n# 获取用户目录\nUSER_PROFILE = os.environ['userprofile']\n# 获取 Windows 目录\nWIN_DIR = os.environ['windir']\n\n# 获取当前路径 os.getcwd() 'E:\\\\Software\\\\Python27'\n# 跳转至指定的文件目录 os.chdir('d://wamp')\n# 获取系统盘符 os.environ['systemdrive'] 'C:'\n# 获取用户目录 os.environ['userprofile'] 'C:\\\\Users\\\\Administrator'\n# 获取 Windows 目录 os.environ['windir'] 'C:\\\\Windows'\n\n# 删除文件或文件夹\ndef del_dir_or_file(root):\n\ttry:\n\t\tif os.path.isfile(root):\n\t\t\t# 删除文件\n\t\t\tos.remove(root)\n\t\t\tprint('file: ' + root + ' removed\\n')\n\t\telif os.path.isdir(root):\n\t\t\t# 删除文件夹\n\t\t\tshutil.rmtree(root)\n\t\t\tprint('directory: ' + root + ' removed\\n')\n\texcept WindowsError:\n\t\tprint('failure: ' + root + \" can't remove\\n\")\n\n# 字节bytes转化kb\\m\\g\ndef formatSize(bytes):\n\ttry:\n\t\tbytes = float(bytes)\n\t\tkb = bytes / 1024\n\texcept:\n\t\tprint('The incoming byte format is not correct!')\n\t\treturn \"Error\"\n\tif kb >= 1024:\n\t\tM = kb / 1024\n\t\tif M >= 1024:\n\t\t\tG = M / 1024\n\t\t\treturn \"%fG\" % (G)\n\t\telse:\n\t\t\treturn \"%fM\" % (M)\n\telse:\n\t\treturn \"%fkb\" % (kb)\n\nclass DiskClean(object):\n\tdef __init__(self):\n\t\tself.del_info = {}\n\t\tself.del_file_paths = []\n\t\tself.total_size = 0\n\t\tfor k,v in del_extension.items():\n\t\t\tself.del_info[k] = dict(name = v, count = 0)\n\n\tdef scan(self):\n\t\tfor roots, dirs, files in os.walk(USER_PROFILE, topdown=False):\n\t\t\t# 生成并展开以 root 为根目录的目录树,参数 topdown 设定展开方式从底层到顶层\n\t\t\tfor file_item in files:\n\t\t\t\t# 获取扩展名\n\t\t\t\tfile_extension = os.path.splitext(file_item)[1]\n\t\t\t\t# print os.path.join(roots, file_item)\n\t\t\t\tif file_extension in self.del_info:\n\t\t\t\t\t# 文件完整路径\n\t\t\t\t\tfile_full_path = os.path.join(roots, file_item)\n\t\t\t\t\tself.del_file_paths.append(file_full_path)\n\t\t\t\t\tself.del_info[file_extension]['count'] += 1\n\t\t\t\t\tself.total_size += os.path.getsize(file_full_path)\n\n\tdef show(self):\n\t\tprint(json.dumps(self.del_info, indent=4, ensure_ascii=False))\n\t\tprint('删除可节省:%s 空间' % formatSize(self.total_size))\n\n\tdef delete_files(self):\n\t\tfor i in self.del_file_paths:\n\t\t\tdel_dir_or_file(i)\n\nif __name__ == '__main__':\n\tcleaner = DiskClean()\n\tcleaner.scan()\n\tcleaner.show()\n\tif_del = input('是否删除y/n:')\n\tif if_del == 
'y':\n\t\tcleaner.delete_files()\n","sub_path":"DiskClean.py","file_name":"DiskClean.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"453257026","text":"#encoding:utf-8\n#script_param_mark#\nimport os,sys,shutil\nfrom distutils.sysconfig import get_python_lib\nprint('copyBasePy...start')\nG_NODE_PY=r'c:\\script\\new_py'\nnode_py_cg=G_NODE_PY+r'\\cg'\nif G_ACTION == 'PreRender':\n G_ACTION = 'Render'\nif G_RENDER_OS=='0':#linux\n G_NODE_PY=r'/root/rayvision/script/new_py'\n node_py_cg=G_NODE_PY+r'/cg'\nelse:\n G_SCRIPT_POOL=G_SCRIPT_POOL.replace('/','\\\\')\n G_TASK_JSON=G_TASK_JSON.replace('/','\\\\')\nif not os.path.exists(node_py_cg):\n os.makedirs(node_py_cg)\ndef copy_folder(source,target):\n print(source+' ----------> '+target)\n if not os.path.exists(target):\n os.makedirs(target)\n if os.path.exists(source):\n for root, dirs, files in os.walk(source):\n print('copyBasePy...')\n for dirname in dirs:\n tdir=os.path.join(root,dirname)\n if not os.path.exists(tdir):\n os.makedirs(tdir)\n for i in range(0, files.__len__()):\n sf = os.path.join(root, files[i])\n folder=target+root[len(source):len(root)]+\"/\"\n if not os.path.exists(folder):\n os.makedirs(folder)\n shutil.copy(sf,folder)\ndef copy_py_site_package():\n lib_path=get_python_lib()\n print('lib_path='+lib_path)\n script_site_package=os.path.join(G_SCRIPT_POOL,'SitePackage')\n script_cg_site_package=os.path.join(G_SCRIPT_POOL,'CG',G_CG_NAME,'SitePackage')\n user_site_packageos=os.path.join(G_SCRIPT_POOL,'user',G_USER_ID,'SitePackage')\n print('script_site_package='+script_site_package)\n print('script_cg_site_package='+script_cg_site_package)\n print('user_site_packageos='+user_site_packageos)\n copy_folder(script_site_package,lib_path)\n copy_folder(script_cg_site_package,lib_path)\n copy_folder(user_site_packageos,lib_path)\nscript_base=os.path.join(G_SCRIPT_POOL,'Base')\nscript_cg=os.path.join(G_SCRIPT_POOL,'CG',G_CG_NAME)\nscript_util=os.path.join(G_SCRIPT_POOL,'Util')\nscript_node_base=os.path.join(G_NODE_PY,'Base')\nscript_node_cg=os.path.join(G_NODE_PY,'CG',G_CG_NAME)\nscript_node_cg_function=os.path.join(script_node_cg,'function')\nscript_node_cg_process=os.path.join(script_node_cg,'process')\nscript_node_util=os.path.join(G_NODE_PY,'Util')\nuser_render_py=os.path.join(G_SCRIPT_POOL,'user',G_USER_ID)\nprint('G_SCRIPT_POOL='+G_SCRIPT_POOL)\nprint('script_base='+script_base)\nprint('script_cg='+script_cg)\nprint('script_util='+script_util)\nprint('G_NODE_PY='+G_NODE_PY)\nprint('script_node_base='+script_node_base)\nprint('script_node_cg='+script_node_cg)\nprint('script_node_util='+script_node_util)\nprint('user_render_py='+user_render_py)\ntry:\n shutil.rmtree(G_NODE_PY)\nexcept Exception as e:\n pass\ncopy_folder(script_base,script_node_base)\ncopy_folder(script_cg,script_node_cg)\ncopy_folder(script_util,script_node_util)\ncopy_folder(user_render_py,G_NODE_PY)\ncopy_py_site_package()\nsys.path.append(G_NODE_PY)\nsys.path.append(script_node_base)\nsys.path.append(script_node_cg)\nsys.path.append(script_node_cg_function)\nsys.path.append(script_node_cg_process)\nsys.path.append(script_node_util)\nprint('copyBasePy...end')\n#-----------------------------------------------------\nif(__name__==\"__main__\"):\n paramDict = {}\n paramDict['G_SYS_ARGVS'] = sys.argv\n namespace_list = dir()\n for name in namespace_list:\n if name.startswith('G'):\n paramDict[name] = eval(name)\n #print(paramDict)\n 
print('G_CG_NAME=================='+G_CG_NAME)\n print('G_ACTION==================='+G_ACTION)\n module_name = G_ACTION+G_CG_NAME\n if G_ACTION == 'RenderPhoton' or G_ACTION == 'MergePic' or G_ACTION == 'GopRender':\n module_name = 'Render'+G_CG_NAME\n if G_ACTION == 'MergePhoton':\n module_name = 'Merge'+G_CG_NAME\n class_name = module_name\n #-----------import cg py model---------------\n from_str = 'from '+module_name+' import *'\n print('from_str='+from_str)\n print('class_name='+class_name)\n exec(from_str)\n exec('render='+class_name+'(**paramDict)')\n exec('render.RB_EXECUTE()')\n","sub_path":"new_render_data/input/p/script/model/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":4076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"164565296","text":"#! /usr/bin/env python3\nimport socket\nimport sys\nimport os\nimport time\n\ndef daemon_server():\n server_address = '/tmp/accelerator_socket'\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n #Bind socket to port\n sock.bind(server_address)\n #listen for incoming connections\n sock.listen(1)\n while True:\n #print('waiting for a connection')\n connection, client_address = sock.accept()\n try:\n #print('connection from: ', client_address)\n while True:\n data = connection.recv(16)\n if data and data != str.encode(\"\\0\"):\n connection.sendall(data)\n else:\n #print('no more data from: ', client_address)\n break\n #print(\"\")\n finally:\n print(\"close server connection to client\")\n connection.close()\n\ndef fork_creation():\n print(\"fork creation function\")\n pid = os.fork()\n if pid == 0:\n print(\"hello there!\")\n os.setsid()\n pid1 = os.fork()\n if pid1 == 0:\n print(\"hello there!\")\n daemon_server()\n else: exit(0)\n else:\n time.sleep(5)\n return\n\nif __name__ == \"__main__\":\n # Create a UDS socket\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n\n # Connect the socket to the port where the server is listening\n server_address = '/tmp/accelerator_socket'\n print('connecting to %s' % server_address)\n \n if os.path.exists(server_address):\n print(\"path exists\")\n try:\n print(\"trying to connect\")\n sock.connect(server_address)\n except Exception:\n print(\"connect failed sadly\")\n os.unlink(server_address)\n fork_creation()\n sock.connect(server_address)\n else:\n print(\"path does not exist\")\n fork_creation()\n sock.connect(server_address)\n\n try:\n \n # Send data\n message = 'This is the message. 
It will be repeated.'\n msgg = str.encode(message)\n print('sending \"%s\"' % message)\n sock.sendall(msgg)\n print(\"do i sendall?\")\n\n amount_received = 0\n amount_expected = len(message)\n print(amount_received, \" and \", amount_expected)\n \n while amount_received < amount_expected:\n data = sock.recv(16)\n amount_received += len(data)\n print('received \"%s\"' % data)\n end_msg = str.encode(\"\\0\")\n sock.sendall(end_msg)\n\n finally:\n print('closing socket')\n sock.close()\n","sub_path":"old_work/accelerator_fork1.py","file_name":"accelerator_fork1.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"347481540","text":"def fib(n):\n #dp = []\n fib = [0]*n\n fib[0]=0;\n fib[1]=1;\n for i in range(2, n):\n fib[i] = fib[i-1] + fib[i-2]\n\n print (fib)\n return fib[n-1]\n\n\nif __name__ == \"__main__\":\n n = int(input())\n r = fib(n)\n\n print (r)\n","sub_path":"Hackerrank-Practice/Practice Problems/Prob8_FibDP.py","file_name":"Prob8_FibDP.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"509719627","text":"\nfrom mpl_toolkits.mplot3d import axes3d\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport numpy as np\n\nfile_path = \"./3D_surface_and_contour.jpg\"\np = 0.05\nf = -0.01\n\ndef get_data(p):\n x, y, z = axes3d.get_test_data(p)\n print(z)\n z = f * z\n return x, y, z\n\ndef plot_3d_contour(p, f):\n nrows = 4\n ncols = 5\n\n x, y, z = get_data(p)\n\n x_min, x_max = np.min(x), np.max(x)\n y_min, y_max = np.min(y), np.max(y)\n z_min, z_max = np.min(z), np.max(z)\n\n fig = plt.figure(figsize=(15, 10))\n for n in range(nrows * ncols):\n i = n % ncols\n j = n / ncols\n k = n + 1\n if j == 0:\n azim = -60 + (i - 2) * 15\n elev = 30\n elif j == 1:\n azim = -60\n elev = 30 + (i - 2) * 5\n elif j == 2:\n azim = 60 + (i - 2) * 10\n elev = 30\n elif j == 3:\n azim = 60\n elev = 30 + (i - 2) * 5\n ax = fig.add_subplot(nrows, ncols, k, projection='3d')\n ax.set_title(\"azim=\" + str(azim) + \" elev=\" + str(elev))\n ax.tick_params(labelsize=8)\n ax.view_init(azim=azim, elev=elev)\n ax.plot_surface(x, y, z, rstride=10, cstride=10, alpha=0.3)\n ax.contourf(x, y, z, zdir='z', offset=z_min, cmap=cm.coolwarm)\n ax.contourf(x, y, z, zdir='x', offset=x_min, cmap=cm.coolwarm)\n if j == 0 or j == 1:\n ax.contourf(x, y, z, zdir='y', offset=y_max, cmap=cm.coolwarm)\n elif j == 2 or j == 3:\n ax.contourf(x, y, z, zdir='y', offset=y_min, cmap=cm.coolwarm)\n\n ax.set_xlabel('X')\n ax.set_xlim(x_min, x_max)\n ax.set_ylabel('Y')\n ax.set_ylim(y_min, y_max)\n ax.set_zlabel('Z')\n ax.set_zlim(z_min, z_max)\n\n plt.savefig(file_path, dpi=80)\n plt.show()\n plt.close()\n\nplot_3d_contour(p, f)","sub_path":"002.abnormal_dect/99.test/3d.py","file_name":"3d.py","file_ext":"py","file_size_in_byte":1842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"470096506","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('musicworld', '0004_article_likes'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Periodical',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=200)),\n ('content', 
models.TextField(max_length=1000)),\n ('image', models.FileField(verbose_name='periodical-image', upload_to='image/periodical')),\n ('timestamp', models.DateTimeField(auto_now=True)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Ptype',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=200)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='periodical',\n name='ptype',\n field=models.ForeignKey(to='musicworld.Ptype'),\n preserve_default=True,\n ),\n ]\n","sub_path":"musicworld/musicworld/migrations/0005_auto_20150104_1609.py","file_name":"0005_auto_20150104_1609.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"643532290","text":"from django.test import TestCase\n#models\nfrom catalog.models import Author\n#forms\nimport datetime\nfrom django.utils import timezone\nfrom catalog.forms import RenewBookForm, RenewBookModelForm\n#views\nfrom django.core.urlresolvers import reverse\n#loan\nfrom catalog.models import BookInstance, Book, Genre\nfrom django.contrib.auth.models import User #Required to assign User as a borrower\n\n\n# Create your tests here.\n#views loan test\nclass LoanedBookInstancesByUserListViewTest(TestCase):\n\n def setUp(self):\n #Create two users\n test_user1 = User.objects.create_user(username='testuser1', password='12345') \n test_user1.save()\n test_user2 = User.objects.create_user(username='testuser2', password='12345') \n test_user2.save()\n \n #Create a book\n test_author = Author.objects.create(first_name='John', last_name='Smith')\n test_genre = Genre.objects.create(name='Fantasy')\n #test_language = Language.objects.create(name='English')\n test_book = Book.objects.create(title='Book Title', summary = 'My book summary', isbn='ABCDEFG', author=test_author,) #language=test_language)\n # Create genre as a post-step\n genre_objects_for_book = Genre.objects.all()\n test_book.genre=genre_objects_for_book\n test_book.save()\n\n #Create 30 BookInstance objects\n number_of_book_copies = 30\n for book_copy in range(number_of_book_copies):\n return_date= timezone.now() + datetime.timedelta(days=book_copy%5)\n if book_copy % 2:\n the_borrower=test_user1\n else:\n the_borrower=test_user2\n status='m'\n BookInstance.objects.create(book=test_book,imprint='Unlikely Imprint, 2016', due_back=return_date, borrower=the_borrower, status=status)\n \n def test_redirect_if_not_logged_in(self):\n resp = self.client.get(reverse('my-borrowed'))\n self.assertRedirects(resp, '/accounts/login/?next=/mybooks/')\n\n def test_logged_in_uses_correct_template(self):\n login = self.client.login(username='testuser1', password='12345')\n resp = self.client.get(reverse('my-borrowed'))\n \n #Check our user is logged in\n self.assertEqual(str(resp.context['user']), 'testuser1')\n #Check that we got a response \"success\"\n self.assertEqual(resp.status_code, 200)\n\n #Check we used correct template\n self.assertTemplateUsed(resp, 'bookinstance_list_borrowed_user.html')\n\n#views test\nclass AuthorListViewTest(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n #Create 13 authors for pagination tests\n number_of_authors = 13\n for author_num in range(number_of_authors):\n Author.objects.create(first_name='Christian %s' % author_num, last_name = 'Surname %s' % author_num,)\n \n def 
test_view_url_exists_at_desired_location(self): \n resp = self.client.get('/authors/') \n self.assertEqual(resp.status_code, 200) \n \n def test_view_url_accessible_by_name(self):\n resp = self.client.get(reverse('authors'))\n self.assertEqual(resp.status_code, 200)\n \n def test_view_uses_correct_template(self):\n resp = self.client.get(reverse('authors'))\n self.assertEqual(resp.status_code, 200)\n\n self.assertTemplateUsed(resp, 'author_list.html')\n \n def test_pagination_is_ten(self):\n resp = self.client.get(reverse('authors'))\n self.assertEqual(resp.status_code, 200)\n self.assertTrue('is_paginated' in resp.context)\n self.assertTrue(resp.context['is_paginated'] == True)\n self.assertTrue( len(resp.context['author_list']) == 10)\n\n def test_lists_all_authors(self):\n #Get second page and confirm it has (exactly) remaining 3 items\n resp = self.client.get(reverse('authors')+'?page=2')\n self.assertEqual(resp.status_code, 200)\n self.assertTrue('is_paginated' in resp.context)\n self.assertTrue(resp.context['is_paginated'] == True)\n self.assertTrue( len(resp.context['author_list']) == 3)\n\n#forms test\nclass RenewBookFormTest(TestCase):\n\n def test_renew_form_date_field_label(self):\n form = RenewBookForm() \n self.assertTrue(form.fields['renewal_date'].label == None or form.fields['renewal_date'].label == 'renewal date')\n\n def test_renew_form_date_field_help_text(self):\n form = RenewBookForm()\n self.assertEqual(form.fields['renewal_date'].help_text,'Enter a date between now and 4 weeks (default 3).')\n\n def test_renew_form_date_in_past(self):\n date = datetime.date.today() - datetime.timedelta(days=1)\n form_data = {'renewal_date': date}\n form = RenewBookForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n def test_renew_form_date_too_far_in_future(self):\n date = datetime.date.today() + datetime.timedelta(weeks=4) + datetime.timedelta(days=1)\n form_data = {'renewal_date': date}\n form = RenewBookForm(data=form_data)\n self.assertFalse(form.is_valid())\n\n def test_renew_form_date_today(self):\n date = datetime.date.today()\n form_data = {'renewal_date': date}\n form = RenewBookForm(data=form_data)\n self.assertTrue(form.is_valid())\n \n def test_renew_form_date_max(self):\n date = timezone.now() + datetime.timedelta(weeks=4)\n form_data = {'renewal_date': date}\n form = RenewBookForm(data=form_data)\n self.assertTrue(form.is_valid())\n\n#models test\nclass AuthorModelTest(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n #Set up non-modified objects used by all test methods\n Author.objects.create(first_name='Big', last_name='Bob')\n\n def test_first_name_label(self):\n author=Author.objects.get(id=1)\n field_label = author._meta.get_field('first_name').verbose_name\n self.assertEquals(field_label,'first name')\n\n def test_date_of_death_label(self):\n author=Author.objects.get(id=1)\n field_label = author._meta.get_field('date_of_death').verbose_name\n self.assertEquals(field_label,'Died')\n\n def test_first_name_max_length(self):\n author=Author.objects.get(id=1)\n max_length = author._meta.get_field('first_name').max_length\n self.assertEquals(max_length,100)\n\n def test_object_name_is_last_name_comma_first_name(self):\n author=Author.objects.get(id=1)\n expected_object_name = '%s, %s' % (author.last_name, author.first_name)\n self.assertEquals(expected_object_name,str(author))\n\n def test_get_absolute_url(self):\n author=Author.objects.get(id=1)\n #This will also fail if the urlconf is not defined.\n 
self.assertEquals(author.get_absolute_url(),'/author/1')\n\n''' class YourTestClass(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n print(\"setUpTestData: Run once to set up non-modified data for all class methods.\")\n pass\n\n def setUp(self):\n print(\"setUp: Run once for every test method to setup clean data.\")\n pass\n\n def test_false_is_false(self):\n print(\"Method: test_false_is_false.\")\n self.assertFalse(False)\n\n def test_false_is_true(self):\n print(\"Method: test_false_is_true.\")\n self.assertTrue(False)\n\n def test_one_plus_one_equals_two(self):\n print(\"Method: test_one_plus_one_equals_two.\")\n self.assertEqual(1 + 1, 2) '''\n","sub_path":"catalog/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":7470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"75926622","text":"\nfrom framework.rom import meta\nfrom module.stream.stream_port_config import EnumErrorGen\nfrom module.testsuite.benchmark.benchmark_iteration_frame_size_command import BenchmarkIterationFrameSizeCommand\nfrom framework.rom.rom_property import *\n\n\n@meta.rom(description='Errored Frames Filtering iteration frame size command')\nclass ErroredFrameFilteringIterationFrameSizeCommand(BenchmarkIterationFrameSizeCommand):\n CrcChecked = BoolProperty(default=True, display='Enable Crc Errored Frame', category='config')\n\n CrcErroredFrameSize = U32Property(default=64, display='Crc Errored Frame Size', category='config',\n validator=(NumRangeValidator(64, 10000),),\n conditions=(Condition('applicable', 'CrcChecked', 'eq', 'True'),))\n\n UnderSizedChecked = BoolProperty(default=True, display='Enable Under Sized Frame', category='config')\n\n UnderSizedFrameSize = U32Property(default=60, display='Under Sized Frame Size', category='config',\n validator=(NumRangeValidator(60, 63),),\n conditions=(Condition('applicable', 'UnderSizedChecked', 'eq', 'True'),))\n\n OverSizedChecked = BoolProperty(default=True, display='Enable Over Sized Frame', category='config')\n\n OverSizedFrameSize = U32Property(default=1519, display='Over Sized Frame Size', category='config',\n validator=(NumRangeValidator(1519, 16383),),\n conditions=(Condition('applicable', 'OverSizedChecked', 'eq', 'True'),))\n\n def __init__(self):\n super().__init__()\n self._test_frame_sizes = None\n\n def execute_frame_size(self, para):\n iter_number = self.get_current_iteration()\n if iter_number < len(self.CustomFrameSizeList):\n frame_size = self.CustomFrameSizeList[iter_number]\n frame_type = self._test_frame_sizes.get(frame_size, 'UnknownFrame')\n if frame_type == 'CrcFrame':\n self.GenerateError = EnumErrorGen.CRC\n else:\n self.GenerateError = EnumErrorGen.NO_ERROR\n \n super().execute_frame_size(para)\n\n def validate(self):\n super().validate()\n self._test_frame_sizes = self.__get_test_frame_sizes()\n\n def __get_test_frame_sizes(self):\n frame_sizes = {}\n if self.CrcChecked:\n frame_sizes[self.CrcErroredFrameSize] = 'CrcFrame'\n if self.UnderSizedChecked:\n frame_sizes[self.UnderSizedFrameSize] = 'UnderFrame'\n if self.OverSizedChecked:\n frame_sizes[self.OverSizedFrameSize] = 'OverFrame'\n return frame_sizes\n","sub_path":"CL/module/testsuite/benchmark/l2l3/rfc2889/errored_frame_filtering/iteration_frame_command.py","file_name":"iteration_frame_command.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"157591999","text":"from API._API import *\nimport os\n\nstatus 
= os.system(\"ping -n 2 192.168.1.1\")\nif status == 1:\n print(\"Release LAN\")\n os.system(\"netsh interface set interface \\\"LAN\\\" disabled\")\n time.sleep(10)\n print(\"Reload LAN\")\n os.system(\"netsh interface set interface \\\"LAN\\\" enabled\")\n time.sleep(10)\n \n \n","sub_path":"New_GUI/R8500/Tools/loaddefault - Copy.py","file_name":"loaddefault - Copy.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"360333265","text":"from bwsi_grader.python.ciphers import grade_cesar_cipher;\n\ndef encode_caesar(string, shift_amt):\n original = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\";\n new = original[shift_amt:] + original[:shift_amt];\n encode = \"\";\n for c in string:\n if (c not in original): encode += c;\n else: encode += new[original.index(c)];\n return encode;\n\ngrade_cesar_cipher(encode_caesar);\n","sub_path":"PythonCore/Module04_CodingAssignments/Ciphers/ciphers1.py","file_name":"ciphers1.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"94866530","text":"\"\"\"\n@arthor:金龙\n@school:哈尔滨理工大学\n@department:计算机学院\n@time: 2017/9/24 22:01\n@describe:在这里实现了图的理解矩阵的相关操作,\n 并实现了图的深度搜索(dfs)和图的广度搜索(bfs)\n\"\"\"\n\n\nclass Graph:\n \"\"\"这个类是:一个图数据结构\"\"\"\n\n def __init__(self, var_map):\n # 1.接收传来的邻接矩阵值\n self.graph_map = var_map\n # 2.将邻接矩阵转化为无向图的形式\n self.toNoDirect()\n # 3.属性:结点数\n self.node_number = self.getNodeNumber()\n # 4.属性:边数\n self.edge_number = self.getEdgeNumber()\n # 5.属性:bfs结果\n self.router_bfs = []\n # 6.属性:dfs结果\n self.router_dfs = []\n\n def toNoDirect(self):\n \"\"\"\"这个方法的功能是:将有向图变为无向图\"\"\"\n for row in range(len(self.graph_map)):\n for col in range(row):\n summery = self.graph_map[row][col] + self.graph_map[col][row]\n # 构造对称矩阵\n # 构造原则:(x,y)和(y,x)当中,只要其中一个为 1, 则让它俩都为 1\n if summery > 0:\n self.graph_map[row][col] = 1\n self.graph_map[col][row] = 1\n\n def getNodeNumber(self):\n \"\"\"\"这个方法的功能是:取得图中结点的个数\"\"\"\n return len(self.graph_map)\n\n def getEdgeNumber(self):\n \"\"\"这个方法的功能是:取得图中边的个数\"\"\"\n self.edge_number = 0\n for row in range(self.node_number):\n for col in range(self.node_number):\n if self.graph_map[row][col] is 1:\n self.edge_number = self.edge_number + 1\n return self.edge_number // 2\n\n def addNode(self):\n \"\"\"这个方法的功能是:向图中添加一个结点\"\"\"\n for i in range(self.node_number):\n # 利用for循环在map里添加一列 0\n self.graph_map[i].append(0)\n self.node_number = self.node_number + 1\n # 用*来构造一行 0\n oneRow = [0] * self.node_number\n # 在map里添加一行0\n self.graph_map.append(oneRow)\n # 让对角线上的元素为 -1\n self.graph_map[self.node_number - 1][self.node_number - 1] = -1\n\n def addEdge(self, x, y):\n \"\"\"这个方法的功能是:向图中添加结点\"\"\"\n if self.graph_map[x][y] is 0:\n self.graph_map[x][y] = 1\n self.graph_map[y][x] = 1\n self.edge_number = self.edge_number + 1\n\n def removeEdge(self, x, y):\n \"\"\"这个方法的功能是:在图中删除结点(x,y)\"\"\"\n if self.graph_map[x][y] is 1:\n self.graph_map[x][y] = 0\n self.graph_map[y][x] = 0\n self.edgenum = self.edgenum - 1\n\n def callBFS(self):\n \"\"\"实现BFS功能\"\"\"\n\n def BFS(self, i):\n \"\"\"BFS递归体\"\"\"\n self.router_bfs.append(i)\n visited[i] = 1\n for k in range(self.node_number):\n if self.graph_map[i][k] == 1 and visited[k] == 0:\n BFS(self, k)\n\n # 遍历结点\n visited = [0] * self.node_number\n for i in range(self.node_number):\n if visited[i] is 0:\n BFS(self, i)\n\n def callDFS(self):\n \"\"\"实现DFS功能\"\"\"\n\n def DFS(self, i, queue):\n 
\"\"\"DFS迭代体\"\"\"\n queue.append(i)\n self.router_dfs.append(i)\n visited[i] = 1\n if len(queue) != 0:\n w = queue.pop()\n for k in range(self.node_number):\n if self.graph_map[w][k] is 1 and visited[k] is 0:\n DFS(self, k, queue)\n\n # 遍历结点\n visited = [0] * self.node_number\n queue = []\n for i in range(self.node_number):\n if visited[i] is 0:\n DFS(self, i, queue)\n\n def printNodeNumber(self):\n \"\"\"这个方法的功能是:个性化地打印图中结点个数\"\"\"\n print(\"该图的结点个数是:\", self.node_number)\n\n def printEdgeNumber(self):\n \"\"\"这个方法的功能是:个性化地打印图中边的个数\"\"\"\n print(\"该图的边的个数是:\", self.edge_number)\n\n def printMap(self):\n \"\"\"这个方法的功能是:个性化地打印邻接矩阵\"\"\"\n print(\"---------------------------MAP START------------------------\\n\")\n for row in range(self.node_number):\n for col in range(self.node_number):\n print(\"%6d\" % self.graph_map[row][col], end='')\n print()\n print(\"\\n---------------------------MAP END--------------------------\")\n\n def printBFS(self):\n \"\"\"这个方法的功能是:个性化地打印BFS的结果\"\"\"\n for i in range(len(self.router_bfs)):\n print(self.router_bfs[i], end=\" -> \")\n print(\"完成\")\n\n def printDFS(self):\n \"\"\"这个方法的功能是:个性化地打印DFS的结果\"\"\"\n for i in range(len(self.router_dfs)):\n print(self.router_dfs[i], end=\" -> \")\n print(\"完成\")\n\n\ndef myTest():\n \"\"\"\n 此方法是测试方法。\n 主要用来测试功能\n 并输出结果\n \"\"\"\n graph_map = [\n [-1, 1, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, 1],\n [1, 0, 0, -1]\n ]\n graph = Graph(graph_map)\n graph.addNode()\n graph.addEdge(1, 4)\n graph.addEdge(1, 3)\n graph.printMap()\n graph.printNodeNumber()\n graph.printEdgeNumber()\n graph.callBFS()\n graph.printBFS()\n graph.callDFS()\n graph.printDFS()\n\n\nif __name__ == '__main__':\n \"\"\"此方法是主方法。此处为整个函数的入口\"\"\"\n myTest()\n","sub_path":"day00/图论.py","file_name":"图论.py","file_ext":"py","file_size_in_byte":5761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"14899891","text":"def to_bytes(text, encoding=None, errors='strict'):\n \"\"\"Return the binary representation of `text`. 
If `text`\n is already a bytes object, return it as-is.\"\"\"\n if isinstance(text, bytes):\n return text\n if not isinstance(text, six.string_types):\n raise TypeError('to_bytes must receive a unicode, str or bytes '\n 'object, got %s' % type(text).__name__)\n if encoding is None:\n encoding = 'utf-8'\n return text.encode(encoding, errors)\n","sub_path":"mosquito/utils/python.py","file_name":"python.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"500699276","text":"from sys import stdin\ninput = stdin.readline\nn = int(input())\nwhile True:\n k = int(input())\n if k == 0:\n break\n elif k % n == 0:\n print(f'{k} is a multiple of {n}.')\n else:\n print(f'{k} is NOT a multiple of {n}.')\n","sub_path":"BaekJoon_Py/4505.py","file_name":"4505.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"109415324","text":"\"\"\"Allocation API tests.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport unittest\n\nimport mock\n\nfrom treadmill import admin\nfrom treadmill.api import allocation\n\n\nclass ApiAllocationTest(unittest.TestCase):\n \"\"\"treadmill.api.allocation tests.\"\"\"\n\n @mock.patch('treadmill.context.AdminContext.conn',\n mock.Mock(return_value=None))\n def setUp(self):\n self.alloc = allocation.API()\n\n def tearDown(self):\n pass\n\n @mock.patch('treadmill.context.AdminContext.conn',\n mock.Mock(return_value=admin.Admin(None, None)))\n @mock.patch('treadmill.admin.Allocation.list',\n mock.Mock(return_value=[]))\n @mock.patch('treadmill.admin.CellAllocation.list',\n mock.Mock(return_value=[]))\n def test_list(self):\n \"\"\"Dummy test for treadmill.api.allocation._list()\"\"\"\n alloc_admin = admin.Allocation(None)\n self.alloc.list()\n alloc_admin.list.assert_called_with({})\n\n @mock.patch('treadmill.context.AdminContext.conn',\n mock.Mock(return_value=admin.Admin(None, None)))\n @mock.patch('treadmill.admin.Allocation.create',\n mock.Mock(return_value={}))\n @mock.patch('treadmill.admin.Allocation.get',\n mock.Mock(return_value={'environment': 'prod'}))\n @mock.patch('treadmill.admin.CellAllocation.create',\n mock.Mock(return_value={}))\n @mock.patch('treadmill.admin.CellAllocation.get',\n mock.Mock(return_value={}))\n @mock.patch('treadmill.api.allocation._check_capacity',\n mock.Mock(return_value=True))\n def test_reservation(self):\n \"\"\"Dummy test for treadmill.api.allocation._list()\"\"\"\n alloc_admin = admin.CellAllocation(None)\n self.alloc.reservation.create(\n 'tenant/alloc/cellname',\n {'memory': '1G',\n 'cpu': '100%',\n 'disk': '2G',\n 'partition': None})\n alloc_admin.create.assert_called_with(\n ['cellname', 'tenant/alloc'],\n {'disk': '2G',\n 'partition': None,\n 'cpu': '100%',\n 'rank': 100,\n 'memory': '1G'},\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/api/allocation_test.py","file_name":"allocation_test.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"305044500","text":"\"\"\"\nНа вход программе подается натуральное число nn, а затем nn строк. 
Напишите программу, которая выводит только уникальные строки,\nв том же порядке, в котором они были введены.\n\"\"\"\n\nn = int(input())\ndata = []\nfor _ in range(n):\n s = input()\n if s not in data:\n data.append(s)\nprint(*data, sep='\\n')\n","sub_path":"Generation Python - A Beginner's Course/11_list/11.4/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"136707624","text":"from typing import Any, Dict\n\nimport arrow\nfrom rsserpent.utils import HTTPClient, cached\n\n\npath = \"/bilibili/user/{uid}/video\"\n\n\n@cached\nasync def provider(uid: int) -> Dict[str, Any]:\n \"\"\"订阅 up 上传的最新视频.\"\"\"\n user_info_api = f\"https://api.bilibili.com/x/space/acc/info?mid={uid}&jsonp=jsonp\"\n video_list_api = (\n f\"https://api.bilibili.com/x/space/arc/search?mid={uid}&ps=30\"\n \"&tid=0&pn=1&keyword=&order=pubdate&jsonp=jsonp\"\n )\n\n async with HTTPClient() as client:\n user_info = (await client.get(user_info_api)).json()\n video_list = (await client.get(video_list_api)).json()\n\n username = user_info[\"data\"][\"name\"]\n\n return {\n \"title\": f\"{username}的最新投稿视频\",\n \"link\": f\"https://space.bilibili.com/{uid}/video\",\n \"description\": user_info[\"data\"][\"sign\"],\n \"items\": [\n {\n \"title\": item[\"title\"],\n \"description\": item[\"description\"],\n \"link\": f\"https://www.bilibili.com/video/{item['bvid']}\",\n \"pubDate\": arrow.get(item[\"created\"]),\n \"author\": username,\n }\n for item in video_list[\"data\"][\"list\"][\"vlist\"]\n ],\n }\n","sub_path":"rsserpent_plugin_bilibili/user/video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"482380708","text":"promedio = 0\r\nfor x in range(1,11):\r\n while True:\r\n try:\r\n promedio += int(input(\"num\"+str(x)+\" = \"))\r\n break\r\n except ValueError:\r\n print(\"solo numeros\")\r\n continue\r\nprint(\"promdeio es igual a =\",promedio/10)\r\n\r\n","sub_path":"ejercicio 48.py","file_name":"ejercicio 48.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"135165109","text":"import string\nimport random\nfrom itertools import combinations\nfrom collections import deque\n\n\n\nclass User:\n def __init__(self, name):\n self.name = name\n\n\nclass SocialGraph:\n def __init__(self):\n self.lastID = 0\n self.users = {}\n self.friendships = {}\n\n\n\n def addFriendship(self, userID, friendID):\n \"\"\"\n Creates a bi-directional friendship\n \"\"\"\n if userID == friendID:\n print(\"WARNING: You cannot be friends with yourself\")\n elif friendID in self.friendships[userID] or userID in self.friendships[friendID]:\n print(\"WARNING: Friendship already exists\")\n else:\n self.friendships[userID].add(friendID)\n self.friendships[friendID].add(userID)\n\n def addUser(self, name):\n \"\"\"\n Create a new user with a sequential integer ID\n \"\"\"\n self.lastID += 1 # automatically increment the ID to assign the new user\n\n\n\n self.users[self.lastID] = User(name) \n self.friendships[self.lastID] = set()\n\n def bfs(self, starting_vertex, target): \n visited_bfs = []\n queue = deque()\n queue.append([starting_vertex])\n visited = {}\n # avg = 0\n \n while queue: \n path = queue.popleft()\n last_node = path[-1:][0]\n if last_node not in visited_bfs:\n visited[last_node] = path\n # avg += (len(path) 
- 1)\n # print(last_node, path)\n if last_node == target:\n return path\n visited_bfs.append(last_node)\n for v in self.friendships[last_node]:\n new_list = list(path)\n new_list.append(v)\n queue.append(new_list)\n # print('Avg', avg)\n return visited\n\n def populateGraph(self, numUsers, avgFriendships):\n \"\"\"\n Takes a number of users and an average number of friendships\n as arguments\n\n Creates that number of users and a randomly distributed friendships\n between those users.\n\n The number of users must be greater than the average number of friendships.\n \"\"\"\n # Reset graph\n self.lastID = 0\n self.users = {}\n self.friendships = {}\n # !!!! IMPLEMENT ME\n\n # Add users\n for i in range(numUsers):\n self.addUser(f'User {i}') \n\n # Create friendship pairs \n # len = friends(friends - 1)/2 O(n^2)\n possible_friendships = list(combinations(range(1, numUsers+1), 2))\n\n random.shuffle(possible_friendships) # O(n)\n\n T = int(numUsers/2 * avgFriendships) # total friendship needed O(1)\n\n actual_friendships = possible_friendships[:T] # O(1)\n\n # c=0\n for friendship in actual_friendships: # O(n) \n # c += 1\n self.addFriendship(friendship[0], friendship[1])\n # print(c, 'C')\n\n\n\n \n\n def getAllSocialPaths(self, userID):\n \"\"\"\n Takes a user's userID as an argument\n\n Returns a dictionary containing every user in that user's\n extended network with the shortest friendship path between them.\n\n The key is the friend's ID and the value is the path.\n \"\"\"\n # visited = {} # Note that this is a dictionary, not a set\n # !!!! IMPLEMENT ME\n\n # visited = self.bfs(userID, self.users[1])\n\n return self.bfs(userID, self.users[1])\n\n\n # !! When there are 100 users with 10 friends each, addFriendship must be called 500 times. \n # !! (100 users / 2) * (10 friends / 1 user) => 500 friendships formed\n # !! The 2 is needed since the friendships are bi-directional\n\n # !! When there are 1000 users with an average of 5 friends:\n # !! 100% of users are in a user's extended social network\n # !! (with such a large number there were no empty sets.)\n # !! On average they are 3.2 degrees of separation from each other. Most are 4 degrees apart with some a little closer.\n\n # !! Stretch: Instead of creating one average for the numbers of friends provide different averages for different population groups. \n # !! Higher average number of contacts for people who are: outgoing, travel frequently, live in larger cities, involved in many social clubs,etc. And a lower average for the inverse.\n\n\n\n\nif __name__ == '__main__':\n sg = SocialGraph()\n sg.populateGraph(10, 2) # 30/10\n print(sg.friendships)\n connections = sg.getAllSocialPaths(1)\n print(connections)\n # print('*' * 10)\n # sg2 = SocialGraph()\n # sg2.populateGraph(100, 10) # >200/100\n # # print(sg2.friendships)\n # connections2 = sg2.getAllSocialPaths(1)\n # print(connections2)\n # print('*' * 10)\n # sg3 = SocialGraph()\n # sg3.populateGraph(1000, 10)\n # # print(sg2.friendships)\n # connections3 = sg3.getAllSocialPaths(1) # >3000/1000\n # print(connections3)","sub_path":"projects/graph/social/social.py","file_name":"social.py","file_ext":"py","file_size_in_byte":5031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"537697667","text":"\"\"\"\n.. 
_ex-source-space-power-phase-locking:\n\n=========================================================\nCompute power and phase lock in label of the source space\n=========================================================\n\nCompute time-frequency maps of power and phase lock in the source space.\nThe inverse method is linear based on dSPM inverse operator.\n\nThe example also shows the difference in the time-frequency maps\nwhen they are computed with and without subtracting the evoked response\nfrom each epoch. The former results in induced activity only while the\nlatter also includes evoked (stimulus-locked) activity.\n\"\"\"\n# Authors: Alexandre Gramfort \n#\n# License: BSD-3-Clause\n\n# %%\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport mne\nfrom mne import io\nfrom mne.datasets import sample\nfrom mne.minimum_norm import read_inverse_operator, source_induced_power\n\nprint(__doc__)\n\n# %%\n# Set parameters\ndata_path = sample.data_path()\nmeg_path = data_path / \"MEG\" / \"sample\"\nraw_fname = meg_path / \"sample_audvis_raw.fif\"\nfname_inv = meg_path / \"sample_audvis-meg-oct-6-meg-inv.fif\"\nlabel_name = \"Aud-rh\"\nfname_label = meg_path / \"labels\" / f\"{label_name}.label\"\n\ntmin, tmax, event_id = -0.2, 0.5, 2\n\n# Setup for reading the raw data\nraw = io.read_raw_fif(raw_fname)\nevents = mne.find_events(raw, stim_channel=\"STI 014\")\ninverse_operator = read_inverse_operator(fname_inv)\n\ninclude = []\nraw.info[\"bads\"] += [\"MEG 2443\", \"EEG 053\"] # bads + 2 more\n\n# Picks MEG channels\npicks = mne.pick_types(\n raw.info, meg=True, eeg=False, eog=True, stim=False, include=include, exclude=\"bads\"\n)\nreject = dict(grad=4000e-13, mag=4e-12, eog=150e-6)\n\n# Load epochs\nepochs = mne.Epochs(\n raw,\n events,\n event_id,\n tmin,\n tmax,\n picks=picks,\n baseline=(None, 0),\n reject=reject,\n preload=True,\n)\n\n# Compute a source estimate per frequency band including and excluding the\n# evoked response\nfreqs = np.arange(7, 30, 2) # define frequencies of interest\nlabel = mne.read_label(fname_label)\nn_cycles = freqs / 3.0 # different number of cycle per frequency\n\n# subtract the evoked response in order to exclude evoked activity\nepochs_induced = epochs.copy().subtract_evoked()\n\nplt.close(\"all\")\n\nfor ii, (this_epochs, title) in enumerate(\n zip([epochs, epochs_induced], [\"evoked + induced\", \"induced only\"])\n):\n # compute the source space power and the inter-trial coherence\n power, itc = source_induced_power(\n this_epochs,\n inverse_operator,\n freqs,\n label,\n baseline=(-0.1, 0),\n baseline_mode=\"percent\",\n n_cycles=n_cycles,\n n_jobs=None,\n )\n\n power = np.mean(power, axis=0) # average over sources\n itc = np.mean(itc, axis=0) # average over sources\n times = epochs.times\n\n ##########################################################################\n # View time-frequency plots\n plt.subplots_adjust(0.1, 0.08, 0.96, 0.94, 0.2, 0.43)\n plt.subplot(2, 2, 2 * ii + 1)\n plt.imshow(\n 20 * power,\n extent=[times[0], times[-1], freqs[0], freqs[-1]],\n aspect=\"auto\",\n origin=\"lower\",\n vmin=0.0,\n vmax=30.0,\n cmap=\"RdBu_r\",\n )\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Frequency (Hz)\")\n plt.title(\"Power (%s)\" % title)\n plt.colorbar()\n\n plt.subplot(2, 2, 2 * ii + 2)\n plt.imshow(\n itc,\n extent=[times[0], times[-1], freqs[0], freqs[-1]],\n aspect=\"auto\",\n origin=\"lower\",\n vmin=0,\n vmax=0.7,\n cmap=\"RdBu_r\",\n )\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Frequency (Hz)\")\n plt.title(\"ITC (%s)\" % title)\n 
plt.colorbar()\n\nplt.show()\n","sub_path":"examples/time_frequency/source_label_time_frequency.py","file_name":"source_label_time_frequency.py","file_ext":"py","file_size_in_byte":3682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"574757488","text":"import sys\nimport winreg\nfrom argparse import ArgumentParser\n\n\ndef search(needle):\n found = False\n with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, \"SOFTWARE\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Installer\\\\UserData\", access=winreg.KEY_READ | winreg.KEY_WOW64_64KEY) as userDataParentHandle:\n for userDataIndex in range(0, winreg.QueryInfoKey(userDataParentHandle)[0]):\n user = winreg.EnumKey(userDataParentHandle, userDataIndex)\n with winreg.OpenKey(userDataParentHandle, user) as userDataHandle:\n with winreg.OpenKey(userDataHandle, \"Components\") as componentsParentHandle:\n for componentIndex in range(0, winreg.QueryInfoKey(componentsParentHandle)[0]):\n with winreg.OpenKey(componentsParentHandle, winreg.EnumKey(componentsParentHandle, componentIndex)) as componentHandle:\n for valueIndex in range(0, winreg.QueryInfoKey(componentHandle)[1]):\n valueName, valueData = winreg.EnumValue(componentHandle, valueIndex)[0:2]\n if needle.casefold() in valueData.casefold():\n with winreg.OpenKey(userDataHandle, \"Products\\\\\" + valueName + \"\\\\InstallProperties\") as propertiesHandle:\n if not found:\n found = True\n else:\n print()\n\n print(\"File: \" + valueData)\n print(\"Product: \" + winreg.QueryValueEx(propertiesHandle, \"DisplayName\")[0])\n print(\"Install user: \" + user)\n print(\"Cached installer: \" + winreg.QueryValueEx(propertiesHandle, \"LocalPackage\")[0])\n\n if not found:\n print('No file path containing \"{}\" was found in any installed package.'.format(needle))\n\n\ndef search_command(opts):\n return search(opts.pattern)\n\n\ndef packages_command(opts):\n print('{} - Not yet implemented'.format(opts.command))\n\n\ndef components_command(opts):\n print('{} - Not yet implemented'.format(opts.command))\n\n\ndef create_parser(prog_name):\n parser = ArgumentParser(prog=prog_name)\n sp = parser.add_subparsers(title='commands', dest='command', description='valid commands:')\n search = sp.add_parser('search', help='Search for a file within an installed component')\n search.add_argument('pattern', help='Name of the file')\n search.set_defaults(func=search_command)\n packages = sp.add_parser('packages', help='Inventory packages on this system')\n packages.set_defaults(func=packages_command)\n components = sp.add_parser('components', help='Show components of a package')\n components.add_argument('pattern', help='Name of the package')\n components.set_defaults(func=components_command)\n return parser\n\n\ndef main():\n parser = create_parser(sys.argv[0])\n opts = parser.parse_args(sys.argv[1:])\n if not hasattr(opts, 'func'):\n parser.print_help()\n else:\n opts.func(opts)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"win_inventory.py","file_name":"win_inventory.py","file_ext":"py","file_size_in_byte":3195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"344231579","text":"from django.db import models\nimport django.contrib.auth.models as authModels\n\n\nclass Article (models.Model):\n #Publication date of an article\n datePub = models.DateTimeField('Publication date')\n #Title of an article\n title = models.CharField(max_length=200)\n #Type of article: either SenProc or Wiki\n typeArticleList = (\n 
('ScenProc', 'Scenario/Procédure'),\n ('Wiki', 'Wiki')\n )\n TArticle = models.CharField(\n max_length=10,\n choices=typeArticleList,\n default='Wiki'\n )\n permOnArticle = models.ForeignKey(\n 'RolesArticle',\n null=True\n )\n #Tag linked to Article, not yet implemented\n tagArt = models.ManyToManyField(\n 'Tag',\n blank=True,\n null=True\n )\n\n\nclass Content (models.Model):\n #Modification date\n dateModif = models.DateTimeField('Modification date')\n #Content, in HTML\n contentHTML = models.TextField()\n #Content, in Markdown\n contentMD = models.TextField()\n #Object that refers to the previous version of the content, set to NULL if\n #we create an article\n contentPrev = models.ForeignKey(\n 'Content',\n blank=True,\n null=True\n )\n #Language available for an article (Def: Fr, Available: Fr, En)\n #First, we define our language list with a Field.choices\n languageList = (\n ('FR', 'Français'),\n ('EN', 'English')\n )\n #Then we define the method filed.\n language = models.CharField(\n max_length=2,\n choices=languageList,\n default='FR'\n )\n #Object that referes to the article\n ArticleRef = models.ForeignKey(\n 'Article',\n null=True\n )\n class Tag (models.Model):\n tagName = models.CharField(max_length=50)\n\n\nclass WikiArt (Article):\n Article.TArticle='Wiki'\n\n\nclass ScenProcArt(Article):\n Article.TArticle='ScenProc'\n #Difficulty for a procedure (because of skills required, ...)\n difficulty= models.IntegerField()\n #Duration, in second, for a procedure to be executed\n duration=models.IntegerField()\n\n\nclass RolesArticle(models.Model):\n #Read permition on an article\n canRead = models.BooleanField(default=True)\n #Write permition on an article\n canWrite = models.BooleanField(default=False)\n #Role associated to this permissions\n roleRef = models.ForeignKey(\n 'Role',\n null = True\n )\n\n\n# This class defined the role in the C4 structure\n# e.g, its defined the operator's level\nclass Role(models.Model):\n #Name of the specific role: N1, N2,...\n roleName = models.CharField(max_length=15)\n # Reference to the system user\n userRoleRef = models.ForeignKey(\n 'UserRoles',\n null = True)\n #References to the system group\n GroupRoleRef = models.ForeignKey(\n 'GroupRoles',\n null = True\n )\n\n\n#Defined the user in the system, not in the C4\n#e.g, its defined the user\nclass UserRoles (models.Model):\n #The user's adminitrative level: students, teachers, ...\n userRoleName = models.CharField(max_length=15)\n #Ref to the django user class\n userRef = models.ManyToManyField(\n authModels.User,\n null=True\n )\n\n\n#Defined the group int the system.\n#e.g, its defined the user's administrative group\nclass GroupRoles (models.Model):\n #The group's adminitrative level:\n roleGroup = models.CharField(max_length=10)\n #Ref to the django group class\n grpRef = models.ManyToManyField(\n authModels.Group,\n null=True\n )\n","sub_path":"c4wiki/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"202311797","text":"from test.framework.spider import Spider\nfrom test.framework.https.request import Request\n\n\nclass LJSpiderMw(Spider):\n name = \"LianJia_01\"\n custom_settings = [\"URL = https://sh.lianjia.com/ershoufang/\"]\n\n def __init__(self):\n super(LJSpiderMw,self).__init__()\n self._url = self.settings[\"URL\"]\n self._total_house = 0\n self.headers = {'User-Agent':['MMozilla/5.0 (Windows NT 6.1; WOW64; 
rv:31.0)'],'content-type':['application/json']}\n self._item_num = 0\n self._maxnum = 2\n self.download_delay = 0\n # self.change_header = True\n # self.proxy = ('47.105.165.81',5527,'spider:123456')\n # self.change_proxy = False\n\n def start_requests(self):\n start_url = list()\n for i in range(1,self._maxnum+1):\n if i == 1 :\n url = self._url\n else:\n i = str(i)\n url = self._url +\"pg\"+i\n start_url.append(url)\n for url in start_url:\n yield Request(url,callback=self._parse,headers=self.headers\n )\n\n def _parse(self,response):\n # web_body = BeautifulSoup(response.body,\"html.parser\")\n # total_house = web_body.find_all(\"h2\", class_='total fl')[0].span.string\n # if total_house:\n # self._total_house = total_house\n # house_list = web_body.find_all(\"ul\", class_='sellListContent')[0]\n # if None in house_list:\n # none_num = house_list.count(None)\n # else:\n # none_num = 0\n # one_page_numeber = len(house_list) - none_num\n # self._item_num += one_page_numeber\n self.request_headers = response.request.headers\n proxy = response.request.meta.get('proxy',None)\n # print(request_headers)\n # print(proxy)\n # response_headers = response.headers.getAllRawHeaders()\n # print(request_headers)\n # for i in response_headers:\n # print(i)\n return None\n","sub_path":"test/framework/test/test_example/check_spidermw/simple_spider_spidermw.py","file_name":"simple_spider_spidermw.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"124059349","text":"\nAnalyzer = 'mkShape'\nOutputdir = 'Output'\nCategory = 'SMP'\nYear = '2016'\nInSkim = 'ISR_v1'\nUserflags = 'ISR_Gen_Hists'\ntreeName = 'recoTree/SKFlat'\n\nsamplesFile = 'samples.py'\nplotFile = 'plot.py'\nvariablesFile = 'variables.py'\ncutsFile = 'cuts.py'\nnuisancesFile= 'nuisances.py'\n","sub_path":"PlotConfiguration/ISR/2016/unfolding/gen_hists/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"265886770","text":"class Solution(object):\n\n def addTwoNumber(self, n):\n if n == 0:\n return [\"\"]\n if n == 1:\n return [\"1\", \"8\", \"0\"]\n PreviousResult = self.addTwoNumber(n - 2)\n result = []\n for i in PreviousResult:\n if n != self.n:\n result.append(\"0\" + i + \"0\")\n result.append(\"1\" + i + \"1\")\n result.append(\"6\" + i + \"9\")\n result.append(\"9\" + i + \"6\")\n result.append(\"8\" + i + \"8\")\n return result\n\n def findStrobogrammatic(self, n):\n self.n = n\n return self.addTwoNumber(n)\n\n\ntestClass = Solution()\n\nprint(testClass.findStrobogrammatic(4))\n","sub_path":"247-strobogrammatic-number=2/247.py","file_name":"247.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"242304895","text":"from Player import Player\nimport random\n\nclass Deck:\n def __init__(self):\n suits = [3, 2, 1, 0]\n ranks = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]\n deck = []\n\n for suit in suits:\n for rank in ranks:\n card = Card(suit, rank)\n deck.append(card)\n self.deck = deck\n\n\n def shuffleCards(self):\n deck = self.deck\n for times in range(100):\n position = random.randint(0, len(deck) - 1)\n temp = deck[position]\n deck[position] = deck[0]\n deck[0] = temp\n\n def evalCards(self, cardPOne, cardPTwo):\n if cardPOne.val == cardPTwo.val:\n return 0\n elif cardPOne.val > cardPTwo.val:\n 
return 1\n else:\n return 2\n\nclass Card:\n def __init__(self, suit, val):\n self.suit = suit\n self.val = val\n\n def parseSuit(self):\n if self.suit == 0:\n return \"Hearts\"\n elif self.suit == 1:\n return \"Diamonds\"\n elif self.suit == 2:\n return \"Spades\"\n elif self.suit == 3:\n return \"Clubs\"\n\n def parseVal(self):\n if self.val < 11:\n return str(self.val)\n else:\n if self.val == 11:\n return \"Jack\"\n elif self.val == 12:\n return \"Queen\"\n elif self.val == 13:\n return \"King\"\n elif self.val == 14:\n return \"Ace\"\n\n def __str__(self):\n s = self.parseSuit()\n v = self.parseVal()\n return \"%s of %s\" % (v, s)\n","sub_path":"Cards.py","file_name":"Cards.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"471464325","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n# python\nimport base64\nimport re\nimport struct\nimport time\nimport urllib.parse\nfrom datetime import datetime, timedelta\nfrom dateutil.parser import parse as dateutil_parse\n\n# django and drf\nfrom django.contrib.auth import get_user_model\nfrom django.utils import translation\nfrom django.core.exceptions import ValidationError\nfrom django.conf import settings\nfrom django.core.urlresolvers import NoReverseMatch\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404\nfrom django.utils.translation import ugettext_lazy as _\nfrom rest_framework import (\n serializers, relations, viewsets, filters, generics, status\n)\nfrom rest_framework.settings import api_settings\nfrom rest_framework.reverse import reverse\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import ParseError\n\n\n# 3rd party\nfrom isodate import Duration, duration_isoformat, parse_duration\nfrom modeltranslation.translator import translator, NotRegistered\nfrom haystack.query import AutoQuery\nfrom munigeo.api import (\n GeoModelSerializer, GeoModelAPIView, build_bbox_filter, srid_to_srs\n)\nimport pytz\n\n# events\nfrom events import utils\nfrom events.custom_elasticsearch_search_backend import (\n CustomEsSearchQuerySet as SearchQuerySet\n)\nfrom events.models import (\n Place, Event, Keyword, Language, OpeningHoursSpecification, EventLink,\n Offer, DataSource, Organization \n)\nfrom events.translation import EventTranslationOptions\n\n\nSYSTEM_DATA_SOURCE_ID = 'system'\n\n\nserializers_by_model = {}\n\nall_views = []\ndef register_view(klass, name, base_name=None):\n entry = {'class': klass, 'name': name}\n if base_name is not None:\n entry['base_name'] = base_name\n all_views.append(entry)\n\n if klass.serializer_class and \\\n hasattr(klass.serializer_class, 'Meta') and \\\n hasattr(klass.serializer_class.Meta, 'model'):\n model = klass.serializer_class.Meta.model\n serializers_by_model[model] = klass.serializer_class\n\n\ndef urlquote_id(link):\n \"\"\"\n URL quote link's id part, e.g.\n http://127.0.0.1:8000/v0.1/place/tprek:20879/\n -->\n http://127.0.0.1:8000/v0.1/place/tprek%3A20879/\n This is DRF backwards compatibility function, 2.x quoted id automatically.\n\n :param link: URL str\n :return: quoted URL str\n \"\"\"\n if isinstance(link, str):\n parts = link.split('/')\n if len(parts) > 1 and ':' in parts[-2]:\n parts[-2] = urllib.parse.quote(parts[-2])\n link = '/'.join(parts)\n return link\n\n\ndef generate_id(namespace):\n t = time.time() * 1000\n postfix = base64.b32encode(struct.pack(\">Q\", int(t)).lstrip(b'\\x00'))\n postfix = 
postfix.strip(b'=').lower().decode(encoding='UTF-8')\n return '{}:{}'.format(namespace, postfix)\n\ndef parse_id_from_uri(uri):\n \"\"\"\n Parse id part from @id uri like\n 'http://127.0.0.1:8000/v0.1/event/matko%3A666/' -> 'matko:666'\n :param uri: str\n :return: str id\n \"\"\"\n if not uri.startswith('http'):\n return uri\n path = urllib.parse.urlparse(uri).path\n _id = path.rstrip('/').split('/')[-1]\n _id = urllib.parse.unquote(_id)\n return _id\n\ndef perform_id_magic_for(data):\n if 'id' in data:\n err = \"Do not send 'id' when POSTing a new Event (got id='{}')\"\n raise ParseError(err.format(data['id']))\n data['id'] = generate_id(data['data_source'])\n return data\n\n\nclass JSONLDRelatedField(relations.HyperlinkedRelatedField):\n \"\"\"\n Support of showing and saving of expanded JSON nesting or just a resource\n URL.\n Serializing is controlled by query string param 'expand', deserialization\n by format of JSON given.\n\n Default serializing is expand=true.\n \"\"\"\n\n invalid_json_error = _('Incorrect JSON. Expected JSON, received %s.')\n\n def __init__(self, *args, **kwargs):\n self.related_serializer = kwargs.pop('serializer', None)\n self.hide_ld_context = kwargs.pop('hide_ld_context', False)\n super(JSONLDRelatedField, self).__init__(*args, **kwargs)\n\n def use_pk_only_optimization(self):\n if self.is_expanded():\n return False\n else:\n return True\n\n def to_representation(self, obj):\n if isinstance(self.related_serializer, str):\n self.related_serializer = globals().get(self.related_serializer, None)\n if self.is_expanded():\n return self.related_serializer(obj, hide_ld_context=self.hide_ld_context,\n context=self.context).data\n link = super(JSONLDRelatedField, self).to_representation(obj)\n link = urlquote_id(link)\n return {\n '@id': link\n }\n\n def to_internal_value(self, value):\n if '@id' in value:\n return super(JSONLDRelatedField, self).to_internal_value(value['@id'])\n else:\n raise ValidationError(\n self.invalid_json_error % type(value).__name__)\n\n def is_expanded(self):\n return getattr(self, 'expanded', False)\n\n\nclass EnumChoiceField(serializers.Field):\n \"\"\"\n Database value of tinyint is converted to and from a string representation\n of choice field.\n\n TODO: Find if there's standardized way to render Schema.org enumeration\n instances in JSON-LD.\n \"\"\"\n\n def __init__(self, choices, prefix=''):\n self.choices = choices\n self.prefix = prefix\n super(EnumChoiceField, self).__init__()\n\n def to_representation(self, obj):\n if obj is None:\n return None\n return self.prefix + utils.get_value_from_tuple_list(self.choices,\n obj, 1)\n\n def to_internal_value(self, data):\n return utils.get_value_from_tuple_list(self.choices,\n self.prefix + str(data), 0)\n\n\nclass ISO8601DurationField(serializers.Field):\n\n def to_representation(self, obj):\n if obj:\n d = Duration(milliseconds=obj)\n return duration_isoformat(d)\n else:\n return None\n\n def to_internal_value(self, data):\n if data:\n value = parse_duration(data)\n return (\n value.days * 24 * 3600 * 1000000\n + value.seconds * 1000\n + value.microseconds / 1000\n )\n else:\n return 0\n\n\nclass MPTTModelSerializer(serializers.ModelSerializer):\n def __init__(self, *args, **kwargs):\n super(MPTTModelSerializer, self).__init__(*args, **kwargs)\n for field_name in 'lft', 'rght', 'tree_id', 'level':\n if field_name in self.fields:\n del self.fields[field_name]\n\n\nclass TranslatedModelSerializer(serializers.ModelSerializer):\n def __init__(self, *args, **kwargs):\n 
super(TranslatedModelSerializer, self).__init__(*args, **kwargs)\n model = self.Meta.model\n try:\n trans_opts = translator.get_options_for_model(model)\n except NotRegistered:\n self.translated_fields = []\n return\n\n self.translated_fields = trans_opts.fields.keys()\n lang_codes = [x[0] for x in settings.LANGUAGES]\n # Remove the pre-existing data in the bundle.\n for field_name in self.translated_fields:\n for lang in lang_codes:\n key = \"%s_%s\" % (field_name, lang)\n if key in self.fields:\n del self.fields[key]\n del self.fields[field_name]\n\n # def get_field(self, model_field):\n # kwargs = {}\n # if issubclass(\n # model_field.__class__,\n # (django_db_models.CharField,\n # django_db_models.TextField)):\n # if model_field.null:\n # kwargs['allow_none'] = True\n # kwargs['max_length'] = getattr(model_field, 'max_length')\n # return fields.CharField(**kwargs)\n # return super(TranslatedModelSerializer, self).get_field(model_field)\n\n def to_representation(self, obj):\n ret = super(TranslatedModelSerializer, self).to_representation(obj)\n if obj is None:\n return ret\n return self.translated_fields_to_representation(obj, ret)\n\n def to_internal_value(self, data):\n \"\"\"\n Convert complex translated json objects to flat format.\n E.g. json structure containing `name` key like this:\n {\n \"name\": {\n \"fi\": \"musiikkiklubit\",\n \"sv\": \"musikklubbar\",\n \"en\": \"music clubs\"\n },\n ...\n }\n Transforms this:\n {\n \"name\": \"musiikkiklubit\",\n \"name_fi\": \"musiikkiklubit\",\n \"name_sv\": \"musikklubbar\",\n \"name_en\": \"music clubs\"\n ...\n }\n :param data:\n :return:\n \"\"\"\n lang = settings.LANGUAGES[0][0]\n for field_name in self.translated_fields:\n # FIXME: handle default lang like others!?\n lang = settings.LANGUAGES[0][0] # Handle default lang\n if data.get(field_name, None) is None:\n continue\n values = data[field_name].copy() # Save original values\n\n key = \"%s_%s\" % (field_name, lang)\n val = data[field_name].get(lang)\n if val:\n values[key] = val # field_name_LANG\n values[field_name] = val # field_name\n if lang in values:\n del values[lang] # Remove original key LANG\n for lang in [x[0] for x in settings.LANGUAGES[1:]]:\n key = \"%s_%s\" % (field_name, lang)\n val = data[field_name].get(lang)\n if val:\n values[key] = val # field_name_LANG\n values[field_name] = val # field_name\n if lang in values:\n del values[lang] # Remove original key LANG\n data.update(values)\n del data[field_name] # Remove original field_name from data\n\n # do remember to call the super class method as well!\n data.update(super().to_internal_value(data))\n\n return data\n\n def translated_fields_to_representation(self, obj, ret):\n for field_name in self.translated_fields:\n d = {}\n default_lang = settings.LANGUAGES[0][0]\n d[default_lang] = getattr(obj, field_name)\n for lang in [x[0] for x in settings.LANGUAGES[1:]]:\n key = \"%s_%s\" % (field_name, lang) \n val = getattr(obj, key, None)\n if val == None:\n continue \n d[lang] = val\n\n # If no text provided, leave the field as null\n for key, val in d.items():\n if val != None:\n break\n else:\n d = None\n ret[field_name] = d\n\n return ret\n\n\nclass LinkedEventsSerializer(TranslatedModelSerializer, MPTTModelSerializer):\n \"\"\"Serializer with the support for JSON-LD/Schema.org.\n\n JSON-LD/Schema.org syntax::\n\n {\n \"@context\": \"http://schema.org\",\n \"@type\": \"Event\",\n \"name\": \"Event name\",\n ...\n }\n\n See full example at: http://schema.org/Event\n\n Args:\n hide_ld_context (bool):\n Hides 
`@context` from JSON, can be used in nested\n serializers\n \"\"\"\n\n def __init__(self, instance=None, files=None,\n context=None, partial=False, many=None,\n allow_add_remove=False, hide_ld_context=False, **kwargs):\n super(LinkedEventsSerializer, self).__init__(\n instance=instance, context=context, **kwargs)\n if 'created_by' in self.fields:\n del self.fields['created_by']\n if 'modified_by' in self.fields:\n del self.fields['modified_by']\n\n if context is not None:\n include_fields = context.get('include', [])\n for field_name in include_fields:\n if not field_name in self.fields:\n continue\n field = self.fields[field_name]\n if isinstance(field, relations.ManyRelatedField):\n field = field.child_relation\n if not isinstance(field, JSONLDRelatedField):\n continue\n field.expanded = True\n\n self.hide_ld_context = hide_ld_context\n\n self.disable_camelcase = True\n if self.context and 'request' in self.context:\n request = self.context['request']\n if 'disable_camelcase' in request.QUERY_PARAMS:\n self.disable_camelcase = True\n\n def to_representation(self, obj):\n \"\"\"\n Before sending to renderer there's a need to do additional work on\n to-be-JSON dictionary data:\n 1. Add @context, @type and @id fields\n 2. Convert field names to camelCase\n Renderer is the right place for this but now loop is done just once.\n Reversal conversion is done in parser.\n \"\"\"\n ret = super(LinkedEventsSerializer, self).to_representation(obj)\n if 'id' in ret and 'request' in self.context:\n try:\n ret['@id'] = reverse(self.view_name,\n kwargs={u'pk': ret['id']},\n request=self.context['request'])\n except NoReverseMatch:\n ret['@id'] = str(ret['id'])\n ret['@id'] = urlquote_id(ret['@id'])\n\n # Context is hidden if:\n # 1) hide_ld_context is set to True\n # 2) self.object is None, e.g. we are in the list of stuff\n if not self.hide_ld_context and self.instance is not None:\n if hasattr(obj, 'jsonld_context') \\\n and isinstance(obj.jsonld_context, (dict, list)):\n ret['@context'] = obj.jsonld_context\n else:\n ret['@context'] = 'http://schema.org'\n\n # Use jsonld_type attribute if present,\n # if not fallback to automatic resolution by model name.\n # Note: Plan 'type' could be aliased to @type in context definition to\n # conform JSON-LD spec.\n if hasattr(obj, 'jsonld_type'):\n ret['@type'] = obj.jsonld_type\n else:\n ret['@type'] = obj.__class__.__name__\n\n return ret\n\n\ndef _clean_qp(query_params):\n \"\"\"\n Strip 'event.' prefix from all query params.\n :rtype : QueryDict\n :param query_params: dict self.request.QUERY_PARAMS\n :return: QueryDict QUERY_PARAMS\n \"\"\"\n query_params = query_params.copy() # do not alter original dict\n nspace = 'event.'\n for key in query_params.keys():\n if key.startswith(nspace):\n new_key = key[len(nspace):]\n # .pop() returns a list(?), don't use\n # query_params[new_key] = query_params.pop(key)\n query_params[new_key] = query_params[key]\n del query_params[key]\n return query_params\n\n\nclass KeywordSerializer(LinkedEventsSerializer):\n view_name = 'keyword-detail'\n\n class Meta:\n model = Keyword\n\n\nclass KeywordViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Keyword.objects.all()\n serializer_class = KeywordSerializer\n\n def get_queryset(self):\n \"\"\"\n Return Keyword queryset. 
If request has parameter show_all_keywords=1\n all Keywords are returned, otherwise only which have events.\n Additional query parameters:\n event.data_source\n event.start\n event.end\n \"\"\"\n queryset = Keyword.objects.all()\n if self.request.QUERY_PARAMS.get('show_all_keywords'):\n # Limit by data_source anyway, if it is set\n data_source = self.request.QUERY_PARAMS.get('data_source')\n if data_source:\n data_source = data_source.lower()\n queryset = queryset.filter(data_source=data_source)\n else:\n events = Event.objects.all()\n params = _clean_qp(self.request.QUERY_PARAMS)\n events = _filter_event_queryset(events, params)\n keyword_ids = events.values_list('keywords',\n flat=True).distinct().order_by()\n queryset = queryset.filter(id__in=keyword_ids)\n # Optionally filter keywords by filter parameter,\n # can be used e.g. with typeahead.js\n val = self.request.QUERY_PARAMS.get('filter')\n if val:\n queryset = queryset.filter(name__startswith=val)\n return queryset\n\nregister_view(KeywordViewSet, 'keyword')\n\n\nclass PlaceSerializer(LinkedEventsSerializer, GeoModelSerializer):\n view_name = 'place-detail'\n\n class Meta:\n model = Place\n\n\nclass PlaceViewSet(GeoModelAPIView, viewsets.ReadOnlyModelViewSet):\n queryset = Place.objects.all()\n serializer_class = PlaceSerializer\n\n def get_queryset(self):\n \"\"\"\n Return Place queryset. If request has parameter show_all_places=1\n all Places are returned, otherwise only which have events.\n Additional query parameters:\n event.data_source\n event.start\n event.end\n \"\"\"\n queryset = Place.objects.all()\n if self.request.QUERY_PARAMS.get('show_all_places'):\n pass\n else:\n events = Event.objects.all()\n params = _clean_qp(self.request.QUERY_PARAMS)\n events = _filter_event_queryset(events, params)\n location_ids = events.values_list('location_id',\n flat=True).distinct().order_by()\n queryset = queryset.filter(id__in=location_ids)\n return queryset\n\nregister_view(PlaceViewSet, 'place')\n\n\nclass OpeningHoursSpecificationSerializer(LinkedEventsSerializer):\n class Meta:\n model = OpeningHoursSpecification\n\n\nclass LanguageSerializer(LinkedEventsSerializer):\n view_name = 'language-detail'\n\n class Meta:\n model = Language\n\n\nclass LanguageViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Language.objects.all()\n serializer_class = LanguageSerializer\n\nregister_view(LanguageViewSet, 'language')\n\nLOCAL_TZ = pytz.timezone(settings.TIME_ZONE)\n\nclass EventLinkSerializer(serializers.ModelSerializer):\n def to_representation(self, obj):\n ret = super(EventLinkSerializer, self).to_representation(obj)\n if not ret['name']:\n ret['name'] = None\n return ret\n\n class Meta:\n model = EventLink\n exclude = ['id', 'event']\n\nclass OfferSerializer(TranslatedModelSerializer):\n class Meta:\n model = Offer\n exclude = ['id', 'event']\n\n\nclass EventSerializer(LinkedEventsSerializer, GeoModelAPIView):\n location = JSONLDRelatedField(serializer=PlaceSerializer, required=False,\n view_name='place-detail', read_only=True)\n # provider = OrganizationSerializer(hide_ld_context=True)\n keywords = JSONLDRelatedField(serializer=KeywordSerializer, many=True,\n required=False,\n view_name='keyword-detail', read_only=True)\n super_event = JSONLDRelatedField(required=False, view_name='event-detail',\n read_only=True)\n event_status = EnumChoiceField(Event.STATUSES)\n external_links = EventLinkSerializer(many=True)\n offers = OfferSerializer(many=True)\n sub_events = JSONLDRelatedField(serializer='EventSerializer',\n required=False, 
view_name='event-detail',\n many=True, read_only=True)\n\n view_name = 'event-detail'\n\n def __init__(self, *args, skip_empties=False, skip_fields=set(), **kwargs):\n super(EventSerializer, self).__init__(*args, **kwargs)\n # The following can be used when serializing when\n # testing and debugging.\n self.skip_empties = skip_empties\n self.skip_fields = skip_fields\n\n def get_location(self, data):\n \"\"\"\n Replace location id dict in data with a Place object\n \"\"\"\n location = data.get('location')\n if location and '@id' in location:\n location_id = parse_id_from_uri(location['@id'])\n try:\n data['location'] = Place.objects.get(id=location_id)\n except Place.DoesNotExist:\n err = 'Place with id {} does not exist'\n raise ParseError(err.format(location_id))\n return data\n\n def get_keywords(self, data):\n \"\"\"\n Replace list of keyword dicts in data with a list of Keyword objects\n \"\"\"\n new_kw = []\n\n for kw in data.get('keywords', []):\n\n if '@id' in kw:\n kw_id = parse_id_from_uri(kw['@id'])\n\n try:\n keyword = Keyword.objects.get(id=kw_id)\n except Keyword.DoesNotExist:\n err = 'Keyword with id {} does not exist'\n raise ParseError(err.format(kw_id))\n\n new_kw.append(keyword)\n\n data['keywords'] = new_kw\n return data\n\n def get_datetimes(self, data):\n for field in ['date_published', 'start_time', 'end_time']:\n val = data.get(field, None)\n if val:\n if isinstance(val, str):\n data[field] = parse_time(val, True)\n return data\n\n def to_internal_value(self, data):\n data = super().to_internal_value(data)\n\n # TODO: figure out how to get this via JSONLDRelatedField\n if 'location' in data:\n location_id = parse_id_from_uri(data['location']['@id'])\n data['location'] = Place.objects.get(id=location_id)\n\n # TODO: figure out how to get these via JSONLDRelatedField\n data = self.get_keywords(data)\n\n return data\n\n def create(self, validated_data):\n offers = validated_data.pop('offers', [])\n links = validated_data.pop('external_links', [])\n keywords = validated_data.pop('keywords', [])\n\n # create object\n e = Event.objects.create(**validated_data)\n\n # create and add related objects \n for offer in offers:\n Offer.objects.create(event=e, **offer)\n for link in links:\n EventLink.objects.create(event=e, **link)\n e.keywords.add(*keywords)\n\n return e\n\n def update(self, instance, validated_data):\n\n # prepare a list of fields to be updated\n update_fields = [\n 'start_time', 'end_time', 'location'\n ]\n\n languages = [x[0] for x in settings.LANGUAGES]\n for field in EventTranslationOptions.fields:\n for lang in languages:\n update_fields.append(field + '_' + lang)\n\n # update values\n for field in update_fields:\n orig_value = getattr(instance, field)\n new_value = validated_data.get(field, orig_value)\n setattr(instance, field, new_value)\n\n # also update `has_end_time` if needed\n if instance.end_time:\n instance.has_end_time = True\n\n # save changes\n instance.save()\n\n # update offers\n if 'offers' in validated_data:\n instance.offers.all().delete()\n for offer in validated_data.get('offers', []):\n Offer.objects.create(event=instance, **offer)\n\n # update ext links\n if 'external_links' in validated_data:\n instance.external_links.all().delete()\n for link in validated_data.get('external_links', []):\n EventLink.objects.create(event=instance, **link)\n\n # update keywords\n instance.keywords.clear() \n instance.keywords.add(*validated_data['keywords'])\n\n return instance\n\n def to_representation(self, obj):\n ret = super(EventSerializer, 
self).to_representation(obj)\n if 'start_time' in ret and not obj.has_start_time:\n # Return only the date part\n ret['start_time'] = obj.start_time.astimezone(LOCAL_TZ).strftime('%Y-%m-%d')\n if 'end_time' in ret and not obj.has_end_time:\n # If we're storing only the date part, do not pretend we have the exact time.\n if obj.end_time - obj.start_time <= timedelta(days=1):\n ret['end_time'] = None\n if hasattr(obj, 'days_left'):\n ret['days_left'] = int(obj.days_left)\n if self.skip_empties:\n for k in list(ret.keys()):\n val = ret[k]\n try:\n if val is None or len(val) == 0:\n del ret[k]\n except TypeError:\n # not list/dict\n pass\n for field in self.skip_fields:\n del ret[field]\n return ret\n\n class Meta:\n model = Event\n exclude = ['has_start_time', 'has_end_time', 'is_recurring_super']\n\n\ndef parse_time(time_str, is_start):\n time_str = time_str.strip()\n # Handle dates first. Assume dates are given in local timezone.\n # FIXME: What if there's no local timezone?\n try:\n dt = datetime.strptime(time_str, '%Y-%m-%d')\n dt = LOCAL_TZ.localize(dt)\n except ValueError:\n dt = None\n if not dt:\n if time_str.lower() == 'today':\n dt = datetime.utcnow().replace(tzinfo=pytz.utc)\n dt = dt.astimezone(LOCAL_TZ)\n dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)\n if dt:\n # With start timestamps, we treat dates as beginning\n # at midnight the same day. End timestamps are taken to\n # mean midnight on the following day.\n if not is_start:\n dt = dt + timedelta(days=1)\n else:\n try:\n # Handle all other times through dateutil.\n dt = dateutil_parse(time_str)\n except (TypeError, ValueError):\n raise ParseError('time in invalid format (try ISO 8601 or yyyy-mm-dd)')\n return dt\n\n\nclass JSONAPIViewSet(viewsets.ReadOnlyModelViewSet):\n def initial(self, request, *args, **kwargs):\n ret = super(JSONAPIViewSet, self).initial(request, *args, **kwargs)\n self.srs = srid_to_srs(self.request.QUERY_PARAMS.get('srid', None))\n return ret\n\n def get_serializer_context(self):\n context = super(JSONAPIViewSet, self).get_serializer_context()\n\n include = self.request.QUERY_PARAMS.get('include', '')\n context['include'] = [x.strip() for x in include.split(',') if x]\n context['srs'] = self.srs\n\n return context\n\n\nclass LinkedEventsOrderingFilter(filters.OrderingFilter):\n ordering_param = 'sort'\n\n\nclass EventOrderingFilter(LinkedEventsOrderingFilter):\n def filter_queryset(self, request, queryset, view):\n queryset = super(EventOrderingFilter, self).filter_queryset(request, queryset, view)\n ordering = self.get_ordering(request, queryset, view)\n if not ordering:\n ordering = []\n if 'days_left' in [x.lstrip('-') for x in ordering]:\n queryset = queryset.extra(select={'days_left': 'date_part(\\'day\\', end_time - start_time)'})\n return queryset\n\n\ndef parse_duration(duration):\n m = re.match(r'(\\d+)\\s*(d|h|m|s)?$', duration.strip().lower())\n if not m:\n raise ParseError(\"Invalid duration supplied. Try '1d' or '2h'.\")\n val, unit = m.groups()\n if not unit:\n unit = 's'\n\n if unit == 'm':\n mul = 60\n elif unit == 'h':\n mul = 3600\n elif unit == 'd':\n mul = 24 * 3600\n\n return int(val) * mul\n\ndef _filter_event_queryset(queryset, params, srs=None):\n \"\"\"\n Filter events queryset by params\n (e.g. self.request.QUERY_PARAMS in EventViewSet)\n \"\"\"\n # Filter by string (case insensitive). 
This searches from all fields\n # which are marked translatable in translation.py\n val = params.get('text', None)\n if val:\n val = val.lower()\n # Free string search from all translated fields\n fields = EventTranslationOptions.fields\n # and these languages\n languages = [x[0] for x in settings.LANGUAGES]\n qset = Q()\n for field in fields:\n for lang in languages:\n kwarg = {field + '_' + lang + '__icontains': val}\n qset |= Q(**kwarg)\n queryset = queryset.filter(qset)\n\n val = params.get('last_modified_since', None)\n # This should be in format which dateutil.parser recognizes, e.g.\n # 2014-10-29T12:00:00Z == 2014-10-29T12:00:00+0000 (UTC time)\n # or 2014-10-29T12:00:00+0200 (local time)\n if val:\n dt = parse_time(val, is_start=False)\n queryset = queryset.filter(Q(last_modified_time__gte=dt))\n\n val = params.get('start', None)\n if val:\n dt = parse_time(val, is_start=True)\n queryset = queryset.filter(Q(end_time__gt=dt) | Q(start_time__gte=dt))\n\n val = params.get('end', None)\n if val:\n dt = parse_time(val, is_start=False)\n queryset = queryset.filter(Q(end_time__lt=dt) | Q(start_time__lte=dt))\n\n val = params.get('bbox', None)\n if val:\n bbox_filter = build_bbox_filter(srs, val, 'position')\n places = Place.geo_objects.filter(**bbox_filter)\n queryset = queryset.filter(location__in=places)\n\n val = params.get('data_source', None)\n if val:\n queryset = queryset.filter(data_source=val)\n\n # Filter by location id, multiple ids separated by comma\n val = params.get('location', None)\n if val:\n val = val.split(',')\n queryset = queryset.filter(location_id__in=val)\n\n # Filter by keyword id, multiple ids separated by comma\n val = params.get('keyword', None)\n if val:\n val = val.split(',')\n queryset = queryset.filter(keywords__pk__in=val)\n\n # Filter only super or sub events if recurring has value\n val = params.get('recurring', None)\n if val:\n val = val.lower()\n if val == 'super':\n queryset = queryset.filter(is_recurring_super=True)\n elif val == 'sub':\n queryset = queryset.filter(is_recurring_super=False)\n\n val = params.get('max_duration', None)\n if val:\n dur = parse_duration(val)\n cond = 'end_time - start_time <= %s :: interval'\n queryset = queryset.extra(where=[cond], params=[str(dur)])\n\n val = params.get('min_duration', None)\n if val:\n dur = parse_duration(val)\n cond = 'end_time - start_time >= %s :: interval'\n queryset = queryset.extra(where=[cond], params=[str(dur)])\n\n return queryset\n\n\nclass EventViewSet(viewsets.ModelViewSet, JSONAPIViewSet):\n \"\"\"\n # Filtering retrieved events\n\n Query parameters can be used to filter the retrieved events by\n the following criteria.\n\n ## Event time\n\n Use `start` and `end` to restrict the date range of returned events.\n Any events that intersect with the given date range will be returned.\n\n The parameters `start` and `end` can be given in the following formats:\n\n - ISO 8601 (including the time of day)\n - yyyy-mm-dd\n\n In addition, `today` can be used as the value.\n\n Example:\n\n event/?start=2014-01-15&end=2014-01-20\n\n [See the result](?start=2014-01-15&end=2014-01-20 \"json\")\n\n ## Event location\n\n ### Bounding box\n\n To restrict the retrieved events to a geographical region, use\n the query parameter `bbox` in the format\n\n bbox=west,south,east,north\n\n Where `west` is the longitude of the rectangle's western boundary,\n `south` is the latitude of the rectangle's southern boundary,\n and so on.\n\n Example:\n\n event/?bbox=24.9348,60.1762,24.9681,60.1889\n\n [See the 
result](?bbox=24.9348,60.1762,24.9681,60.1889 \"json\")\n\n # Getting detailed data\n\n In the default case, keywords, locations, and other fields that\n refer to separate resources are only displayed as simple references.\n\n If you want to include the complete data from related resources in\n the current response, use the keyword `include`. For example:\n\n event/?include=location,keywords\n\n [See the result](?include=location,keywords \"json\")\n\n # Response data for the current URL\n\n \"\"\"\n queryset = Event.objects.all()\n # Use select_ and prefetch_related() to reduce the amount of queries\n queryset = queryset.select_related('location')\n queryset = queryset.prefetch_related(\n 'offers', 'keywords', 'external_links', 'sub_events')\n serializer_class = EventSerializer\n filter_backends = (EventOrderingFilter,)\n ordering_fields = ('start_time', 'end_time', 'days_left')\n\n def get_object(self):\n # Overridden to prevent queryset filtering from being applied\n # outside list views.\n return get_object_or_404(Event.objects.all(), pk=self.kwargs['pk'])\n\n def filter_queryset(self, queryset):\n \"\"\"\n TODO: convert to use proper filter framework\n \"\"\"\n\n queryset = super(EventViewSet, self).filter_queryset(queryset)\n\n if 'show_all' not in self.request.QUERY_PARAMS:\n queryset = queryset.filter(\n Q(event_status=Event.SCHEDULED)\n )\n queryset = _filter_event_queryset(queryset, self.request.QUERY_PARAMS,\n srs=self.srs)\n return queryset\n\n\n def get_authorized_publisher(self, request, data):\n user = request.user\n\n # require user\n assert user.is_authenticated(), 'User needs to be authenticated.'\n\n # require permission to publish\n objs = user.organizations.all()\n assert objs, 'User needs to be authorized to publish events.'\n assert objs.count() == 1, (\n 'User is connected to multiple organizations. 
This is currently '\n 'not supported.'\n )\n\n # pick publisher\n data['publisher'] = objs.first().id\n return data\n\n def create(self, request, *args, **kwargs):\n data = request.data\n\n # all events created by api are marked coming from the system data\n # source\n data['data_source'] = SYSTEM_DATA_SOURCE_ID\n\n # get publisher from the auth user\n data = self.get_authorized_publisher(request, data)\n\n # generate event id\n data = perform_id_magic_for(data)\n\n # then do the usual stuff defined in `rest_framework.CreateModelMixin`\n serializer = self.get_serializer(data=data)\n serializer.is_valid(raise_exception=True)\n\n self.perform_create(serializer)\n\n return Response(\n serializer.data,\n status=status.HTTP_201_CREATED,\n headers=self.get_success_headers(serializer.data)\n )\n\n\nregister_view(EventViewSet, 'event')\n\n\nclass SearchSerializer(serializers.Serializer):\n def to_representation(self, search_result):\n model = search_result.model\n assert model in serializers_by_model, \"Serializer for %s not found\" % model\n ser_class = serializers_by_model[model]\n data = ser_class(search_result.object, context=self.context).data\n data['object_type'] = model._meta.model_name\n data['score'] = search_result.score\n return data\n\nDATE_DECAY_SCALE = '30d'\n\nclass SearchViewSet(GeoModelAPIView, viewsets.ViewSetMixin, generics.ListAPIView):\n serializer_class = SearchSerializer\n\n def list(self, request, *args, **kwargs):\n languages = [x[0] for x in settings.LANGUAGES]\n\n # If the incoming language is not specified, go with the default.\n self.lang_code = request.QUERY_PARAMS.get('language', languages[0])\n if self.lang_code not in languages:\n raise ParseError(\"Invalid language supplied. Supported languages: %s\" %\n ','.join(languages))\n\n input_val = request.QUERY_PARAMS.get('input', '').strip()\n q_val = request.QUERY_PARAMS.get('q', '').strip()\n if not input_val and not q_val:\n raise ParseError(\"Supply search terms with 'q=' or autocomplete entry with 'input='\")\n if input_val and q_val:\n raise ParseError(\"Supply either 'q' or 'input', not both\")\n\n old_language = translation.get_language()[:2]\n translation.activate(self.lang_code)\n\n queryset = SearchQuerySet()\n if input_val:\n queryset = queryset.filter(autosuggest=input_val)\n now = datetime.utcnow()\n queryset = queryset.filter(end_time__gt=now).decay({\n 'gauss': {\n 'end_time': {\n 'origin': now,\n 'scale': DATE_DECAY_SCALE }}})\n else:\n queryset = queryset.filter(text=AutoQuery(q_val))\n\n self.object_list = queryset.load_all()\n\n page = self.paginate_queryset(self.object_list)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(self.object_list, many=True)\n resp = Response(serializer.data)\n\n translation.activate(old_language)\n\n return resp\n\n\nregister_view(SearchViewSet, 'search', base_name='search')\n","sub_path":"events/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":37180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"218253293","text":"import tensorflow as tf\nfrom housing import HousingPrice\n\nX_train, X_test, y_train, y_test = HousingPrice().prepare_data()\n\nwith tf.name_scope(\"place\"):\n X = tf.placeholder(tf.float32, shape=(None, X_train.shape[1]), name='X')\n y = tf.placeholder(tf.float32, shape=(None), name='y')\n\nwith tf.name_scope(\"DNN\"):\n hidden1 = tf.layers.dense(X, 100, 
activation=tf.nn.relu, name=\"hidden1\")\n drop1 = tf.layers.dropout(hidden1, rate=0.8, name=\"drop1\")\n hidden2 = tf.layers.dense(drop1, 80, activation=tf.nn.relu, name=\"hidden2\")\n logits = tf.layers.dense(hidden2, 1, name=\"outputs\")\n\nwith tf.name_scope(\"loss\"):\n # predictions = tf.squeeze(logits, 1)\n loss = tf.losses.mean_squared_error(labels=y, predictions=logits)\n\nwith tf.name_scope(\"train\"):\n optimizer = tf.train.MomentumOptimizer(0.01, 0.9)\n training_op = optimizer.minimize(loss)\n\nwith tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n for i in range(2):\n sess.run(training_op, feed_dict={X: X_train, y: y_train})\n loss_val = loss.eval(feed_dict={X: X_train, y: y_train})\n print('epoch #', i)\n print(loss_val)\n loss_val = loss.eval(feed_dict={X: X_test, y: y_test})\n print(loss_val)\n","sub_path":"src/028_tf_housing.py","file_name":"028_tf_housing.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"253424594","text":"#/usr/bin/env python2.7\n\nimport sys\n\nimport udp_options\nimport udp_usrreq\n\nimport packet_generators\n\n# Test all UDP Options packet generators and wait for an echo\nif __name__ == \"__main__\":\n src = \"192.168.0.1\"\n dst = \"192.168.0.1\"\n\n if len(sys.argv) > 1:\n src = sys.argv[2]\n if len(sys.argv) > 2:\n dst = sys.argv[3]\n\n for generator in packet_generators.generators:\n opts = generator() \n print(opts)\n udp_usrreq.udp_output(\"Hello Options Space on a packet\\n\", \n {\"src\":src, \"dst\":dst, \"sport\":2500, \"dport\":2600}, \n options=opts)\n","sub_path":"udpoptions/all_generators_echo_test.py","file_name":"all_generators_echo_test.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"417700534","text":"#!/usr/bin/env python3\nfrom flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import render_template,abort\nfrom datetime import datetime\nfrom sqlalchemy import create_engine\nfrom pymongo import MongoClient\nimport os\nimport json\n\napp=Flask(__name__)\napp.config['TEMPLATES_AUTO_RELOAD']=True\napp.config['SQLALCHEMY_DATABASE_URI']='mysql://root@localhost/shiyanlou'\ndb=SQLAlchemy(app)\nengine=create_engine('mysql://root@localhost/shiyanlou') \nclient=MongoClient('127.0.0.1',27017)\ndbm=client.shiyanlou\n#-----------------------------\nclass File(db.Model):\n id=db.Column(db.Integer, primary_key=True)\n title=db.Column(db.String(80))\n created_time=db.Column(db.DateTime)\n category_id=db.Column(db.Integer, db.ForeignKey('category.id'))\n category=db.relationship('Category',backref=db.backref('posts',lazy='dynamic'))\n content=db.Column(db.Text)\n\n def __init__(self,title,created_time,category,content):\n self.title =title\n self.created_time = created_time\n self.category=category \n self.content = content\n\n def __repr__(self):\n return '' % self.title\n \n def add_tag(self,tag_name):\n tag_id=len(self.title) \n tag={'tag_id':tag_id,'tag_name':tag_name}\n dbm.taglist.insert_one(tag)\n\n def remove_tag(self,tag_name):\n dbm.taglist.deleteMany({'tag_name':tag_name})\n\n @property\n def tags(self):\n tag_id=len(self.title) \n tags=[]\n for tag in dbm.taglist.find({'tag_id':tag_id}):\n tags.append(tag['tag_name'])\n return tags\n\n\nclass Category(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(80), unique=True)\n\n def __init__(self,name):\n self.name = 
name\n\n def __repr__(self):\n return '' % self.name\n\n#--------------\n\ndef GetTag(title):\n tags=[]\n tag_id=len(title) \n for tag in dbm.taglist.find({'tag_id':tag_id}):\n tags.append(tag['tag_name'])\n return tags\n\n#print(GetTag())\n\ndef GetAll(table):\n comm='select * from '+table\n datas=engine.execute(comm).fetchall()\n idlist=[]\n for data in datas:\n temp={}\n temp['id']=data[0]\n temp['title']=data[1]\n temp['tag']=GetTag(data[1])\n idlist.append(temp)\n return idlist\n\ndef GetInfo(table,id):\n comm='select * from ' +table+' where id= '+id\n datas=engine.execute(comm).fetchall()\n info=[]\n for data in datas:\n temp={}\n temp['id']=data[0]\n temp['title']=data[1]\n temp['create_time']=data[2]\n temp['content']=data[4]\n info.append(temp)\n return info[0]\n\n#----------\n\n@app.route('/')\ndef index():\n pagelist=GetAll('file')\n return render_template('index.html',titles=pagelist)\n\n@app.route('/files/')\ndef file(file_id):\n data = GetInfo('file',file_id)\n if len(data)!=0:\n return render_template('file.html',file=data)\n else:\n abort(404)\n\n@app.errorhandler(404)\ndef not_found(error):\n\treturn render_template('404.html'),404\n \n","sub_path":"news-w2-3/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"113305928","text":"import csv\nimport os\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom busstation.models import Station, Route\n\n\nclass Command(BaseCommand):\n help = 'Load data from files'\n\n def add_arguments(self, parser):\n pass\n\n def handle(self, *args, **options):\n BASE_DIR = os.path.dirname(settings.BASE_DIR)\n file_path = os.path.join(BASE_DIR, 'moscow_bus_stations.csv')\n with open(file_path, 'r') as csvfile:\n station_reader = csv.reader(csvfile, delimiter=';')\n next(station_reader)\n\n for line in station_reader:\n latitude, longitude, raw_routes, name = (line[3], line[2], line[7],\n line[1])\n station = Station.objects.create(name=name, latitude=latitude, longitude=longitude)\n routes = raw_routes.split(';')\n for route in routes:\n route = route.strip()\n route_add = Route.objects.filter(name=route).exists()\n if not route_add:\n route_add = Route.objects.create(name=route)\n station.routes.add(route_add)\n station.save()\n print('Данные из файла moscow_bus_stations.csv загружены')","sub_path":"project/project/busstation/management/commands/import_stations.py","file_name":"import_stations.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"31927759","text":"import numpy as np\n\nmatrixA = np.array([[10,-7,0],[-3,2,6],[5,-1,5]])\nmatrixB = np.array([[-73,78,24],[92,66,25],[-80,37,10]])\n\nrandY = []\n\nfor i in range(0,30000):\n\trandY = randY + [np.random.randint(-100,100, size=3)]\n\ncollectZA = []\ncollectZB = []\n\nfor k in randY:\n\tcollectZA = collectZA +[np.linalg.solve(matrixA,k)]\n\tcollectZB = collectZB +[np.linalg.solve(matrixB,k)]\n\nmaxRA = -1\nmaxRB = -1\n\nfor i in range(0,30000):\n\n\tratio1 = np.linalg.norm(collectZA[i]) / np.linalg.norm(randY[i])\n\tratio2 = np.linalg.norm(collectZB[i]) / np.linalg.norm(randY[i])\n\t\n\tif ratio1 > maxRA:\n\t\tmaxRA = ratio1\n\n\tif ratio2 > maxRB:\n\t\tmaxRB = ratio2\n\ncondA = np.linalg.norm(matrixA) * maxRA\ncondB = np.linalg.norm(matrixB) * maxRB\nrealCA = np.linalg.cond(matrixA)\nrealCB = 
np.linalg.cond(matrixB)\n\nprint(matrixA)\nprint('Cond(A) 1-norm real = {}' .format(realCA))\nprint('Cond(A) 1-norm appr = {}' .format(condA))\n\nprint(matrixB)\nprint('Cond(B) 1-norm real = {}' .format(realCB))\nprint('Cond(B) 1-norm appr = {}' .format(condB))","sub_path":"Numerical Anaylsis Couse/2_4_HW5.py","file_name":"2_4_HW5.py","file_ext":"py","file_size_in_byte":1009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"343368833","text":"import copy\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport json\nimport os.path\nimport sys\nsys.path.append('../')\nfrom types import SimpleNamespace as Namespace\nfrom feature.SimpleFeatureExtractor import SimpleFeatureExtractor\nfrom util.Util import Util\n\nclassificationNum = 3\nrootDir = '../../'\npath = rootDir + Util.getConfig('trials_folder_path')\ntmpPath = rootDir + Util.getConfig('tmp_path')\ndataFileNames = ['0a.json','0b.json','0c.json']\nlabels = [0, 1, 1] #['normal', 'hole', 'scallop']\nextractor = SimpleFeatureExtractor()\ndfAll = None\nfor index, dataFileName in enumerate(dataFileNames):\n df = extractor.getSimpleFeaturedData(path + dataFileName, labels[index])\n # print(len(df))\n print(path + dataFileName, labels[index],len(df))\n if dfAll is None:\n dfAll = df\n continue\n else:\n dfAll = dfAll.append(df)\n\nprint(len(dfAll))\n\ndfAll = dfAll[['x', 'y', 'z', 'label']]\ndata = dfAll.as_matrix()\nprint(data)\n\nprint('****************Start to run classifications***************')\nrand_data = copy.deepcopy(data)\nnp.random.shuffle(rand_data)\n# extract a stack of 28x28 bitmaps\nX_rand = rand_data[:,:len(data[0])-1]\ny_rand = rand_data[:,len(data[0])-1]\n\n# X_rand = digits[:, 0:784]\n# y_rand = digits[:, 784:785]\nheldout_len = int(len(X_rand)*0.8)\nx_train = X_rand[:heldout_len]\ny_train = y_rand[:heldout_len]\nx_test = X_rand[heldout_len:]\ny_test = y_rand[heldout_len:]\n# X = data[:,:3]\n# y = data[:,4]\n\nfor numTree in range(1,11):\n if(numTree %2 == 0):\n continue\n \"\"\"Random Forest\"\"\"\n from sklearn.ensemble import RandomForestClassifier\n rf_model = RandomForestClassifier(n_estimators=numTree)\n model = rf_model\n print('Random Forest(',numTree,'):')\n\n # \"\"\"Artificial Neural Network\"\"\"\n # from sklearn.neural_network import MLPClassifier\n # ann_model = MLPClassifier()\n # model = ann_model\n # print('ANN:')\n #\n # \"\"\"SVM\"\"\"\n # from sklearn.svm import SVC\n # svm_model = SVC()\n # model = svm_model\n # print('SVM:')\n\n model.fit(x_train,y_train)\n print('Training score: ',model.score(x_train,y_train))\n print('Testing score: ', model.score(x_test,y_test))\n\n from sklearn.metrics import classification_report\n y_true = y_test\n y_pred = model.predict(x_test)\n target_names = ['0', '1']\n print(classification_report(y_true, y_pred, target_names=target_names))\n\n\n # from sklearn.model_selection import cross_val_score\n # cross_val_score = cross_val_score(model, x_train, y_train, cv=10)\n # print(cross_val_score)\n","sub_path":"script/process/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"304692450","text":"import requests\nfrom lxml import etree\nfrom .util import xml_to_dict\n\ndef request_directory(client):\n # Create XML message\n root = etree.Element('DirectoryReq', nsmap={\n None: 'http://www.betaalvereniging.nl/iDx/messages/Merchant-Acquirer/1.0.0'\n })\n root.attrib['productID'] = 
'NL:BVN:eMandatesCore:1.0'\n root.attrib['version'] = '1.0.0'\n root.append(client._timestamp())\n root.append(client._merchant(False))\n\n # Sign XML message and convert it to string\n data = client.sign_to_string(root)\n\n # Post signed XML message to directory endpoint\n r = requests.post(client.directory_url, data=data)\n\n if r.status_code >= 200 and r.status_code <= 399:\n # Parse XML response\n xml_result = etree.fromstring(r.text.encode('utf8'))\n\n # Verify response\n if not client.verify(xml_result):\n return {'is_error': True, 'error_code': 'SO100', 'error_message': 'Invalid response', 'error_detail': 'Signature verification failed'}\n\n # Convert XML object to dictionary\n result = xml_to_dict(xml_result)\n\n # Check for error\n if hasattr(result, 'error'):\n r = result['error']\n r['is_error'] = True\n return r\n\n # Parse response\n response = {\n 'is_error': False,\n 'timestamp': result['create_date_timestamp'],\n 'acquirer_id': result['acquirer']['acquirer_id'],\n 'countries': result['directory']['country']\n }\n\n if not isinstance(response['countries'], list):\n response['countries'] = [response['countries']]\n\n for country in response['countries']:\n country['name'] = country['country_names']\n del country['country_names']\n\n if not isinstance(country['issuer'], list):\n country['issuers'] = [country['issuer']]\n else:\n country['issuers'] = country['issuer']\n\n del country['issuer']\n\n return response\n else:\n # An HTTP error occurred\n return {'is_error': True, 'error_code': 'SO100', 'error_message': 'An unknown error occurred', 'error_detail': 'HTTP request returned error code'}\n","sub_path":"sepa_netherlands/directory.py","file_name":"directory.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"585353658","text":"import asyncio\nimport logging\n\nfrom django.utils import timezone\n\nfrom django_hpc_job_controller.client.core.messaging.message import Message\nfrom django_hpc_job_controller.client.scheduler.status import JobStatus\nfrom django_hpc_job_controller.server.utils import get_job_submission_lock, get_job_model_instance\n\n\n# Get the logger\nlogger = logging.getLogger(__name__)\n\n\nasync def handle_message(sock, token, queue, message):\n \"\"\"\n Handles an incoming message from a non file websocket\n\n :param sock: The socket that the message was received from\n :param token: The token used for this websocket connection\n :param queue: The queue to send messages on\n :param message: The raw message received\n :return: Nothing\n \"\"\"\n # Convert the raw message to a Message object\n msg = Message(data=message)\n\n # Get the message id\n msg_id = msg.pop_uint()\n\n if msg_id == Message.SUBMIT_JOB:\n # Acquire the job submission lock\n with get_job_submission_lock():\n # Clean up the django connection\n from django.db import connection\n connection.close()\n\n # Look up the job\n job = get_job_model_instance().objects.get(id=msg.pop_uint(), job_status=JobStatus.SUBMITTING)\n\n # Mark the job submitted\n job.job_status = JobStatus.SUBMITTED\n\n # Set the submitted time\n job.job_submitted_time = timezone.now()\n\n # Save the job\n job.save()\n\n elif msg_id == Message.UPDATE_JOB:\n # Clean up the django connection\n from django.db import connection\n connection.close()\n \n # Look up the job we are updating the status of\n job = get_job_model_instance().objects.get(id=msg.pop_uint())\n\n # Check that the jobs cluster matches the tokens cluster\n if 
job.cluster != token.cluster:\n logger.info(\"A different cluster ({} (id: {})) tried to update a job ({} (id: {})) it does not own!\".format(\n str(token.cluster), token.cluster.id, str(job), job.id\n ))\n return\n\n # Set the new status\n job.job_status = msg.pop_uint()\n\n # Set the extra details if there are any\n job.job_details = (job.job_details or '') + \"{}: New status: {}\\n{}\\n\\n\".format(\n timezone.now(), job.job_status, msg.pop_string() or 'No detail')\n\n # Check if we need to update various time stamps\n if job.job_status == JobStatus.QUEUED:\n job.job_queued_time = timezone.now()\n\n if job.job_status == JobStatus.RUNNING:\n if not job.job_queued_time:\n job.job_queued_time = timezone.now()\n job.job_running_time = timezone.now()\n\n if job.job_status in [JobStatus.CANCELLED, JobStatus.ERROR, JobStatus.WALL_TIME_EXCEEDED,\n JobStatus.OUT_OF_MEMORY, JobStatus.COMPLETED]:\n if not job.job_queued_time:\n job.job_queued_time = timezone.now()\n if not job.job_running_time:\n job.job_running_time = timezone.now()\n job.job_finished_time = timezone.now()\n\n # Save the job\n job.save()\n\n elif msg_id == Message.TRANSMIT_ASSURED_RESPONSE_WEBSOCKET_MESSAGE:\n # Create the socket\n from django_hpc_job_controller.server.settings import HPC_IPC_UNIX_SOCKET\n reader, writer = await asyncio.open_unix_connection(HPC_IPC_UNIX_SOCKET + \".\" + msg.pop_string())\n\n # Send the encapsulated message\n data = msg.pop_bytes()\n from django_hpc_job_controller.server.server import send_message_writer\n send_message_writer(data, writer, True)\n","sub_path":"server/cluster_manager.py","file_name":"cluster_manager.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"80932136","text":"import unittest\nimport os\n\nfrom interaction_engine.text_populator.variety_populator import VarietyPopulator\n\nfile1_path = 'file1.csv'\nfile1_contents = \"\"\"\nCode,Text\ngreeting,Hi\ngreeting,Hello\ngreeting,Hola\ngreeting-morning,Good morning\nquestion,Do you like green?\nquestion,Do you like dogs?\nquestion,Do you like apples?\nquestion,Do you like me?\nspecial_question,What is a special_question?\none-part-two,Foo\nthree-part-four,Foo\nfoo,foo\nfoo,fake\nfoobar,foo-bar\nfakebar,fake-bar\n\"\"\"\n\nfile2_path = 'file2.csv'\nfile2_contents = \"\"\"\nCode,Text\ngreeting,Bonjour\ngreeting,Hei\nnew,bar\n\"\"\"\n\nduplicate_entry_path = 'duplicate_entry.csv'\nduplicate_entry_contents = \"\"\"\nCode,Text\nfoo,foo\nfoo,foo\n\"\"\"\n\nfiles = (\n (file1_path, file1_contents),\n (file2_path, file2_contents),\n (duplicate_entry_path, duplicate_entry_contents),\n)\n\n\ndef write_file(path, contents):\n with open(path, 'w', newline='') as csvfile:\n csvfile.write(contents.strip())\n\n\ndef delete_file(path):\n os.remove(path)\n\n\nclass TestVarietyPopulator(unittest.TestCase):\n\n def setUp(self):\n for path, contents in files:\n write_file(path, contents)\n\n def tearDown(self):\n for path, _ in files:\n delete_file(path)\n\n def test_get_replacement(self):\n vp = VarietyPopulator(file1_path)\n num_entries = vp.get_num_variations('greeting')\n self.assertEqual(\n 3,\n num_entries\n )\n entries = []\n for i in range(num_entries):\n entries.append(vp.get_replacement(\n 'greeting',\n index=i,\n ))\n\n for _ in range(100):\n self.assertTrue(\n vp.get_replacement('greeting') in entries\n )\n\n def test_wrap_index_error(self):\n\n vp = VarietyPopulator(file1_path)\n num_entries = vp.get_num_variations('greeting')\n entries = []\n 
for i in range(num_entries):\n entries.append(vp.get_replacement(\n 'greeting',\n index=i,\n ))\n\n for i in range(100):\n self.assertEqual(\n entries[i % num_entries],\n vp.get_replacement(\n 'greeting',\n index=i,\n is_wrap_index=True,\n )\n )\n\n for i in range(num_entries + 1, 100):\n self.assertRaises(\n IndexError,\n vp.get_replacement,\n 'greeting',\n index=i,\n is_wrap_index=False,\n )\n\n def test_create_dict_from_file(self):\n variation_dict = VarietyPopulator._create_dict(file1_path)\n\n self.assertTrue('Code' not in variation_dict)\n self.assertTrue('Text' not in variation_dict)\n for k in ['greeting', 'question', 'foo', 'foobar', 'fakebar']:\n self.assertTrue(k in variation_dict)\n self.assertEqual(\n 3,\n len(variation_dict['greeting'])\n )\n self.assertEqual(\n 4,\n len(variation_dict['question'])\n )\n self.assertEqual(\n 'fake-bar',\n variation_dict['fakebar'][0]\n )\n\n def test_create_dict_from_multiple_files(self):\n\n variation_dict1 = VarietyPopulator._create_dict([file1_path, file2_path])\n\n variation_dict2 = VarietyPopulator._create_dict(file1_path)\n variation_dict2 = VarietyPopulator._create_dict(file2_path, variation_dict2)\n\n variation_dict3 = VarietyPopulator._create_dict(file2_path)\n variation_dict3 = VarietyPopulator._create_dict(file1_path, variation_dict3)\n\n for vd in [variation_dict1, variation_dict2, variation_dict3]:\n\n for k in ['greeting', 'question', 'foo', 'foobar', 'fakebar', 'new']:\n self.assertTrue(k in vd)\n self.assertEqual(\n 5,\n len(vd['greeting'])\n )\n\n def test_duplicate_items(self):\n self.assertRaises(\n ValueError,\n VarietyPopulator._create_dict,\n duplicate_entry_path\n )\n\n def test_wildcard_symbol(self):\n\n vp = VarietyPopulator(file1_path)\n\n tag = 'greeting*'\n num_entries = vp.get_num_variations(tag)\n self.assertEqual(\n 4,\n num_entries\n )\n entries = []\n for i in range(num_entries):\n entries.append(vp.get_replacement(\n tag,\n index=i,\n ))\n\n tag = '*question'\n num_entries = vp.get_num_variations(tag)\n self.assertEqual(\n 5,\n num_entries\n )\n entries = []\n for i in range(num_entries):\n entries.append(vp.get_replacement(\n tag,\n index=i,\n ))\n\n tag = '*part*'\n num_entries = vp.get_num_variations(tag)\n self.assertEqual(\n 2,\n num_entries\n )\n entries = []\n for i in range(num_entries):\n entries.append(vp.get_replacement(\n tag,\n index=i,\n ))\n\n def test_exception_on_wildcard_symbols(self):\n vp = VarietyPopulator(file1_path)\n\n for tag in ['abc.', 'waoh$$', '(what)']:\n self.assertRaises(\n ValueError,\n vp.get_num_variations,\n tag\n )\n self.assertRaises(\n ValueError,\n vp.get_replacement,\n tag\n )\n","sub_path":"tests/test_variety_populator.py","file_name":"test_variety_populator.py","file_ext":"py","file_size_in_byte":5537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"359430962","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Feb 23 21:56:00 2017\n\n@author: ankur\n\"\"\"\n\nimport os\nimport csv\nimport cv2\nimport matplotlib\nmatplotlib.use('Qt4Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom sklearn.model_selection import train_test_split\n\nfrom data_generator import generator\n\n##utility function to plot data\n\ndef plot_image_data(train_batch_X, train_batch_y):\n gs = gridspec.GridSpec(len(train_batch_y)//3, 3, hspace = 0.5, wspace=0.3)\n plt.figure(figsize=(5, len(train_batch_y)*1.5//3))\n\n for i in range(len(train_batch_X)):\n ax = plt.subplot(gs[i])\n 
#ax.tick_params('off')\n ax.imshow(cv2.cvtColor(train_batch_X[i], cv2.COLOR_BGR2RGB))\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n title = train_batch_y[i]\n ax.set_title(title)\n plt.xticks([], [])\n plt.yticks([], [])\n plt.axis('off')\n plt.suptitle(\"Sample Images from generator\") \n plt.show()\n plt.savefig(\"generator.png\")\n \n \nsamples = []\ndata_dir = ['./'];\nfor training_dir in data_dir:\n if not os.path.isdir(training_dir):\n print(\"data directory doesn't exist\")\n\n csv_file = os.path.join(training_dir, 'driving_log.csv')\n if not os.path.isfile(csv_file):\n print(\"Could not find CSV file\")\n\n image_dir = os.path.join(training_dir, 'IMG')\n if not os.path.isdir(image_dir):\n print(\"Could not find image directory\")\n \n print(csv_file)\n with open(csv_file) as csvfile:\n reader = csv.reader(csvfile)\n for line in reader:\n samples.append(line)\n\ntrain_samples, validation_samples = train_test_split(samples, test_size=0.2, random_state=1200)\ntrain_samples = train_samples[5:6]\ntrain_generator = generator(train_samples, batch_size=1, drop_prob=0)\nplt.figure(figsize=(6, 2))\nprint(train_samples)\nplt.xticks([], [])\nplt.yticks([], [])\nplt.subplot(1,3,1)\nplt.imshow(cv2.cvtColor(cv2.imread(train_samples[0][1].strip()), cv2.COLOR_BGR2RGB))\ns = float(train_samples[0][3])+0.2;\nplt.title('Left: {}'.format(str(s)))\nplt.xticks([], [])\nplt.yticks([], [])\nplt.subplot(1,3,2)\nplt.imshow(cv2.cvtColor(cv2.imread(train_samples[0][0].strip()), cv2.COLOR_BGR2RGB))\ns = s-0.2;\nplt.title('Center: {}'.format(str(s)))\nplt.xticks([], [])\nplt.yticks([], [])\nplt.subplot(1,3,3)\nplt.imshow(cv2.cvtColor(cv2.imread(train_samples[0][2].strip()), cv2.COLOR_BGR2RGB))\ns = s-0.2;\nplt.title('Right: {}'.format(str(s)))\nplt.xticks([], [])\nplt.yticks([], [])\nplt.suptitle(\"Input to generator\") \nplt.show()\nplt.savefig(\"input.png\")\nvalidation_generator = generator(validation_samples, batch_size=1)\ntrain_batch_X, train_batch_y = next(train_generator)\nplot_image_data(train_batch_X, train_batch_y)","sub_path":"generator_test.py","file_name":"generator_test.py","file_ext":"py","file_size_in_byte":2742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"291668507","text":"\"\"\"\n# Criando um dicionário dentro de uma lista\nbrasil = []\nestado1 ={'uf':'Rio de janeiro','sigla':'RG'}\nestado2 ={'uf':'São Paulo','Sigla':'SP'}\nbrasil.append(estado1)\nbrasil.append(estado2)\nprint(brasil[0]['uf'])\n\"\"\"\npessoas = {'nome':'Italo','sexo':'m','idade':20}\n#print(f'O {pessoas[\"nome\"]} tem {pessoas[\"idade\"]} anos')\n#for k in pessoas.keys():\n# print(k)\n#for k in pessoas.values():\n# print(k)\n\n#for k, v in pessoas.items():\n# print(f'{k} = {v}')\n\nestado = dict()\nbrasil = list()\nfor c in range(0,3):\n estado['uf'] = str(input('Unidade Federativa: '))\n estado['sigla'] = str(input('Unidade Federativa: '))\n brasil.append(estado.copy())#serve para fazer uma copia sem ligações como [:] em listas\nprint(brasil)","sub_path":"Curso-Em-Video/Dicionarios/testes.py","file_name":"testes.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"610858436","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 14 10:02:45 2019\r\n\r\n@author: Matthew\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Oct 9 08:53:27 2019\r\n\r\n@author: Matthew\r\n\"\"\"\r\n\r\nfrom datetime import datetime\r\nimport matplotlib.pyplot as 
plt\r\nimport statistics\r\nimport csv\r\n\r\nfilename = r\"CSV Files\\activity2.csv\"\r\n\r\ndates, steps_data, missing_data = [], [], []\r\nmissing_data_count = 0\r\n\r\nwith open(filename) as file:\r\n \r\n thefile = csv.reader(file)\r\n headerRow = next(thefile)\r\n hold_steps = 0\r\n prev_date = \"\"\r\n \r\n for row in thefile:\r\n try:\r\n current_date = datetime.strptime(row[1], \"%Y-%m-%d\").date()\r\n steps_datum = eval(row[0])\r\n except:\r\n missing_data_count += 1\r\n missing_data.append(current_date)\r\n steps_datum = 0\r\n if current_date == prev_date:\r\n hold_steps += 0\r\n else:\r\n dates.append(current_date)\r\n steps_data.append(hold_steps)\r\n prev_date = current_date\r\n hold_steps = 0\r\n hold_steps += 0\r\n else:\r\n if current_date == prev_date:\r\n hold_steps += steps_datum\r\n else:\r\n dates.append(current_date)\r\n steps_data.append(hold_steps)\r\n prev_date = current_date\r\n hold_steps = 0\r\n hold_steps += steps_datum\r\n \r\n file.close()\r\n \r\n \r\n\r\nfig = plt.figure(dpi=100, figsize=(12, 7))\r\nplt.hist(x = steps_data, bins='auto', color='#0504aa', alpha=0.7)\r\nplt.title(\"Steps Taken in 2 Months\")\r\nplt.xlabel(\"Number of Steps Taken\")\r\nplt.ylabel(\"Frequency\")\r\n\r\nplt.savefig(r\"Figures\\Steps Each Day 2 (HISTOGRAM).png\")\r\n\r\nplt.show()\r\n\r\nprint(\"The number of missing data:\", missing_data_count)\r\n\r\nfor i in range(len(steps_data)-1):\r\n print(\"The total number of steps taken (Day %i):\" %(i+1), steps_data[i+1])\r\n \r\nprint(\"Average:\", statistics.mean(steps_data))\r\nprint(\"Median:\", statistics.median(steps_data))\r\n\r\n","sub_path":"Exercise_12 - CSV Files/Matthew ES - Steps Each Day 2 (HISTOGRAM).py","file_name":"Matthew ES - Steps Each Day 2 (HISTOGRAM).py","file_ext":"py","file_size_in_byte":2058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"528049113","text":"# Lisa Westover\n# CS1400 7 week\n# Unit4/Task3 Pattern.py\n\nimport turtle\nimport random\n\n\n# Erases all of the patterns and starts overdef reset\ndef reset():\n turtle.reset()\n turtle.speed(0)\n\n# configures turtle to draw quickly\n#congfigures turtle to have a 1000 x 800 window\ndef setup():\n turtle.speed(0)\n turtle.setup(1000,800)\n\n#draws a single rectangle given appropriate paramenters\ndef drawRectanglePattern(centerX, centerY, offset, width, height, count, rotation):\n turtle.penup()\n newHeading = 0\n turtle.goto(centerX, centerY)\n for i in range(0, count + 1):\n turtle.penup()\n turtle.setheading(newHeading)\n if offset >= 0:\n turtle.forward(offset)\n else:\n turtle.backward(offset)\n\n drawRectangle(width, height, rotation)\n #reset the x and y to draw the shape in a new location\n turtle.penup()\n turtle.goto(centerX, centerY)\n turtle.setheading(0)\n newHeading += int(360/count)\n turtle.setheading(newHeading)\n rotation += 360/count\n\ndef drawRectangle(width, height, rotation):\n\n setRandomColor()\n turtle.setheading(rotation)\n turtle.pendown()\n turtle.forward(height)\n turtle.setheading(rotation - 90)\n turtle.forward(width)\n turtle.setheading(rotation - 180)\n turtle.forward(height)\n turtle.setheading(rotation - 270)\n turtle.forward(width)\n\n# draws the circle in a circular pattern\ndef drawCirclePattern(centerX, centerY, offset, radius, count):\n newHeading = 0\n turtle.penup()\n turtle.goto(centerX, centerY)\n\n for i in range(int(count) + 1):\n turtle.penup()\n turtle.setheading(newHeading)\n if offset >= 0:\n turtle.penup()\n turtle.forward(offset)\n\n else:\n 
turtle.penup()\n turtle.backward(offset)\n turtle.right(90)\n turtle.pendown()\n setRandomColor()\n turtle.circle(radius)\n # reset the x and y to draw the shape in a new location\n turtle.penup()\n turtle.goto(centerX, centerY)\n turtle.setheading(0)\n newHeading += int(360 / count)\n turtle.setheading(newHeading)\n\n\n\n# radomly draws Rectangle and Circle patterns, each based on reasonable random values, some negative\ndef drawSuperPattern(num):\n randomNum = 0\n for i in range(int(num)):\n randomNum = random.randint(1, 2)\n if randomNum == 1:\n for i in range(7):\n randomeNum = random.randint(-300, 300)\n randomNum2 = random.randint(0, 200)\n drawRectanglePattern(randomNum, randomNum, randomNum, randomNum2, randomNum2, randomNum2, randomNum)\n else:\n randomeNum = random.randint(-300, 300)\n randomNum2 = random.randint(0, 200)\n drawCirclePattern(randNum, randNum, randNum, randNum2, randNum2)\n\n\n# not using any parameters, set turtle to draw in a random color\ndef setRandomColor():\n\n num = random.randint(0, 3)\n if num == 0:\n turtle.color(\"red\")\n elif num == 1:\n turtle.color(\"blue\")\n elif num == 2:\n turtle.color(\"green\")\n else:\n turtle.color(\"orange\")\n\n# called when user quits program, keeps turtle window open\ndef done():\n turtle.done()\n\n\n","sub_path":"Assignments/Westover-Lisa-Unit4/pattern.py","file_name":"pattern.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"598464006","text":"import urllib.request\r\nimport json\r\n\r\nfrom file_names import *\r\nfrom file_utils import append_to_file\r\n\r\nif __name__ == '__main__':\r\n print('Loading list of groups to file \\'' + GROUPS_FILE_NAME + '\\'')\r\n limit = 100\r\n offset = 0\r\n counter = 0\r\n while True:\r\n url = \"http://api.rozklad.org.ua/v2/groups/?filter={'limit':\" + str(limit) + \",'offset':\" + str(offset) + \"}\"\r\n\r\n req = urllib.request.Request(url)\r\n resp = urllib.request.urlopen(req)\r\n resp_data = resp.read()\r\n\r\n data = json.loads(resp_data.decode('utf-8'))\r\n data = data['data']\r\n\r\n if data is None: break\r\n\r\n for group in data:\r\n group_full_name = group['group_full_name']\r\n group_id = group['group_id']\r\n counter += 1\r\n try:\r\n print(\"{}) {}\".format(counter, group_full_name))\r\n except UnicodeEncodeError:\r\n print(str(counter) + \")\\t\" + \"UnicodeEncodeError\")\r\n\r\n append_to_file(GROUPS_FILE_NAME, (str(group_id) + '.' 
+ group_full_name + '\\n'))\r\n\r\n offset += limit\r\n","sub_path":"DBfilling/create_list_of_groups.py","file_name":"create_list_of_groups.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"63570652","text":"# DB 연결\nimport pymysql\n\nconn = pymysql.connect(\n # private key\n)\n\ncurs = conn.cursor()\nprint(type(curs))\n\nsql = '''\nCREATE TABLE stock (\nstockcode varchar(255),\nstockname varchar(255),\ntime DATETIME,\nprice varchar(255),\nrate varchar(255))\n'''\ncurs.execute(sql)\nconn.commit()\nconn.close()\n","sub_path":"Stock_Data/db_stock.py","file_name":"db_stock.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"289631098","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport os\nimport re\nimport logging\nimport threading\nimport socket\nimport select\nimport time\nimport base64\n\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] %(levelname)s %(message)s', handlers=[logging.StreamHandler()])\n\n\nBLACKLOG = 5\nCR = chr(13) \nLF = chr(10) \nCRLF = CR + LF\n\nHTTP_STATUS_CODE = {\n\n 200: 'HTTP/1.1 200 OK',\n 201: 'HTTP/1.1 201 Created', \n 202: 'HTTP/1.1 202 Accepted',\n 203: 'HTTP/1.1 203 Non-Authoritative Information (since HTTP/1.1)',\n 204: 'HTTP/1.1 204 No Content',\n 205: 'HTTP/1.1 205 Reset Content',\n 206: 'HTTP/1.1 206 Partial Content',\n 207: 'HTTP/1.1 207 Multi-Status(WebDAV; RFC 4918)',\n 208: 'HTTP/1.1 208 Already Reported (WebDAV; RFC 5842)',\n 226: 'HTTP/1.1 226 IM Used (RFC 3229)',\n 300: 'HTTP/1.1 300 Multiple Choices',\n 301: 'HTTP/1.1 301 Moved Permanently',\n 302: 'HTTP/1.1 302 Found',\n 303: 'HTTP/1.1 303 See Other',\n 304: 'HTTP/1.1 304 Not Modified',\n 400: 'HTTP/1.1 400 Bad Request',\n 401: 'HTTP/1.1 401 Unauthorized',\n 403: 'HTTP/1.1 403 Forbidden',\n 404: 'HTTP/1.1 404 Not Found',\n 505: 'HTTP/1.1 505 HTTP Version Not Supported'\n\n }\n\n\n_current_date = lambda: time.strftime(\"%a, %d %b %Y %H:%M:%S\", time.localtime())\n\n\ndef gen_headers(**kwargs):\n\n code = kwargs.get('code', 200)\n\n headers = HTTP_STATUS_CODE[code] + '\\n'\n headers += 'Date: %s\\n'\n headers += 'Content-Type: %s\\n'\n headers += 'Server: HTTPServer\\n'\n headers += 'Connection: close\\n'\n headers += '\\n'\n\n\n headers = headers % (_current_date(), kwargs.get('content-type', 'text/html'))\n\n return headers\n\nclass HTTPRequestParser(object):\n\n def __init__(self, request):\n\n self.error_code = (None, None)\n self.error = 0\n\n self.method = None\n self.uri = None\n self.ver = None\n self.close_connection = 1\n self._parse_request(request)\n\n def set_error(self, code, error):\n self.error_code = (code, error)\n self.error = 1\n\n def _parse_request(self, request):\n request = request.decode('ascii').split('\\n', 1)\n requestline = request[0].split()\n if len(requestline) == 3:\n method, uri, ver = requestline\n if ver[:4] != 'HTTP':\n self.set_error(400, \"Bad request version (%r)\" % version)\n return False\n try:\n version = ver.split('/', 1)[1]\n version_number = version.split('.')\n if len(version_number) != 2:\n raise ValueError\n version_number = int(version_number[0]), int(version_number[1])\n except (ValueError, IndexError) as e:\n self.set_error(400, \"Bad request version\")\n return False\n if version_number == (1, 1):\n self.close_connection = 0\n else:\n self.set_error(505, \"HTTP Version Not 
Supported. (%r)\" % version)\n return False\n elif len(requestline) == 2:\n method, uri = requestline\n if method != 'GET':\n self.set_error(400, \"Bad request method\")\n return False\n else:\n self.set_error(400, \"Bad request syntax\")\n return False\n self.method = method\n self.uri = uri\n self.ver = version\n \n if len(request) == 2:\n headers = dict(re.findall(r\"(?P.*?): (?P.*?)\\r\\n\", request[1]))\n self.headers = headers\n return True\n\n\n\n\nclass HTTPServer(object):\n\n def __init__(self, host='', port=8080):\n\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP Socket\n self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.s.bind((host, port))\n self.s.listen(BLACKLOG)\n\n self.host = socket.gethostbyname(host)\n self.port = port\n\n self.routes = {}\n \n def route(self, *args, **kwargs):\n def decorator(func):\n for arg in args:\n self.routes[arg] = {'func': func, 'kwargs': dict(kwargs)}\n return func\n return decorator\n\n def serve(self, output=True):\n logger.propagate = output\n logger.info(\"Serving on %s port %s\" % (self.host, self.port))\n logger.info(\"Server is awaiting for connections..\")\n while 1:\n conn, addr = self.s.accept()\n logger.info(\"Connection from %s\" % ':'.join([str(element) for element in addr]))\n data = conn.recv(4096)\n request = HTTPRequestParser(data)\n if not request.error:\n connection_type = request.headers.get('Connection', 'close')\n response = ''\n\n if request.method == 'GET':\n match = None\n for pattern, dictionary in iter(self.routes.items()):\n match = re.match(pattern, request.uri)\n if match:\n headers = gen_headers(**dictionary['kwargs'])\n response += headers\n response += dictionary['func'](*match.groups())\n break\n if not match:\n response += gen_headers(code=404)\n response += \"404 Not Found
: The requested URL %s was not found on this server.
\" % (request.uri)\n\n if response:\n conn.sendall(bytes(response, 'UTF-8'))\n\n if connection_type == 'close':\n conn.close()\n\n conn.close() # Close anyway\n self.s.close()\n\n\n\n\n\n \n\n\n\n\n\n","sub_path":"HTTPServ/HTTPServer.py","file_name":"HTTPServer.py","file_ext":"py","file_size_in_byte":6179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"414327222","text":"# This function test verifys image preprocessing, imsub & imsub thresholding, contouring, camera, and pixelwise scannign\n\nfrom piGantry.imageProcess import cameraFunc as camera\nfrom piGantry.piSerial import mathFunc as mathFunc\nimport cv2\nimport time\nimport json\n\n\n\n# Set width & height to high value to set resolution as max \ncam = camera.cameraObj(10000, 10000, -3)\n\ndef main():\n \n while True:\n try:\n img = cam.grabFrame()\n if img is not None:\n cv2.imwrite(\"tests/testImg.png\", img)\n break\n except Exception as e:\n print(e)\n print(\"camera done\")\n # Release cam only when photo taking is done\n cam.cam.release()\n\n\n t3 = time.perf_counter()\n\n # Read images from data set\n exposureVal = 4\n frameVal = 4\n\n img2 = cv2.imread(\"imgData/comparisonExternalPSU/22.56v_{0}/opencv_frame_{1}.png\".format(exposureVal, frameVal))\n img3 = cv2.imread(\"imgData/comparisonExternalPSU/22.81v_{0}/opencv_frame_{1}.png\".format(exposureVal, frameVal))\n img4 = cv2.imread(\"imgData/comparisonExternalPSU/23.17v_{0}/opencv_frame_{1}.png\".format(5, frameVal))\n img5 = cv2.imread(\"imgData/comparisonExternalPSU/23.56v_{0}/opencv_frame_{1}.png\".format(5, frameVal))\n\n # print(\"compareImg far 1 & 2 {0}\\n\".format(camera.compareImg(img2,img3, 0.005)))\n\n # Image preprocessing\n t1 = time.perf_counter()\n procImg = camera.preProcImg(img2)\n procImg2 = camera.preProcImg(img3)\n procImg3 = camera.preProcImg(img4)\n procImg4 = camera.preProcImg(img5)\n t2 = time.perf_counter()\n\n print(t2)\n\n # Pixelwise operations\n def pixelScan(img):\n print(\"\\npixel start\\n\")\n t1 = time.perf_counter()\n # Calling on actual imageProcess module's pixelWiseScan function\n pixelWise=camera.pixelWiseScan(img, 1, 50)\n t2 = time.perf_counter()\n print(\"pixel end in {0} \\n pixel: {1} \\n dots: {2}\\n\".format((t2-t1),pixelWise,len(pixelWise)))\n\n # Contouring \n def contourScan(img, origImg, imgName, minArea, maxArea, exemptArea):\n print(\"\\ncontour1 start\\n\")\n t1 = time.perf_counter()\n # Calling on actual imageProcess module's contour function\n contours=camera.retContour(img, origImg, minArea, maxArea, exemptArea, \"{}Contour.png\".format(imgName))\n t2 = time.perf_counter()\n print(\"contour end in {0} \\n contour: {1} \\n dots: {2}\\n\".format((t2-t1),contours[0],len(contours[0])))\n # Plot best fit line\n mathFunc.bestFitPoly(contours[1][1], contours[1][0], 4, origImg)\n return contours[0]\n\n\n contourList = [contourScan(procImg, img2, \"22.56v_5\", 5, 50, 100), \n contourScan(procImg2, img3, \"22.81v_5\", 1, 50, 100),\n contourScan(procImg3, img4, \"23.17v_5\", 5, 50, 100), \n contourScan(procImg4, img5, \"23.56v_5\", 1, 50, 100)]\n\n for i in enumerate(contourList):\n with open(\"dataContour{0}.json\".format(i[0]), \"w\") as write_file:\n json.dump(i[1], write_file, indent=4)\n \nmain()","sub_path":"tests/dotTest.py","file_name":"dotTest.py","file_ext":"py","file_size_in_byte":3055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"276008291","text":"import io\nimport os.path\nimport re\nimport 
slackbot_settings\nfrom slacker import Slacker\nfrom requests import ReadTimeout\n\n\nslackapi = Slacker(slackbot_settings.API_TOKEN)\n\nusers = {}\n\ndef get_user(user_id):\n if user_id not in users:\n response = slackapi.users.info(user_id)\n users[user_id] = response.body[\"user\"]\n return users[user_id]\n\ndef get_channel(channel_id):\n response = slackapi.channels.info(channel_id)\n return response.body[\"channel\"]\n\ndef get_message(channel_id, ts):\n latest = str(float(ts) + 0.000001)\n oldest = str(float(ts) - 0.000001)\n response = slackapi.channels.history(channel=channel_id, latest=latest, oldest=oldest, count=1)\n if response.body[\"messages\"]:\n return response.body[\"messages\"][0]\n return None\n\ndef get_user_mame(user_id):\n emoji = re.compile(r\":.+?:\")\n user = get_user(user_id)\n return emoji.sub('', user[\"profile\"][\"real_name\"])\n\ndef get_channel_name(channel_id):\n channel = get_channel(channel_id)\n return channel[\"name\"]\n\ndef get_channel_tag(channel_id):\n return \"<#{0}|{1}>\".format(channel_id, get_channel_name(channel_id))\n\ndef post_image(message, pillow_image, title=None, comment=None, file_name=None):\n def filename_to_filetype(file_name):\n root, ext = os.path.splitext(file_name or 'sample.png')\n file_type = ext[1:] if ext else 'png'\n return file_type if file_type != 'jpg' else 'jpeg'\n\n output = io.BytesIO()\n pillow_image.save(output, filename_to_filetype(file_name), quality=100)\n data = {\n 'filename': file_name,\n 'title': title,\n 'initial_comment': comment,\n 'channels': message.body['channel']\n }\n files = {\n \"file\": output.getvalue()\n }\n try:\n slackapi.files.post('files.upload', data=data, files=files)\n except ReadTimeout as e:\n message.send(\"slackの調子が少し悪いみたいですね...\")\n\n\n","sub_path":"plugins/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"251337190","text":"import os\nimport pytest\nimport requests\nfrom dotenv import find_dotenv, load_dotenv\nfrom app import create_app\n\n\nclass MockResponse(object):\n def __init__(self, json):\n self.response_json = json\n\n def json(self):\n return self.response_json\n\n\n@pytest.fixture\ndef client():\n # Use our test integration config instead of the 'real' version\n file_path = find_dotenv('.env.test')\n\n # Record existing env variable values\n existing_values = {\n \"TRELLO_TOKEN\": os.environ.get(\"TRELLO_TOKEN\", \"\"),\n \"TRELLO_KEY\": os.environ.get(\"TRELLO_KEY\", \"\"),\n \"TRELLO_BOARD_NAME\": os.environ.get(\"TRELLO_BOARD_NAME\", \"\"),\n \"TRELLO_BOARD_ID\": os.environ.get(\"TRELLO_BOARD_ID\", \"\"),\n }\n\n load_dotenv(file_path, override=True)\n\n # Create the new app.\n test_app = create_app('test-todo-list-id', 'test-doing-list-id', 'test-done-list-id')\n\n # Use the app to create a test_client that can be used in our tests.\n test_app.testing = True\n with test_app.test_client() as client:\n yield client\n\n # Restore proper config\n file_path = find_dotenv('.env')\n if file_path:\n load_dotenv(file_path, override=True)\n else:\n for env_var_name in existing_values:\n os.environ[env_var_name] = existing_values[env_var_name]\n\n\ndef test_index_page(monkeypatch, client):\n\n # Arrange\n def mock_get(url, params):\n assert url == f'https://api.trello.com/1/boards/trello-board-id/cards'\n assert params['key'] == 'trello-key'\n assert params['token'] == 'trello-token'\n return MockResponse(\n [\n {\n \"name\": \"test-item-1\",\n \"id\": 
\"test-id-1\",\n \"desc\": \"test-description-1\",\n \"due\": \"2020-12-25T12:00:00.000000Z\",\n \"dateLastActivity\": \"2020-10-25T12:00:00.000000Z\",\n \"idList\": \"test-todo-list-id\"\n },\n {\n \"name\": \"test-item-2\",\n \"id\": \"test-id-2\",\n \"desc\": \"test-description-2\",\n \"due\": \"2020-12-26T12:00:00.000000Z\",\n \"dateLastActivity\": \"2020-10-26T12:00:00.000000Z\",\n \"idList\": \"test-doing-list-id\"\n }\n ])\n\n\n monkeypatch.setattr(requests, \"get\", mock_get)\n\n # Act\n response = client.get('/')\n\n # Assert\n assert response.status_code == 200\n\n decoded_response = response.data.decode('utf-8')\n\n assert \"test-item-1\" in decoded_response\n assert \"test-item-2\" in decoded_response\n","sub_path":"tests/test_integration.py","file_name":"test_integration.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"390209990","text":"class Solution:\n def minPathSum(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n rows = len(grid)\n cols = len(grid[0])\n \n for col in range(1,cols):\n grid[0][col] += grid[0][col-1]\n \n for row in range(1,rows):\n grid[row][0] += grid[row-1][0]\n \n for row in range(1,rows):\n for col in range(1,cols):\n grid[row][col] += min(grid[row-1][col], grid[row][col-1])\n \n return grid[-1][-1]\n ","sub_path":"python3/064_minimum_path_sum.py","file_name":"064_minimum_path_sum.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"195724259","text":"#coding: utf-8\nimport math\nimport time\n#import numpy as np\nimport numpy.random as npr\nimport cupy as cp #GPUを使うためのnumpy\nimport chainer \nfrom chainer import cuda, Function, Variable, optimizers\nfrom chainer import Link, Chain\nimport chainer.functions as F\nimport chainer.links as L\n\nfrom NNFP import load_data \nfrom NNFP import result_plot \nfrom NNFP import normalize_array\nfrom NNFP import Deep_neural_network\nfrom NNFP import Finger_print\n\n\ntask_params = {'target_name' : 'measured log solubility in mols per litre',\n\t\t\t\t'data_file' : 'delaney.csv'}\n\nN_train = 70\nN_val = 1\nN_test = 10\n\n\nmodel_params = dict(fp_length = 50, \n\t\t\t\t\tfp_depth = 4, #NNの層と、FPの半径は同じ\n\t\t\t\t\tconv_width = 20, #必要なパラメータはこれだけ(?)\n\t\t\t\t\th1_size = 100, #最上位の中間層のサイズ\n\t\t\t\t\tL2_reg = cp.exp(-2))\n\ntrain_params = dict(num_iters = 100,\n\t\t\t\t\tbatch_size = 50,\n\t\t\t\t\tinit_scale = cp.exp(-4),\n\t\t\t\t\tstep_size = cp.exp(-6))\n\n\t\nclass Main(Chain):\n\tdef __init__(self, model_params):\n\t\tsuper(Main, self).__init__(\n\t\t\tfp = Finger_print.FP(model_params),\n\t\t\tdnn = Deep_neural_network.DNN(model_params),\n\t\t)\n\t\n\tdef __call__(self, x, y):\n\t\tt = time.time()\n\t\ty = Variable(cp.array(y, dtype=cp.float32))\n\t\tprint(\"variable : \", time.time() - t)\n\t\tpred = self.prediction(x)\n\t\treturn F.mean_squared_error(pred, y)\n\n\tdef prediction(self, x):\n\t\tx = Variable(cuda.to_cpu(x))\n\t\tfinger_print = self.fp(x)\n\t\tpred = self.dnn(finger_print)\n\t\treturn pred\n\n\tdef mse(self, x, y, undo_norm):\n\t\ty = Variable(cp.array(y, dtype=cp.float32))\n\t\tpred = undo_norm(self.prediction(x))\n\t\treturn F.mean_squared_error(pred, y)\n\t\ndef train_nn(model, train_smiles, train_raw_targets, seed=0,\n\t\t\t\tvalidation_smiles=None, validation_raw_targets=None):\n\n\tnum_print_examples = N_train\n\ttrain_targets, undo_norm = 
normalize_array(train_raw_targets)\n\ttraining_curve = []\n\toptimizer = optimizers.Adam()\n\toptimizer.setup(model)\n\toptimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))\t\n\t\n\tnum_epoch = 100\n\tnum_data = len(train_smiles)\n\tbatch_size = 50\n\tx = train_smiles\n\ty = train_targets\n\tsff_idx = npr.permutation(num_data)\n\tTIME = time.time()\n\tfor epoch in range(num_epoch):\n\t\tepoch_time = time.time()\n\t\tfor idx in range(0,num_data, batch_size):\n\t\t\tbatched_x = x[sff_idx[idx:idx+batch_size\n\t\t\t\tif idx + batch_size < num_data else num_data]]\n\t\t\tbatched_y = y[sff_idx[idx:idx+batch_size\n\t\t\t\tif idx + batch_size < num_data else num_data]]\n\t\t\tupdate_time\t = time.time()\n\t\t\tmodel.zerograds()\n\t\t\tloss = model(batched_x, batched_y)\n\t\t\tloss.backward()\n\t\t\toptimizer.update()\n\t\t\tprint(\"UPDATE TIME : \", time.time() - update_time)\n\t\t#print \"epoch \", epoch, \"loss\", loss._data[0]\n\t\tif epoch % 10 == 0:\n\t\t\tprint_time = time.time()\n\t\t\ttrain_preds = model.mse(train_smiles, train_raw_targets, undo_norm)\n\t\t\tcur_loss = loss._data[0]\n\t\t\ttraining_curve.append(cur_loss)\n\t\t\tprint(\"PRINT TIME : \", time.time() - print_time)\n\t\t\tprint(\"Iteration\", epoch, \"loss\", math.sqrt(cur_loss), \\\n\t\t\t\t\"train RMSE\", math.sqrt((train_preds._data[0])))\n\t\t\tif validation_smiles is not None:\n\t\t\t\tvalidation_preds = model.mse(validation_smiles, validation_raw_targets, undo_norm)\n\t\t\t\tprint(\"Validation RMSE\", epoch, \":\", math.sqrt((validation_preds._data[0])))\n\t\tprint(\"1 EPOCH TIME : \", time.time() - epoch_time)\n\t\t#print loss\n\n\t\t\n\treturn model, training_curve, undo_norm\n\ndef main():\n\tprint(\"Loading data...\")\n\ttraindata, valdata, testdata = load_data(\n\t\ttask_params['data_file'], (N_train, N_val, N_test),\n\t\tinput_name = 'smiles', target_name = task_params['target_name'])\n\tx_trains, y_trains = traindata\n\tx_vals, y_vals = valdata\n\tx_tests, y_tests = testdata\n\tx_trains = cp.reshape(x_trains, (N_train, 1))\n\ty_trains = cp.reshape(y_trains, (N_train, 1)).astype(cp.float32)\n\tx_vals = cp.reshape(x_vals, (N_val, 1))\n\ty_vals = cp.reshape(y_vals, (N_val, 1)).astype(cp.float32)\n\tx_tests = cp.reshape(x_tests, (N_test, 1))\n\ty_tests = cp.reshape(y_tests, (N_test, 1)).astype(cp.float32)\n\n\tdef run_conv_experiment():\n\t\t'''Initialize model'''\n\t\tNNFP = Main(model_params) \n\t\toptimizer = optimizers.Adam()\n\t\toptimizer.setup(NNFP)\n\n\t\tgpu_device = 0\n\t\tcuda.get_device(gpu_device).use()\n\t\tNNFP.to_gpu(gpu_device)\n\t\t#xp = cuda.cupy\n\t\t'''Learn'''\n\t\ttrained_NNFP, conv_training_curve, undo_norm = \\\n\t\t\ttrain_nn(NNFP, \n\t\t\t\t\t x_trains, y_trains, \n\t\t\t\t\t validation_smiles=x_vals, \n\t\t\t\t\t validation_raw_targets=y_vals)\n\t\treturn math.sqrt(trained_NNFP.mse(x_tests, y_tests, undo_norm)._data[0]), conv_training_curve\n\n\tprint(\"Starting neural fingerprint experiment...\")\n\ttest_loss_neural, conv_training_curve = run_conv_experiment()\n\tprint() \n\tprint(\"Neural test RMSE\", test_loss_neural)\n\t#result_plot(conv_training_curve, train_params)\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"Graduation-thesis/NNFP_chainer/regression_gpu/chainer_regression.py","file_name":"chainer_regression.py","file_ext":"py","file_size_in_byte":4742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"518763301","text":"import numpy as np\n\ndef HeartSpecific(inF1, inF2):\n ouFile = open(inF2 + '_LungTop1', 'w')\n S 
= {}\n inFile = open(inF1)\n for line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n tissue = fields[1].split(' - ')[0]\n sample = fields[0]\n S[sample] = tissue\n inFile.close()\n\n inFile = open(inF2)\n head = inFile.readline().strip()\n ouFile.write(head + '\\n')\n head = head.split('\\t')\n for line in inFile:\n D = {}\n line = line.strip()\n fields = line.split('\\t')\n gene = fields[0] + '\\t' + fields[1]\n for n in range(2, len(head)):\n tissue = S[head[n]]\n D.setdefault(tissue, [])\n D[tissue].append(float(fields[n]))\n d = D.items()\n d.sort(lambda x,y:cmp(np.median(x[1]), np.median(y[1])), reverse=True)\n Top1 = [d[0][0]]\n Top3 = [d[0][0], d[1][0], d[2][0]]\n #Top5 = [d[0][0], d[1][0], d[2][0], d[3][0], d[4][0]]\n if 'Lung' in Top1: \n ouFile.write(line + '\\n')\n inFile.close()\n ouFile.close()\n\n\nHeartSpecific('GTEx_Sample2Tissue', 'GTEx-Gene_Counts-Norm-ProteinCoding')\n","sub_path":"Data/GTEx/LungSpecific/01-HeartSpecific.py","file_name":"01-HeartSpecific.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"322222322","text":"from __future__ import print_function\n\nimport glob\nimport os\nimport shutil\nimport time\n\nimport numpy as np\n\nfrom lensit.pbs import pbs\nfrom lensit.ffs_deflect import ffs_deflect\nfrom lensit.ffs_qlms import qlms as ql\nfrom lensit.ffs_covs import ffs_specmat, ffs_cov\nfrom lensit.misc.misc_utils import PartialDerivativePeriodic as PDP, cl_inverse\nfrom lensit.ffs_iterators import bfgs\nfrom lensit.qcinv import multigrid, chain_samples, ffs_ninv_filt_ideal_nufft as filtr\nfrom lensit.sims import ffs_phas\n\n\n_types = ['T', 'QU', 'TQU']\n\n\ndef prt_time(dt, label=''):\n dh = np.floor(dt / 3600.)\n dm = np.floor(np.mod(dt, 3600.) / 60.)\n ds = np.floor(np.mod(dt, 60))\n print(\"\\r [\" + ('%02d:%02d:%02d' % (dh, dm, ds)) + \"] \" + label)\n return\n\n\nclass ffs_iterator(object):\n r\"\"\"Flat-sky iterator template class\n\n Args:\n lib_dir: many things will be written there\n typ: 'T', 'QU' or 'TQU' for estimation on temperature data, polarization data or jointly\n filt: inverse-variance filtering instance (e.g. *lensit.qcinv.ffs_ninv_filt* )\n dat_maps: data maps or path to maps.\n lib_qlm: lib_alm (*lensit.ffs_covs.ell_mat.ffs_alm*) instance describing the lensing estimate Fourier arrays\n Plm0: Starting point for the iterative search. 
alm array consistent with *lib_qlm*\n H0: initial isotropic likelihood curvature approximation (roughly, inverse lensing noise bias :math:`N^{(0)}_L`)\n cpp_prior: fiducial lensing power spectrum, used for the prior part of the posterior density.\n chain_descr: multigrid conjugate gradient inversion chain description\n nufft_epsilon: precision of the nufft interpolation\n\n\n \"\"\"\n def __init__(self, lib_dir, typ, filt: filtr, dat_maps, lib_qlm, Plm0, H0, cpp_prior,\n use_Pool_lens=0, use_Pool_inverse=0, chain_descr=None, opfilt=None, soltn0=None, cache_magn=False,\n no_deglensing=False, NR_method=100, tidy=10, verbose=True, maxcgiter=150, PBSSIZE=None, PBSRANK=None,\n nufft_epsilon=1e-7,\n **kwargs):\n\n assert typ in _types\n assert chain_descr is not None\n assert opfilt is not None\n assert filt.lib_skyalm.lsides == lib_qlm.lsides\n\n\n self.PBSSIZE = pbs.size if PBSSIZE is None else PBSSIZE\n self.PBSRANK = pbs.rank if PBSRANK is None else PBSRANK\n assert self.PBSRANK < self.PBSSIZE, (self.PBSRANK, self.PBSSIZE)\n self.barrier = (lambda: 0) if self.PBSSIZE == 1 else pbs.barrier\n\n self.type = typ\n self.lib_dir = lib_dir\n self.dat_maps = dat_maps\n\n self.chain_descr = chain_descr\n self.opfilt = opfilt\n self.cl_pp = cpp_prior\n self.lib_qlm = lib_qlm\n\n self.cache_magn = cache_magn\n\n self.lsides = filt.lib_skyalm.lsides\n self.lmax_qlm = self.lib_qlm.ellmax\n self.NR_method = NR_method\n\n self.tidy = tidy\n self.maxiter = maxcgiter\n self.verbose = verbose\n\n self.nodeglensing = no_deglensing\n if self.verbose:\n print(\" I see t\", filt.Nlev_uKamin('t'))\n print(\" I see q\", filt.Nlev_uKamin('q'))\n print(\" I see u\", filt.Nlev_uKamin('u'))\n\n # Defining a trial newton step length :\n\n def newton_step_length(it, norm_incr): # FIXME\n # Just trying if half the step is better for S4 QU\n if filt.Nlev_uKamin('t') > 2.1: return 1.0\n if filt.Nlev_uKamin('t') <= 2.1 and norm_incr >= 0.5:\n return 0.5\n return 0.5\n\n self.newton_step_length = newton_step_length\n self.soltn0 = soltn0\n self.nufft_epsilon = nufft_epsilon\n\n assert hasattr(filt, 'f')\n self.cov = filt\n if self.PBSRANK == 0:\n if not os.path.exists(self.lib_dir): os.makedirs(self.lib_dir)\n self.barrier()\n\n self.soltn_cond = True\n print('ffs iterator : This is %s trying to setup %s' % (self.PBSRANK, lib_dir))\n # Lensed covariance matrix library :\n # We will redefine the displacement at each iteration step\n self.use_Pool = use_Pool_lens\n self.use_Pool_inverse = use_Pool_inverse\n\n if self.PBSRANK == 0: # FIXME : hash and hashcheck\n if not os.path.exists(self.lib_dir):\n os.makedirs(self.lib_dir)\n if not os.path.exists(self.lib_dir + '/MAPlms'):\n os.makedirs(self.lib_dir + '/MAPlms')\n if not os.path.exists(self.lib_dir + '/cghistories'):\n os.makedirs(self.lib_dir + '/cghistories')\n\n # pre_calculation of qlm_norms with rank 0:\n if self.PBSRANK == 0 and \\\n (not os.path.exists(self.lib_dir + '/qlm_%s_H0.dat' % ('P'))\n or not os.path.exists(self.lib_dir + '/%shi_plm_it%03d.npy' % ('P', 0))):\n print('++ ffs_%s_iterator: Caching qlm_norms and N0s' % typ + self.lib_dir)\n\n # Caching qlm norm that we will use as zeroth order curvature : (with lensed weights)\n # Prior curvature :\n # Gaussian priors\n prior_pp = cl_inverse(self.cl_pp[0:self.lmax_qlm + 1])\n prior_pp[0] *= 0.5\n\n curv_pp = H0 + prior_pp # isotropic estimate of the posterior curvature at the starting point\n self.cache_cl(self.lib_dir + '/qlm_%s_H0.dat' % ('P'), cl_inverse(curv_pp))\n print(\" cached %s\" % self.lib_dir + 
'/qlm_%s_H0.dat' % 'P')\n fname_P = self.lib_dir + '/%shi_plm_it%03d.npy' % ('P', 0)\n self.cache_qlm(fname_P, self.load_qlm(Plm0))\n self.barrier()\n\n if not os.path.exists(self.lib_dir + '/Hessian') and self.PBSRANK == 0:\n os.makedirs(self.lib_dir + '/Hessian')\n # We store here the rank 2 updates to the Hessian according to the BFGS iterations.\n\n if not os.path.exists(self.lib_dir + '/history_increment.txt') and self.PBSRANK == 0:\n with open(self.lib_dir + '/history_increment.txt', 'w') as file:\n file.write('# Iteration step \\n' +\n '# Exec. time in sec.\\n' +\n '# Increment norm (normalized to starting point displacement norm) \\n' +\n '# Total gradient norm (all grad. norms normalized to initial total gradient norm)\\n' +\n '# Quad. gradient norm\\n' +\n '# Det. gradient norm\\n' +\n '# Pri. gradient norm\\n' +\n '# Newton step length\\n')\n file.close()\n\n if self.PBSRANK == 0: print('++ ffs_%s masked iterator : setup OK' % type)\n self.barrier()\n\n def get_mask(self):\n ret = np.ones(self.cov.lib_datalm.shape, dtype=float)\n ret[np.where(self.cov.ninv_rad <= 0.)] *= 0\n return ret\n\n def get_datmaps(self):\n return np.load(self.dat_maps) if isinstance(self.dat_maps, str) else self.dat_maps\n\n def cache_qlm(self, fname, alm, pbs_rank=None):\n if pbs_rank is not None and self.PBSRANK != pbs_rank:\n return\n else:\n assert self.load_qlm(alm).ndim == 1 and self.load_qlm(alm).size == self.lib_qlm.alm_size\n print('rank %s caching ' % self.PBSRANK + fname)\n self.lib_qlm.write_alm(fname, self.load_qlm(alm))\n return\n\n def load_qlm(self, fname):\n return self.lib_qlm.read_alm(fname) if isinstance(fname, str) else fname\n\n def cache_rlm(self, fname, rlm):\n assert rlm.ndim == 1 and rlm.size == 2 * self.lib_qlm.alm_size, (rlm.ndim, rlm.size)\n print('rank %s caching ' % self.PBSRANK, fname)\n np.save(fname, rlm)\n\n def load_rlm(self, fname):\n rlm = np.load(fname)\n assert rlm.ndim == 1 and rlm.size == 2 * self.lib_qlm.alm_size, (rlm.ndim, rlm.size)\n return rlm\n\n @staticmethod\n def cache_cl(fname, cl):\n assert cl.ndim == 1\n np.savetxt(fname, cl)\n\n @staticmethod\n def load_cl(fname):\n assert os.path.exists(fname), fname\n return np.loadtxt(fname)\n\n def get_H0(self, key):\n assert key.lower() in ['p', 'o'], key # potential or curl potential.\n fname = os.path.join(self.lib_dir, 'qlm_%s_H0.dat' % key.upper())\n assert os.path.exists(fname), fname\n return self.load_cl(fname)\n\n def is_previous_iter_done(self, it, key):\n if it == 0: return True\n assert key.lower() in ['p', 'o'], key # potential or curl potential.\n fn = os.path.join(self.lib_dir, '%s_plm_it%03d.npy' % ({'p': 'Phi', 'o': 'Om'}[key.lower()], it - 1))\n return os.path.exists(fn)\n\n\n def how_many_iter_done(self, key):\n \"\"\" Returns the number of points already calculated. 
0th is the qest.\n\n \"\"\"\n assert key.lower() in ['p', 'o'], key # potential or curl potential.\n fn = os.path.join(self.lib_dir, '%s_plm_it*.npy' % {'p': 'Phi', 'o': 'Om'}[key.lower()])\n return len( glob.glob(fn))\n\n def get_Plm(self, it, key):\n \"\"\"Loads solution at iteration *it*\n\n \"\"\"\n if it < 0:\n return np.zeros(self.lib_qlm.alm_size, dtype=complex)\n assert key.lower() in ['p', 'o'], key # potential or curl potential.\n fn = os.path.join(self.lib_dir,'%s_plm_it%03d.npy' % ({'p': 'Phi', 'o': 'Om'}[key.lower()], it))\n assert os.path.exists(fn), fn\n return self.load_qlm(fn)\n\n def get_Phimap(self, it, key):\n assert key.lower() in ['p', 'o'], key # potential or curl potential.\n return self.lib_qlm.alm2map(self.get_Plm(it, key))\n\n def _getfnames_f(self, key, it):\n assert key.lower() in ['p', 'o'], key # potential or curl potential.\n fname_dx = os.path.join(self.lib_dir, 'f_%s_it%03d_dx.npy' % (key.lower(), it))\n fname_dy = os.path.join(self.lib_dir, 'f_%s_it%03d_dy.npy' % (key.lower(), it))\n return fname_dx, fname_dy\n\n def _getfnames_finv(self, key, it):\n assert key.lower() in ['p', 'o'], key # potential or curl potential.\n fname_dx = os.path.join(self.lib_dir, 'finv_%s_it%03d_dx.npy' % (key.lower(), it))\n fname_dy = os.path.join(self.lib_dir, 'finv_%s_it%03d_dy.npy' % (key.lower(), it))\n return fname_dx, fname_dy\n\n def _calc_ffinv(self, it, key):\n \"\"\"Calculates displacement at iter and its inverse. Only mpi rank 0 can do this.\n\n \"\"\"\n assert self.PBSRANK == 0, 'NO MPI METHOD'\n if it < 0: return\n assert key.lower() in ['p', 'o'], key # potential or curl potential.\n fname_dx, fname_dy = self._getfnames_f(key, it)\n if not os.path.exists(fname_dx) or not os.path.exists(fname_dy):\n lib_dir = os.path.join(self.lib_dir, 'f_%04d_libdir' % it)\n if not os.path.exists(lib_dir): os.makedirs(lib_dir)\n # FIXME : does this from plm\n assert self.is_previous_iter_done(it, key)\n Phi_est_WF = self.get_Phimap(it, key)\n assert self.cov.lib_skyalm.shape == Phi_est_WF.shape\n assert self.cov.lib_skyalm.shape == self.lib_qlm.shape\n assert self.cov.lib_skyalm.lsides == self.lib_qlm.lsides\n rmin = np.array(self.cov.lib_skyalm.lsides) / np.array(self.cov.lib_skyalm.shape)\n print('rank %s caching displacement comp. for it. 
%s for key %s' % (self.PBSRANK, it, key))\n if key.lower() == 'p':\n dx = PDP(Phi_est_WF, axis=1, h=rmin[1])\n dy = PDP(Phi_est_WF, axis=0, h=rmin[0])\n else:\n dx = -PDP(Phi_est_WF, axis=0, h=rmin[0])\n dy = PDP(Phi_est_WF, axis=1, h=rmin[1])\n if self.PBSRANK == 0:\n np.save(fname_dx, dx)\n np.save(fname_dy, dy)\n del dx, dy\n return\n\n def _load_f(self, it, key):\n \"\"\"Loads current displacement solution at iteration iter\n\n \"\"\"\n fname_dx, fname_dy = self._getfnames_f(key, it)\n lib_dir = os.path.join(self.lib_dir, 'f_%04d_libdir' % it)\n assert os.path.exists(fname_dx), fname_dx\n assert os.path.exists(fname_dx), fname_dy\n assert os.path.exists(lib_dir), lib_dir\n return ffs_deflect.ffs_displacement(fname_dx, fname_dy, self.lsides,\n verbose=(self.PBSRANK == 0), lib_dir=lib_dir, cache_magn=self.cache_magn, nuffteps=self.nufft_epsilon)\n\n\n def load_soltn(self, it, key):\n assert key.lower() in ['p', 'o']\n for i in np.arange(it, -1, -1):\n fname = os.path.join(self.lib_dir, 'MAPlms/Mlik_%s_it%s.npy' % (key.lower(), i))\n if os.path.exists(fname):\n print(\"rank %s loading \" % pbs.rank + fname)\n return np.load(fname)\n if self.soltn0 is not None: return np.load(self.soltn0)[:self.opfilt.TEBlen(self.type)]\n return np.zeros((self.opfilt.TEBlen(self.type), self.cov.lib_skyalm.alm_size), dtype=complex)\n\n def _cache_tebwf(self, TEBMAP, it, key):\n assert key.lower() in ['p', 'o']\n fname = os.path.join(self.lib_dir, 'MAPlms/Mlik_%s_it%s.npy' % (key.lower(), it))\n print(\"rank %s caching \" % pbs.rank + fname)\n np.save(fname, TEBMAP)\n\n def get_gradPpri(self, it, key, cache_only=False):\n \"\"\"Builds prior gradient at iteration *it*\n\n \"\"\"\n assert self.PBSRANK == 0, 'NO MPI method!'\n assert key.lower() in ['p', 'o'], key # potential or curl potential.\n assert it > 0, it\n fname = os.path.join(self.lib_dir, 'qlm_grad%spri_it%03d.npy' % (key.upper(), it - 1))\n if os.path.exists(fname):\n return None if cache_only else self.load_qlm(fname)\n assert self.is_previous_iter_done(it, key)\n grad = self.lib_qlm.almxfl(self.get_Plm(it - 1, key),\n cl_inverse(self.cl_pp if key.lower() == 'p' else self.cl_oo))\n self.cache_qlm(fname, grad, pbs_rank=0)\n return None if cache_only else self.load_qlm(fname)\n\n def _mlik2rest_tqumlik(self, TQUMlik, it, key):\n \"\"\"Produces B^t Ni (data - B D Mlik) in TQU space, that is fed into the qlm estimator.\n\n \"\"\"\n f_id = ffs_deflect.ffs_id_displacement(self.cov.lib_skyalm.shape, self.cov.lib_skyalm.lsides)\n self.cov.set_ffi(self._load_f(it - 1, key))\n temp = ffs_specmat.TQU2TEBlms(self.type, self.cov.lib_skyalm, TQUMlik)\n maps = self.get_datmaps() - self.cov.apply_Rs(self.type, temp)\n self.cov.apply_maps(self.type, maps, inplace=True)\n self.cov.set_ffi(f_id)\n temp = self.cov.apply_Rts(self.type, maps)\n return ffs_specmat.TEB2TQUlms(self.type, self.cov.lib_skyalm, temp)\n\n def calc_gradplikpdet(self, it, key):\n \"\"\"Calculates the likelihood gradient (quadratic and mean-field parts)\n\n \"\"\"\n assert 0, 'subclass this'\n\n def load_graddet(self, it, key):\n \"\"\"Loads mean-field gradient at iteration *it*\n\n Gradient must have already been calculated\n\n \"\"\"\n fname_detterm = os.path.join(self.lib_dir, 'qlm_grad%sdet_it%03d.npy' % (key.upper(), it))\n assert os.path.exists(fname_detterm), fname_detterm\n return self.load_qlm(fname_detterm)\n\n def load_gradpri(self, it, key):\n \"\"\"Loads prior gradient at iteration *it*\n\n Gradient must have already been calculated\n\n \"\"\"\n fname_prior = 
os.path.join(self.lib_dir, 'qlm_grad%spri_it%03d.npy' % (key.upper(), it))\n assert os.path.exists(fname_prior), fname_prior\n return self.load_qlm(fname_prior)\n\n def load_gradquad(self, it, key):\n \"\"\"Loads likelihood quadratic piece gradient at iteration *it*\n\n Gradient must have already been calculated\n\n \"\"\"\n fname_likterm = os.path.join(self.lib_dir, 'qlm_grad%slik_it%03d.npy' % (key.upper(), it))\n assert os.path.exists(fname_likterm), fname_likterm\n return self.load_qlm(fname_likterm)\n\n def load_total_grad(self, it, key):\n \"\"\"Load the total gradient at iteration *it*.\n\n All gradients must have already been calculated.\n\n \"\"\"\n return self.load_gradpri(it, key) + self.load_gradquad(it, key) + self.load_graddet(it, key)\n\n def _calc_norm(self, qlm):\n return np.sqrt(np.sum(self.lib_qlm.alm2rlm(qlm) ** 2))\n\n def _apply_curv(self, k, key, alphak, plm):\n \"\"\"Apply curvature matrix making use of information incuding sk and yk.\n\n Applies v B_{k + 1}v = v B_k v + (y^t v)** 2/(y^t s) - (s^t B v) ** 2 / (s^t B s))\n (B_k+1 = B + yy^t / (y^ts) - B s s^t B / (s^t Bk s)) (all k on the RHS))\n\n For quasi Newton, s_k = x_k1 - x_k = - alpha_k Hk grad_k with alpha_k newton step-length.\n\n --> s^t B s at k is alpha_k^2 g_k H g_k\n B s = -alpha g_k\n \"\"\"\n H = self.get_Hessian(max(k + 1,0), key) # get_Hessian(k) loads sk and yk from 0 to k - 1\n assert H.L > k, 'not implemented'\n assert len(alphak) >= (k + 1),(k + 1,len(alphak))\n dot_op = lambda plm1,plm2,:np.sum(self.lib_qlm.alm2cl(plm1,alm2=plm2) * self.lib_qlm.get_Nell()[:self.lib_qlm.ellmax + 1])\n if k <= -1:\n return dot_op(plm,self.lib_qlm.rlm2alm(H.applyB0k(self.lib_qlm.alm2rlm(plm),0)))\n ret = self._apply_curv(k - 1, key, alphak, plm)\n Hgk = H.get_mHkgk(self.lib_qlm.alm2rlm(self.load_total_grad(k, key)), k)\n st_Bs = alphak[k] ** 2 * dot_op(self.load_total_grad(k, key),self.lib_qlm.rlm2alm(Hgk))\n yt_s = dot_op(self.lib_qlm.rlm2alm(H.s(k)),self.lib_qlm.rlm2alm(H.y(k)))\n yt_v = dot_op(self.lib_qlm.rlm2alm(H.y(k)),plm)\n st_Bv = - alphak[k] *dot_op(self.load_total_grad(k, key),plm)\n return ret + yt_v ** 2 / yt_s - st_Bv ** 2 / st_Bs\n\n def get_lndetcurv_update(self, k, key, alphak):\n #Builds update to the BFGS log-determinant\n\n H = self.get_Hessian(k, key)\n Hgk = H.get_mHkgk(self.lib_qlm.alm2rlm(self.load_total_grad(k, key)), k)\n denom = np.sum(self.lib_qlm.alm2rlm(self.load_total_grad(k, key)) * Hgk)\n num = np.sum(self.lib_qlm.alm2rlm(self.load_total_grad(k + 1, key)) * Hgk)\n assert 1. - num / denom / alphak > 0.\n return np.log(1. 
- num / denom / alphak)\n\n def get_Gaussnoisesample(self, it, key,plm_noisephas, real_space=False, verbose=False):\n \"\"\"Produce a Gaussian random field from the approximate BFGS covariance\n\n Args:\n it: iteration index\n key: 'p' or 'o' for lensing gradient or curl iteration\n plm_noisepha: unit spectra random phases of the right shape\n real_space: produces random field in real space if set, otherwise alm array\n\n\n \"\"\"\n assert key.lower() in ['p', 'o'], key # potential or curl potential.\n assert plm_noisephas.shape == (self.lib_qlm.alm_size,),(plm_noisephas.shape,self.lib_qlm.alm_size)\n\n alm_0 = self.lib_qlm.almxfl(plm_noisephas, np.sqrt(self.get_H0(key)))\n ret = self.get_Hessian(max(it,0), key).sample_Gaussian(it, self.lib_qlm.alm2rlm(alm_0))\n return self.lib_qlm.alm2map(self.lib_qlm.rlm2alm(ret)) if real_space else self.lib_qlm.rlm2alm(ret)\n\n\n def get_Hessian(self, it, key):\n \"\"\"Build the L-BFGS Hessian at iteration *it*\n\n\n \"\"\"\n # Zeroth order inverse Hessian :\n apply_H0k = lambda rlm, k: \\\n self.lib_qlm.alm2rlm(self.lib_qlm.almxfl(self.lib_qlm.rlm2alm(rlm), self.get_H0(key)))\n apply_B0k = lambda rlm, k: \\\n self.lib_qlm.alm2rlm(self.lib_qlm.almxfl(self.lib_qlm.rlm2alm(rlm), cl_inverse(self.get_H0(key))))\n BFGS_H = bfgs.BFGS_Hessian(os.path.join(self.lib_dir, 'Hessian'), apply_H0k, {}, {}, L=self.NR_method,\n verbose=self.verbose,apply_B0k=apply_B0k)\n # Adding the required y and s vectors :\n for k in range(np.max([0, it - BFGS_H.L]), it):\n BFGS_H.add_ys(os.path.join(self.lib_dir, 'Hessian', 'rlm_yn_%s_%s.npy' % (k, key)),\n os.path.join(self.lib_dir, 'Hessian', 'rlm_sn_%s_%s.npy' % (k, key)), k)\n return BFGS_H\n\n def build_incr(self, it, key, gradn):\n \"\"\"Search direction\n\n BGFS method with 'self.NR method' BFGS updates to the Hessian.\n Initial Hessian are built from N0s.\n It must be rank 0 here.\n\n Args:\n it: current iteration level. Will produce the increment to phi_{k-1}, from gradient est. g_{k-1}\n phi_{k_1} + output = phi_k\n key: 'p' or 'o'\n gradn: current estimate of the gradient (alm array)\n\n Returns:\n increment for next iteration (alm array)\n\n \"\"\"\n assert self.PBSRANK == 0, 'single MPI process method !'\n assert it > 0, it\n k = it - 2\n yk_fname = os.path.join(self.lib_dir, 'Hessian', 'rlm_yn_%s_%s.npy' % (k, key))\n if k >= 0 and not os.path.exists(yk_fname): # Caching Hessian BFGS yk update :\n yk = self.lib_qlm.alm2rlm(gradn - self.load_total_grad(k, key))\n self.cache_rlm(yk_fname, yk)\n k = it - 1\n BFGS = self.get_Hessian(k, key) # Constructing L-BFGS Hessian\n # get descent direction sk = - H_k gk : (rlm array). Will be cached directly\n sk_fname = os.path.join(self.lib_dir, 'Hessian', 'rlm_sn_%s_%s.npy' % (k, key))\n step = 0.\n if not os.path.exists(sk_fname):\n print(\"rank %s calculating descent direction\" % self.PBSRANK)\n t0 = time.time()\n incr = BFGS.get_mHkgk(self.lib_qlm.alm2rlm(gradn), k)\n norm_inc = self._calc_norm(self.lib_qlm.rlm2alm(incr)) / self._calc_norm(self.get_Plm(0, key))\n step = self.newton_step_length(it, norm_inc)\n self.cache_rlm(sk_fname,incr * step)\n prt_time(time.time() - t0, label=' Exec. 
time for descent direction calculation')\n assert os.path.exists(sk_fname), sk_fname\n return self.lib_qlm.rlm2alm(self.load_rlm(sk_fname)),step\n\n def iterate(self, it, key, cache_only=False):\n \"\"\"Performs an iteration\n\n This builds the gradients at iteration *it*, and the potential estimate, and saves the *it* + 1 estimate.\n\n \"\"\"\n assert key.lower() in ['p', 'o'], key # potential or curl potential.\n plm_fname = os.path.join(self.lib_dir, '%s_plm_it%03d.npy' % ({'p': 'Phi', 'o': 'Om'}[key.lower()], it))\n if os.path.exists(plm_fname): return None if cache_only else self.load_qlm(plm_fname)\n\n assert self.is_previous_iter_done(it, key), 'previous iteration not done'\n # Calculation in // of lik and det term :\n ti = time.time()\n if self.PBSRANK == 0: # Single processes routines :\n self._calc_ffinv(it - 1, key)\n self.get_gradPpri(it, key, cache_only=True)\n self.barrier()\n # Calculation of the likelihood term, involving the det term over MCs :\n irrelevant = self.calc_gradplikpdet(it, key)\n self.barrier() # Everything should be on disk now.\n if self.PBSRANK == 0:\n incr,steplength = self.build_incr(it, key, self.load_total_grad(it - 1, key))\n self.cache_qlm(plm_fname, self.get_Plm(it - 1, key) + incr, pbs_rank=0)\n\n # Saves some info about increment norm and exec. time :\n norm_inc = self._calc_norm(incr) / self._calc_norm(self.get_Plm(0, key))\n norms = [self._calc_norm(self.load_gradquad(it - 1, key))]\n norms.append(self._calc_norm(self.load_graddet(it - 1, key)))\n norms.append(self._calc_norm(self.load_gradpri(it - 1, key)))\n norm_grad = self._calc_norm(self.load_total_grad(it - 1, key))\n norm_grad_0 = self._calc_norm(self.load_total_grad(0, key))\n for i in [0, 1, 2]: norms[i] = norms[i] / norm_grad_0\n\n with open(os.path.join(self.lib_dir, 'history_increment.txt'), 'a') as file:\n file.write('%03d %.1f %.6f %.6f %.6f %.6f %.6f %.12f \\n'\n % (it, time.time() - ti, norm_inc, norm_grad / norm_grad_0, norms[0], norms[1], norms[2],\n steplength))\n file.close()\n\n if self.tidy > 2: # Erasing dx,dy and det magn (12GB for full sky at 0.74 amin per iteration)\n f1, f2 = self._getfnames_f(key, it - 1)\n f3, f4 = self._getfnames_finv(key, it - 1)\n for _f in [f1, f2, f3, f4]:\n if os.path.exists(_f):\n os.remove(_f)\n if self.verbose: print(\" removed :\", _f)\n if os.path.exists(os.path.join(self.lib_dir, 'f_%04d_libdir' % (it - 1))):\n shutil.rmtree(os.path.join(self.lib_dir, 'f_%04d_libdir' % (it - 1)))\n if self.verbose: print(\"Removed :\", os.path.join(self.lib_dir, 'f_%04d_libdir' % (it - 1)))\n if os.path.exists(os.path.join(self.lib_dir, 'finv_%04d_libdir' % (it - 1))):\n shutil.rmtree(os.path.join(self.lib_dir, 'finv_%04d_libdir' % (it - 1)))\n if self.verbose: print(\"Removed :\", os.path.join(self.lib_dir, 'finv_%04d_libdir' % (it - 1)))\n\n self.barrier()\n return None if cache_only else self.load_qlm(plm_fname)\n\n\nclass ffs_iterator_cstMF(ffs_iterator):\n r\"\"\"Iterator instance, that uses fixed, input mean-field at each step.\n\n Args:\n lib_dir: many things will be written there\n typ: 'T', 'QU' or 'TQU' for estimation on temperature data, polarization data or jointly\n filt: inverse-variance filtering instance (e.g. *lensit.qcinv.ffs_ninv_filt* )\n dat_maps: data maps or path to maps.\n lib_qlm: lib_alm (*lensit.ffs_covs.ell_mat.ffs_alm*) instance describing the lensing estimate Fourier arrays\n Plm0: Starting point for the iterative search. 
alm array consistent with *lib_qlm*\n H0: initial isotropic likelihood curvature approximation (roughly, inverse lensing noise bias :math:`N^{(0)}_L`)\n MF_qlms: mean-field alm array (also desribed by lib_qlm)\n cpp_prior: fiducial lensing power spectrum, used for the prior part of the posterior density.\n\n\n \"\"\"\n\n def __init__(self, lib_dir, typ, filt, dat_maps, lib_qlm, Plm0, H0, MF_qlms, cpp_prior, **kwargs):\n super(ffs_iterator_cstMF, self).__init__(lib_dir, typ, filt, dat_maps, lib_qlm, Plm0, H0, cpp_prior,\n PBSSIZE=1, PBSRANK=0, # so that all proc. act independently\n **kwargs)\n self.MF_qlms = MF_qlms\n\n def calc_gradplikpdet(self, it, key):\n assert key.lower() in ['p', 'o'], key # potential or curl potential.\n fname_likterm = os.path.join(self.lib_dir, 'qlm_grad%slik_it%03d.npy' % (key.upper(), it - 1))\n fname_detterm = os.path.join(self.lib_dir, 'qlm_grad%sdet_it%03d.npy' % (key.upper(), it - 1))\n assert it > 0, it\n if os.path.exists(fname_likterm) and os.path.exists(fname_detterm):\n return 0\n\n assert self.is_previous_iter_done(it, key)\n\n # Identical MF here\n self.cache_qlm(fname_detterm, self.load_qlm(self.MF_qlms))\n self.cov.set_ffi(self._load_f(it - 1, key))\n mchain = multigrid.multigrid_chain(self.opfilt, self.type, self.chain_descr, self.cov,\n no_deglensing=self.nodeglensing)\n # FIXME : The solution input is not working properly sometimes. We give it up for now.\n # FIXME don't manage to find the right d0 to input for a given sol ?!!\n soltn = self.load_soltn(it, key).copy() * self.soltn_cond\n self.opfilt._type = self.type\n mchain.solve(soltn, self.get_datmaps(), finiop='MLIK')\n self._cache_tebwf(soltn, it - 1, key)\n # soltn = self.opfilt.MLIK2BINV(soltn,self.cov,self.get_datmaps())\n # grad = - ql.get_qlms(self.type, self.cov.lib_skyalm, soltn, self.cov.cls, self.lib_qlm,\n # use_Pool=self.use_Pool, f=self.cov.f)[{'p': 0, 'o': 1}[key.lower()]]\n TQUMlik = self.opfilt.soltn2TQUMlik(soltn, self.cov)\n ResTQUMlik = self._mlik2rest_tqumlik(TQUMlik, it, key)\n grad = - ql.get_qlms_wl(self.type, self.cov.lib_skyalm, TQUMlik, ResTQUMlik, self.lib_qlm,\n use_Pool=self.use_Pool, f=self._load_f(it - 1, key))[{'p': 0, 'o': 1}[key.lower()]]\n\n self.cache_qlm(fname_likterm, grad, pbs_rank=self.PBSRANK)\n # It does not help to cache both grad_O and grad_P as they do not follow the trajectory in plm space.\n return 0\n\n\nclass ffs_iterator_pertMF(ffs_iterator):\n \"\"\"Iterator instance, that uses the deflection-perturbative prediction for the mean-field at each step.\n\n Args:\n lib_dir: many things will be written there\n typ: 'T', 'QU' or 'TQU' for estimation on temperature data, polarization data or jointly\n filt: inverse-variance filtering instance (e.g. *lensit.qcinv.ffs_ninv_filt* )\n dat_maps: data maps or path to maps.\n lib_qlm: lib_alm (*lensit.ffs_covs.ell_mat.ffs_alm*) instance describing the lensing estimate Fourier arrays\n Plm0: Starting point for the iterative search. alm array consistent with *lib_qlm*\n H0: initial isotropic likelihood curvature approximation (roughly, inverse lensing noise bias :math:`N^{(0)}_L`)\n cpp_prior: fiducial lensing power spectrum, used for the prior part of the posterior density.\n\n\n \"\"\"\n\n def __init__(self, lib_dir, typ, filt: filtr, dat_maps, lib_qlm, Plm0, H0, cpp_prior,\n init_rank=pbs.rank, init_barrier=pbs.barrier, **kwargs):\n super(ffs_iterator_pertMF, self).__init__(lib_dir, typ, filt, dat_maps, lib_qlm, Plm0, H0, cpp_prior,\n PBSSIZE=1, PBSRANK=0, # so that all proc. 
act independently\n **kwargs)\n\n lmax_ivf = filt.lib_datalm.ellmax\n iso_libdat = filt.lib_datalm\n cls_noise = {'t': (filt.Nlev_uKamin('t') / 60. / 180. * np.pi) ** 2 * np.ones(lmax_ivf + 1),\n 'q': (filt.Nlev_uKamin('q') / 60. / 180. * np.pi) ** 2 * np.ones(lmax_ivf + 1),\n 'u': (filt.Nlev_uKamin('u') / 60. / 180. * np.pi) ** 2 * np.ones(lmax_ivf + 1)}\n self.isocov = ffs_cov.ffs_diagcov_alm(os.path.join(lib_dir, 'isocov'),\n iso_libdat, filt.cls, filt.cls, filt.cl_transf, cls_noise,\n lib_skyalm=filt.lib_skyalm, init_rank=init_rank,\n init_barrier=init_barrier)\n\n def get_mfresp(self, key):\n return self.isocov.get_mfresplms(self.type, self.lib_qlm, use_cls_len=False)[{'p': 0, 'o': 1}[key.lower()]]\n\n def calc_gradplikpdet(self, it, key):\n assert key.lower() in ['p', 'o'], key # potential or curl potential.\n fname_likterm = os.path.join(self.lib_dir, 'qlm_grad%slik_it%03d.npy' % (key.upper(), it - 1))\n fname_detterm = os.path.join(self.lib_dir, 'qlm_grad%sdet_it%03d.npy' % (key.upper(), it - 1))\n assert it > 0, it\n if os.path.exists(fname_likterm) and os.path.exists(fname_detterm):\n return 0\n\n assert self.is_previous_iter_done(it, key)\n # Identical MF here\n self.cache_qlm(fname_detterm, self.load_qlm(self.get_mfresp(key.lower()) * self.get_Plm(it - 1, key.lower())))\n self.cov.set_ffi(self._load_f(it - 1, key))\n mchain = multigrid.multigrid_chain(self.opfilt, self.type, self.chain_descr, self.cov,\n no_deglensing=self.nodeglensing)\n # FIXME : The solution input is not working properly sometimes. We give it up for now.\n # FIXME don't manage to find the right d0 to input for a given sol ?!!\n soltn = self.load_soltn(it, key).copy() * self.soltn_cond\n self.opfilt._type = self.type\n mchain.solve(soltn, self.get_datmaps(), finiop='MLIK')\n self._cache_tebwf(soltn, it - 1, key)\n TQUMlik = self.opfilt.soltn2TQUMlik(soltn, self.cov)\n ResTQUMlik = self._mlik2rest_tqumlik(TQUMlik, it, key)\n grad = - ql.get_qlms_wl(self.type, self.cov.lib_skyalm, TQUMlik, ResTQUMlik, self.lib_qlm,\n use_Pool=self.use_Pool, f=self._load_f(it - 1, key))[{'p': 0, 'o': 1}[key.lower()]]\n\n self.cache_qlm(fname_likterm, grad, pbs_rank=self.PBSRANK)\n # It does not help to cache both grad_O and grad_P as they do not follow the trajectory in plm space.\n return 0\n\n\nclass ffs_iterator_simMF(ffs_iterator):\n r\"\"\"Iterator instance, that estimate the mean-field at each steps from Monte-Carlos.\n\n Args:\n lib_dir: many things will be written there\n typ: 'T', 'QU' or 'TQU' for estimation on temperature data, polarization data or jointly\n MFkey: mean-field estimator key\n nsims: number of sims to use at each step\n filt: inverse-variance filtering instance (e.g. *lensit.qcinv.ffs_ninv_filt* )\n dat_maps: data maps or path to maps.\n lib_qlm: lib_alm (*lensit.ffs_covs.ell_mat.ffs_alm*) instance describing the lensing estimate Fourier arrays\n Plm0: Starting point for the iterative search. 
alm array consistent with *lib_qlm*\n H0: initial isotropic likelihood curvature approximation (roughly, inverse lensing noise bias :math:`N^{(0)}_L`)\n cpp_prior: fiducial lensing power spectrum, used for the prior part of the posterior density.\n\n\n \"\"\"\n def __init__(self, lib_dir, typ, MFkey, nsims, filt: filtr, dat_maps, lib_qlm, Plm0, H0, cpp_prior, **kwargs):\n super(ffs_iterator_simMF, self).__init__(lib_dir, typ, filt, dat_maps, lib_qlm, Plm0, H0, cpp_prior,\n **kwargs)\n print('++ ffs_%s simMF iterator (PBSSIZE %s pbs.size %s) : setup OK' % (self.type, self.PBSSIZE, pbs.size))\n self.MFkey = MFkey\n self.nsims = nsims\n self.same_seeds = kwargs.pop('same_seeds', False)\n self.subtract_phi0 = kwargs.pop('subtract_phi0', True)\n self.barrier()\n\n def build_pha(self, it):\n \"\"\"Builds sims for the mean-field evaluation at iter *it*\n\n \"\"\"\n if self.nsims == 0: return None\n phas_pix = ffs_phas.pix_lib_phas(\n os.path.join(self.lib_dir, '%s_sky_noise_iter%s' % (self.type, it * (not self.same_seeds))),\n len(self.type), self.cov.lib_datalm.shape, nsims_max=self.nsims)\n phas_cmb = None # dont need it so far\n if self.PBSRANK == 0:\n for lib, lab in zip([phas_pix, phas_cmb], ['phas pix', 'phas_cmb']):\n if not lib is None and not lib.is_full():\n print(\"++ run iterator regenerating %s phases mf_sims rank %s...\" % (lab, self.PBSRANK))\n for idx in np.arange(self.nsims): lib.get_sim(idx, phas_only=True)\n self.barrier()\n return phas_pix, phas_cmb\n\n def calc_gradplikpdet(self, it, key, callback='default_callback'):\n \"\"\"Caches the det term for iter via MC sims, together with the data one, with MPI maximal //isation.\n\n \"\"\"\n assert key.lower() in ['p', 'o'], key # potential or curl potential.\n fname_detterm = os.path.join(self.lib_dir, 'qlm_grad%sdet_it%03d.npy' % (key.upper(), it - 1))\n fname_likterm = os.path.join(self.lib_dir, 'qlm_grad%slik_it%03d.npy' % (key.upper(), it - 1))\n if os.path.exists(fname_detterm) and os.path.exists(fname_likterm):\n return 0\n assert self.is_previous_iter_done(it, key)\n\n pix_pha, cmb_pha = self.build_pha(it)\n if self.PBSRANK == 0 and not os.path.exists(os.path.join(self.lib_dir, 'mf_it%03d' % (it - 1))):\n os.makedirs(os.path.join(self.lib_dir, 'mf_it%03d' % (it - 1)))\n self.barrier()\n\n # Caching gradients for the mc_sims_mf sims , plus the dat map.\n # The gradient of the det term is the data averaged lik term, with the opposite sign.\n\n jobs = []\n try:\n self.load_qlm(fname_likterm)\n except:\n jobs.append(-1) # data map\n for idx in range(self.nsims): # sims\n if not os.path.exists(os.path.join(self.lib_dir, 'mf_it%03d/g%s_%04d.npy' % (it - 1, key.lower(), idx))):\n jobs.append(idx)\n else:\n try: # just checking if file is OK.\n self.load_qlm(os.path.join(self.lib_dir, 'mf_it%03d/g%s_%04d.npy' % (it - 1, key.lower(), idx)))\n except:\n jobs.append(idx)\n self.opfilt._type = self.type\n # By setting the chain outside the main loop we avoid potential MPI barriers\n # in degrading the lib_alm libraries:\n mchain = multigrid.multigrid_chain(self.opfilt, self.type, self.chain_descr, self.cov,\n no_deglensing=self.nodeglensing)\n for i in range(self.PBSRANK, len(jobs), self.PBSSIZE):\n idx = jobs[i]\n print(\"rank %s, doing mc det. 
gradients idx %s, job %s in %s at iter level %s:\" \\\n % (self.PBSRANK, idx, i, len(jobs), it))\n ti = time.time()\n\n if idx >= 0: # sim\n grad_fname = os.path.join(self.lib_dir, 'mf_it%03d/g%s_%04d.npy' % (it - 1, key.lower(), idx))\n self.cov.set_ffi(self._load_f(it - 1, key))\n MFest = ql.MFestimator(self.cov, self.opfilt, mchain, self.lib_qlm,\n pix_pha=pix_pha, cmb_pha=cmb_pha, use_Pool=self.use_Pool)\n grad = MFest.get_MFqlms(self.type, self.MFkey, idx)[{'p': 0, 'o': 1}[key.lower()]]\n if self.subtract_phi0:\n isofilt = self.cov.turn2isofilt()\n chain_descr_iso = chain_samples.get_isomgchain(\n self.cov.lib_skyalm.ellmax, self.cov.lib_datalm.shape, iter_max=self.maxiter)\n mchain_iso = multigrid.multigrid_chain(\n self.opfilt, self.type, chain_descr_iso, isofilt, no_deglensing=self.nodeglensing)\n MFest = ql.MFestimator(isofilt, self.opfilt, mchain_iso, self.lib_qlm,\n pix_pha=pix_pha, cmb_pha=cmb_pha, use_Pool=self.use_Pool)\n grad -= MFest.get_MFqlms(self.type, self.MFkey, idx)[{'p': 0, 'o': 1}[key.lower()]]\n self.cache_qlm(grad_fname, grad, pbs_rank=self.PBSRANK)\n else:\n # This is the data.\n # FIXME : The solution input is not working properly sometimes. We give it up for now.\n # FIXME don't manage to find the right d0 to input for a given sol ?!!\n self.cov.set_ffi(self._load_f(it - 1, key))\n soltn = self.load_soltn(it, key).copy() * self.soltn_cond\n mchain.solve(soltn, self.get_datmaps(), finiop='MLIK')\n self._cache_tebwf(soltn, it - 1, key)\n TQUMlik = self.opfilt.soltn2TQUMlik(soltn, self.cov)\n ResTQUMlik = self._mlik2rest_tqumlik(TQUMlik, it, key)\n grad = - ql.get_qlms_wl(self.type, self.cov.lib_skyalm, TQUMlik, ResTQUMlik, self.lib_qlm,\n use_Pool=self.use_Pool, f=self._load_f(it - 1, key))[\n {'p': 0, 'o': 1}[key.lower()]]\n self.cache_qlm(fname_likterm, grad, pbs_rank=self.PBSRANK)\n\n print(\"%s it. %s sim %s, rank %s cg status \" % (key.lower(), it, idx, self.PBSRANK))\n # It does not help to cache both grad_O and grad_P as they do not follow the trajectory in plm space.\n # Saves some info about current iteration :\n if idx == -1: # Saves some info about iteration times etc.\n with open(os.path.join(self.lib_dir, 'cghistories','history_dat.txt'), 'a') as file:\n file.write('%04d %.3f \\n' % (it, time.time() - ti))\n file.close()\n else:\n with open(os.path.join(self.lib_dir, 'cghistories', 'history_sim%04d.txt' % idx), 'a') as file:\n file.write('%04d %.3f \\n' % (it, time.time() - ti))\n file.close()\n self.barrier()\n if self.PBSRANK == 0:\n # Collecting terms and caching det term.\n # We also cache arrays formed from independent sims for tests.\n print(\"rank 0, collecting mc det. 
%s gradients :\" % key.lower())\n det_term = np.zeros(self.lib_qlm.alm_size, dtype=complex)\n for i in range(self.nsims):\n fname = os.path.join(self.lib_dir, 'mf_it%03d'%(it -1),'g%s_%04d.npy'%(key.lower(), i))\n det_term = (det_term * i + self.load_qlm(fname)) / (i + 1.)\n self.cache_qlm(fname_detterm, det_term, pbs_rank=0)\n det_term *= 0.\n fname_detterm1 = fname_detterm.replace('.npy', 'MF1.npy')\n assert 'MF1' in fname_detterm1\n for i in np.arange(self.nsims)[0::2]:\n fname = os.path.join(self.lib_dir, 'mf_it%03d'%(it - 1),'g%s_%04d.npy'%(key.lower(), i))\n det_term = (det_term * i + self.load_qlm(fname)) / (i + 1.)\n self.cache_qlm(fname_detterm1, det_term, pbs_rank=0)\n det_term *= 0.\n fname_detterm2 = fname_detterm.replace('.npy', 'MF2.npy')\n assert 'MF2' in fname_detterm2\n for i in np.arange(self.nsims)[1::2]:\n fname = os.path.join(self.lib_dir, 'mf_it%03d'%(it - 1),'g%s_%04d.npy'%(key.lower(), i))\n det_term = (det_term * i + self.load_qlm(fname)) / (i + 1.)\n self.cache_qlm(fname_detterm2, det_term, pbs_rank=0)\n\n # Erase some temp files if requested to do so :\n if self.tidy > 1:\n # We erase as well the gradient determinant term that were stored on disk :\n files_to_remove = \\\n [os.path.join(self.lib_dir, 'mf_it%03d'%(it -1), 'g%s_%04d.npy'%(key.lower(), i)) for i in range(self.nsims)]\n print('rank %s removing %s maps in ' % (\n self.PBSRANK, len(files_to_remove)), os.path.join(self.lib_dir, 'mf_it%03d'%(it - 1)))\n for file in files_to_remove: os.remove(file)\n self.barrier()\n","sub_path":"lensit/ffs_iterators/ffs_iterator_nufft.py","file_name":"ffs_iterator_nufft.py","file_ext":"py","file_size_in_byte":42553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"408312350","text":"\n\nimport numpy as np\nimport time\nfrom time import sleep\nimport cv2\nfrom uf.wrapper.swift_api import SwiftAPI\nfrom uf.utils.log import *\nlogger_init(logging.INFO)\n\nSERVO_BOTTOM = 0\nSERVO_LEFT = 1\nSERVO_RIGHT = 2\nSERVO_HAND = 3\n\narm = SwiftAPI()\nsleep(2.0)\nposition_arr=[]\n\nangles_arr=[]\n'''\n Set servo angle, 0 - 180 degrees, this Function will include the manual servo offset.\n\n Args:\n servo_id: SERVO_BOTTOM, SERVO_LEFT, SERVO_RIGHT, SERVO_HAND\n angle: 0 - 180 degrees\n wait: if True, will block the thread, until get response or timeout\n\n Returns:\n \n succeed True or failed False\n '''\n\nfor _ in range(100):\n\n\n\n rand = np.random.rand(3)\n angles = rand*180\n\n print (\"angles are\",angles)\n\n arm.flush_cmd()\n arm.reset()\n\n arm.set_servo_angle_speed( SERVO_RIGHT ,angles[0], wait = True, timeout = 100,speed = 5000)\n\n arm.set_servo_angle_speed( SERVO_BOTTOM ,angles[1], wait = True, timeout = 100,speed = 5000)\n\n arm.set_servo_angle_speed( SERVO_LEFT ,angles[2] , wait = True, timeout = 100,speed = 5000)\n a= arm.get_is_moving()\n print (\"the status is\",a,\"\\n\")\n pos = arm.get_position() # float array of the format [x, y, z] of the robots current location\n # pixelpostion= get the pixel position here\n\n print(\"the position is \", pos ,\"\\n\")\n position_arr.append(pos)\n angles=angles.tolist()\n angles_arr.append(angles) # [right bottom left]\n sleep(2.0)\n\n\n\n\nDAT = np.row_stack((position_arr , angles_arr))\n\nprint(\"try final \", DAT)\n\nnp.savetxt(\"data1.text\", DAT , newline=\" \\n \",fmt='%s')\n","sub_path":"Final_Project_BIR/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"245309386","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 22 11:17:51 2017\n\n@author: louis\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom gen_data import load_2Dblobs\nfrom multilayer_nn import SimpleNeuralNetwork\nfrom sklearn.preprocessing import OneHotEncoder\nimport nn_utilities as utils\n\nnp.random.seed(1)\n\nplt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots\nplt.rcParams['image.interpolation'] = 'nearest'\nplt.rcParams['image.cmap'] = 'gray'\n\nX_train, Y_train, X_test, Y_test = load_2Dblobs(\n 5000, n_center=5, ratio=0.5, one_hot=True)\n\nenc = OneHotEncoder(sparse=False)\n# training sample should be large enough to cover all encoding cases\nY_train_one_hot, idx_to_one_hot, one_hot_to_idx = utils.convert_to_one_hot(\n Y_train, enc)\nY_test_one_hot, _, _ = utils.convert_to_one_hot(Y_test, enc)\n\nlayers = []\nlayers.append((X_train.shape[1], \"linear\"))\nlayers.append((10, \"relu\"))\nlayers.append((10, \"relu\"))\nlayers.append((10, \"relu\"))\nlayers.append((Y_train_one_hot.shape[1], \"softmax\"))\n\nmodel = SimpleNeuralNetwork(layers, \"xavier\")\n\"\"\"\n# gradient check\ngrad_check_x = X_train[0, :].reshape(-1, 1)\ngrad_check_y = Y_train_one_hot[0, :].reshape(-1, 1)\nmodel.gradient_check(grad_check_x, grad_check_y, epsilon=1e-8, weight_decay=.0)\n\"\"\"\n\n# model training\nmodel.train(\n X_train.T,\n Y_train_one_hot.T,\n X_test.T,\n Y_test_one_hot.T,\n mini_batch_size=64,\n learning_rate=0.005,\n weight_decay=0.,\n keep_prob=0.5,\n num_epochs=100)\n\nplt.scatter(\n X_train[:, 0], X_train[:, 1], s=40, c=Y_train, cmap=plt.cm.Spectral)\n\nutils.plot_decision_boundary(lambda x, y: model.predict_class_output(x.T, y),\n X_train, Y_train, one_hot_to_idx)\n","sub_path":"machine_learning/train_nn.py","file_name":"train_nn.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"141085615","text":"from .models import User\nfrom django.conf import settings\n\ndef setting_processor(request):\n\treturn {\n\t\t'site_name':settings.SITE_NAME\n\t}\n\ndef user_processor(request):\n logged_user = {}\n if 'logged_user' in request.session:\n logged_user = request.session['logged_user']\n if 'id' in logged_user:\n user = User.objects.filter(id=logged_user['id'])\n if len(user) > 0:\n user = user[0]\n return {'logged_user' : {\n 'username':user.username,\n 'fullname':user.fullname,\n 'data':user.data,\n 'email':user.email,\n }}\n return {}\n","sub_path":"simpleoj/context_processor.py","file_name":"context_processor.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"53720649","text":"from turtle import Screen, Turtle\r\nimport time\r\n\r\nwindow = Screen()\r\nwindow.title(\"7 Points Star\")\r\n\r\npen = Turtle()\r\npen.pensize(4)\r\n\r\nfor i in range(7):\r\n pen.left(51.428) # internal angle of rotation is 51.428 as in every isosceles triangle formed by the star, one angle is 77.143\r\n pen.forward(50)\r\n #time.sleep(1)\r\n pen.right(102.857) # external angle of rotation is 102.857\r\n pen.forward(50)\r\n #time.sleep(1)\r\n\r\ntime.sleep(5)\r\n\r\n#Note: internal angle = 77.143\r\n# internal angle of rotation = 51.428 (i.e(2a = 180-77.143) thus a=51.428)\r\n# external angle of rotation = 102.857 (i.e 180-77.143)\r\n","sub_path":"Python 3 Programming/Turtle 
Graphics/7_point_star_A.py","file_name":"7_point_star_A.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"616109561","text":"import torch\nimport torch.onnx\nimport os\nfrom unet import UNet\n\ncheckpoint_root = './checkpoint/'\ncuda = torch.cuda.is_available()\ndevice = torch.device(\"cuda\" if cuda else \"cpu\")\n\nunet = UNet()\nunet = unet.to(device)\ndummy_input = torch.randn(1, 1, 256, 256, device=device)\nfor file in os.listdir(checkpoint_root):\n if file.startswith(\"unet\") and file.endswith(\".tar\"):\n checkpoint = torch.load(checkpoint_root + file, map_location='cpu')\n unet.load_state_dict(checkpoint['state_dict'])\n\n\ntorch.onnx.export(unet, dummy_input, checkpoint_root + \"onnx_unet.onnx\")\n","sub_path":"torch_to_onnx.py","file_name":"torch_to_onnx.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"533362475","text":"import math\nimport random\n\nimport numpy as np\nfrom sklearn import svm\n\ndef get_data():\n inputs = list()\n outputs = list()\n for x in np.arange(-1.0, 1.0, 0.1):\n for y in np.arange(0.0, 5.0, 0.1):\n for z in np.arange(1.0, 5.0, 0.1):\n inputs.append([x, y, z])\n outputs.append([x*x + 2*x*y - math.sqrt(z)])\n return inputs, outputs\n\n\nif __name__ == '__main__':\n X, y = get_data()","sub_path":"Neural_networks/practice7/practice7.py","file_name":"practice7.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"540318391","text":"from django.shortcuts import render\nimport requests\nfrom .models import City\nfrom .forms import city_name\n\n\n\ndef index(request):\n url = 'http://api.openweathermap.org/data/2.5/forecast?q={}&APPID=4e3aadd76d19d3209156dccbff340001'\n\n form = city_name(request.POST)\n if request.method == 'POST':\n\n if form.is_valid():\n city = form.cleaned_data['name']\n else:\n city = \"Mumbai\" #Default City\n\n\n r = requests.get(url.format(city)).json()\n context = {\n 'city' : city,\n 'temperature' : r['list'][0]['main']['temp'],\n 'describe' : r['list'][0]['weather'][0]['description'] ,\n 'pressure' : r['list'][0]['main']['pressure'],\n 'sea_level' : r['list'][0]['main']['sea_level'],\n 'humidity' : r['list'][0]['main']['humidity'],\n 'form' : form,\n }\n\n return render(request,'weather/index.html',context = context)\n\n\ndef forms_view(request):\n form = forms.city_name()\n return render(request,'weather/forms.html',{'form' : form })\n","sub_path":"the_weather/weather/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"648842398","text":"\"\"\"\nOverview\n========\n\nThis plugin implements a mechanism to highligh pairs of ( ) [ ] { }.\n\nUsage\n=====\n\nPlace the cursor over one of the following characters ( ) [ ] { } \nin order to highligh its matching pair.\n\n\"\"\"\n\ndef install(area, setup={'background':'pink', 'foreground':'black'}, \n MAX=1500, TIME=500):\n\n def cave(tag, args):\n area.after(TIME, cave, tag, args)\n index = area.case_pair(MAX, *args)\n\n if not index: \n area.tag_remove(tag, '1.0', 'end')\n else:\n area.tag_update(tag, '1.0', 'end', \n ('insert', 'insert +1c'), (index, '%s +1c' % index))\n\n area.tag_config('_paren_', **setup)\n area.tag_config('_bracket_', **setup)\n 
area.tag_config('_brace_', **setup)\n cave('_paren_', ('(', ')'))\n cave('_bracket_', ('[', ']'))\n cave('_brace_', ('{', '}'))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"vy-code/vyapp/plugins/match_sym_pair.py","file_name":"match_sym_pair.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"478814717","text":"# This file is part of the Trezor project.\n#\n# Copyright (C) 2012-2019 SatoshiLabs and contributors\n#\n# This library is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License version 3\n# as published by the Free Software Foundation.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the License along with this library.\n# If not, see .\n\nimport pytest\n\nfrom trezorlib import btc, messages, tools\n\nfrom .. import bip32\nfrom ..common import MNEMONIC12\n\n\nclass TestMsgGetaddressShow:\n @pytest.mark.setup_client(mnemonic=MNEMONIC12)\n def test_show(self, client):\n assert (\n btc.get_address(client, \"Bitcoin\", [1], show_display=True)\n == \"1CK7SJdcb8z9HuvVft3D91HLpLC6KSsGb\"\n )\n assert (\n btc.get_address(client, \"Bitcoin\", [2], show_display=True)\n == \"15AeAhtNJNKyowK8qPHwgpXkhsokzLtUpG\"\n )\n assert (\n btc.get_address(client, \"Bitcoin\", [3], show_display=True)\n == \"1CmzyJp9w3NafXMSEFH4SLYUPAVCSUrrJ5\"\n )\n\n @pytest.mark.multisig\n @pytest.mark.setup_client(mnemonic=MNEMONIC12)\n def test_show_multisig_3(self, client):\n node = bip32.deserialize(\n \"xpub661MyMwAqRbcF1zGijBb2K6x9YiJPh58xpcCeLvTxMX6spkY3PcpJ4ABcCyWfskq5DDxM3e6Ez5ePCqG5bnPUXR4wL8TZWyoDaUdiWW7bKy\"\n )\n multisig = messages.MultisigRedeemScriptType(\n pubkeys=[\n messages.HDNodePathType(node=node, address_n=[1]),\n messages.HDNodePathType(node=node, address_n=[2]),\n messages.HDNodePathType(node=node, address_n=[3]),\n ],\n signatures=[b\"\", b\"\", b\"\"],\n m=2,\n )\n\n for i in [1, 2, 3]:\n assert (\n btc.get_address(\n client, \"Bitcoin\", [i], show_display=True, multisig=multisig\n )\n == \"3E7GDtuHqnqPmDgwH59pVC7AvySiSkbibz\"\n )\n\n @pytest.mark.skip_t1\n @pytest.mark.multisig\n def test_show_multisig_xpubs(self, client):\n nodes = [\n btc.get_public_node(\n client, tools.parse_path(f\"48h/0h/{i}h\"), coin_name=\"Bitcoin\"\n )\n for i in range(3)\n ]\n multisig = messages.MultisigRedeemScriptType(\n nodes=[n.node for n in nodes],\n signatures=[b\"\", b\"\", b\"\"],\n address_n=[0, 0],\n m=2,\n )\n\n xpubs = [[n.xpub[i * 16 : (i + 1) * 16] for i in range(5)] for n in nodes]\n\n for i in range(3):\n\n def input_flow():\n yield # show address\n assert client.debug.wait_layout().lines == [\n \"Multisig 2 of 3\",\n \"34yJV2b2GtbmxfZNw\",\n \"jPyuyUYkUbUnogqa8\",\n ]\n\n client.debug.press_no()\n yield # show QR code\n assert client.debug.wait_layout().text.startswith(\"Qr\")\n\n client.debug.press_no()\n yield # show XPUB#1\n lines = client.debug.wait_layout().lines\n assert lines[0] == \"XPUB #1 \" + (\"(yours)\" if i == 0 else \"(others)\")\n assert lines[1:] == xpubs[0]\n # just for UI test\n client.debug.swipe_up()\n\n client.debug.press_no()\n yield # show XPUB#2\n lines = client.debug.wait_layout().lines\n assert lines[0] == \"XPUB #2 \" + (\"(yours)\" if i == 1 else 
\"(others)\")\n assert lines[1:] == xpubs[1]\n # just for UI test\n client.debug.swipe_up()\n\n client.debug.press_no()\n yield # show XPUB#3\n lines = client.debug.wait_layout().lines\n assert lines[0] == \"XPUB #3 \" + (\"(yours)\" if i == 2 else \"(others)\")\n assert lines[1:] == xpubs[2]\n # just for UI test\n client.debug.swipe_up()\n\n client.debug.press_yes()\n\n with client:\n client.set_input_flow(input_flow)\n btc.get_address(\n client,\n \"Bitcoin\",\n tools.parse_path(f\"48h/0h/{i}h/0/0\"),\n show_display=True,\n multisig=multisig,\n script_type=messages.InputScriptType.SPENDMULTISIG,\n )\n\n @pytest.mark.multisig\n @pytest.mark.setup_client(mnemonic=MNEMONIC12)\n def test_show_multisig_15(self, client):\n node = bip32.deserialize(\n \"xpub661MyMwAqRbcF1zGijBb2K6x9YiJPh58xpcCeLvTxMX6spkY3PcpJ4ABcCyWfskq5DDxM3e6Ez5ePCqG5bnPUXR4wL8TZWyoDaUdiWW7bKy\"\n )\n\n pubs = []\n for x in range(15):\n pubs.append(messages.HDNodePathType(node=node, address_n=[x]))\n\n multisig = messages.MultisigRedeemScriptType(\n pubkeys=pubs, signatures=[b\"\"] * 15, m=15\n )\n\n for i in range(15):\n assert (\n btc.get_address(\n client, \"Bitcoin\", [i], show_display=True, multisig=multisig\n )\n == \"3QaKF8zobqcqY8aS6nxCD5ZYdiRfL3RCmU\"\n )\n","sub_path":"tests/device_tests/test_msg_getaddress_show.py","file_name":"test_msg_getaddress_show.py","file_ext":"py","file_size_in_byte":5514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"238924763","text":"def trapezoid(f, a, b, eps, verbose = False):\n '''\n Trapezoidal rule for numerical integration\n f = f(x) to integrate (a function)\n a = initial point \n b = final point\n eps = tolerance\n verbose = True if it is desired to print the output each iteration\n \n Return:\n Intergral from a to b, relative error, best order(k)\n '''\n # Initializing\n n = 1\n er = 1\n integral = []\n ER = []\n \n # Calculating order zero\n x = [a, b]\n y = list(map(f, x)) \n I = ((b-a)/2)*(y[0] + 2*sum(y[1:-1]) + y[-1])\n integral.append(I)\n \n k = 0\n \n if verbose:\n print('Integral value: ', integral[k])\n print('Relative Error: NaN')\n print('Order: ', k)\n \n while eps < er:\n k = k+1\n # Att the order and x division\n n = 2**k\n h = (b-a)/n\n x = list(np.linspace(a,b,n, endpoint=False))\n x = x + [b]\n # Calculating for all x\n y = list(map(f, x))\n \n # The integral\n I = (h/2)*(y[0] + 2*sum(y[1:-1]) + y[-1])\n integral.append(I)\n \n # Relative Error\n er = abs((integral[k] - integral[k-1])/integral[k-1])\n ER.append(er)\n \n if verbose:\n print('Integral value: ', integral[k])\n print('Relative Error: ', er)\n print('Order: ', k)\n \n \n \n return I, integral, ER, k\n \ndef plot(I, ER, save=False):\n xn = [i for i in range(len(I))]\n yy = 1\n xx = 2\n fig = make_subplots(\n rows=yy, cols=xx,\n horizontal_spacing = 0.15\n )\n \n # Layout adjustments\n #fig.update_layout(plot_bgcolor='rgb(255,255,255)')\n fig.update_xaxes(showline=True, linewidth=1, linecolor='black', mirror=True)\n fig.update_yaxes(showline=True, linewidth=1, linecolor='black', mirror=True)\n fig.update_layout(showlegend=False, title_text=\"Resultados da integração pela regra do trapézio\")\n fig.update_layout(font=dict(family=\"Palatino Linotype\",size=12,color=\"Black\"))\n \n # Integral plot\n fig.add_trace(go.Scatter(x=xn, y=I , marker=dict(\n color='royalblue',\n line_width=0.5\n )), row=1, col=1)\n fig.update_xaxes(title_text=\"Ordem\", row=1, col=1)\n fig.update_yaxes(title_text=\"Valor integral [UA]\", row=1, col=1)\n \n # Relative 
error plot\n fig.add_trace(go.Scatter(x=xn[1::], y=ER, marker=dict(\n color='red',\n line_width=0.5\n )), row=1, col=2)\n fig.update_xaxes(title_text=\"Ordem\", row=1, col=2)\n fig.update_yaxes(title_text=\"Erro relativo\", row=1, col=2)\n \n if save:\n fig.write_image(\"resultado_integracaotrapezio.png\")\n \n fig.show()\n\ndef to_df(I, ER, name = 'P_IntegracaoNumerica.xlsx', save=False):\n ER = [''] + ER\n df = pd.DataFrame(columns = 'Integral;Erro Relativo'.split(';'))\n df['Integral'] = I; df['Erro Relativo'] = ER;\n if save: df.to_excel(name)\n return df\n \neps = 1.0E-6\na = -1.0\nb = 1.0\n\nI, integral, ER, n = trapezoid(lambda x: 1/(x+2), a, b, eps, verbose=False)\nplot(integral, ER, save=True)\ndf = to_df(integral, ER, save=True)\n","sub_path":"trapezoid2.py","file_name":"trapezoid2.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"492050484","text":"from model import TEMP_FOLDER\nimport os\nimport shutil\n\n\ndef init():\n \"\"\"Global state for the service.\"\"\"\n import threading\n state_lock = threading.Lock()\n states = {}\n\n global state\n state = None\n\n global num_sessions\n num_sessions = 0\n\n global reset_state\n def reset_state(repo_id):\n global state\n if state and state[\"busy\"]:\n global num_sessions\n num_sessions -= 1\n\n states[repo_id] = {\n \"busy\": False,\n \"session_id\": None,\n \"dataset_id\": None,\n \"repo_id\": repo_id,\n \"current_round\": 0,\n \"num_nodes_averaged\": 0,\n \"num_nodes_chosen\": 0,\n \"current_weights\": None,\n \"current_gradients\": None,\n \"sigma_omega\": None,\n \"weights_shape\": None,\n \"initial_message\": None,\n \"last_message_time\": None,\n \"last_message_sent_to_library\": None,\n \"test\": False,\n \"h5_model_path\": None,\n \"library_type\": None,\n \"ios_type\": None\n }\n \n temp_folder = os.path.join(TEMP_FOLDER, repo_id)\n if os.path.isdir(temp_folder):\n shutil.rmtree(temp_folder)\n\n global start_state\n def start_state(repo_id):\n state_lock.acquire()\n if repo_id not in states:\n reset_state(repo_id)\n global state\n state = states[repo_id]\n\n global stop_state\n def stop_state():\n global state\n state = None\n state_lock.release()\n\n global start_state_by_session_id\n def start_state_by_session_id(session_id):\n for repo_id, state in states.items():\n if state[\"session_id\"] == session_id:\n start_state(repo_id)\n return repo_id\n return None\n","sub_path":"cloud-node/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":1826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"480973488","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom time import gmtime, strftime\n\n\n# Create your views here.\n\n# '/' OR '/session_words'\ndef index(request):\n return render(request,'words/index.html')\n\n\n# '/session_words/add'\ndef add(request):\n if 'words' not in request.session:\n request.session['words'] = []\n\n if 'size' not in request.POST:\n size = 'small'\n else:\n size = request.POST['size']\n \n time = strftime(\"%I:%M:%S %p, %B %d %Y\", gmtime())\n\n wordslist = request.session['words']\n wordslist.append({\n 'word': request.POST['word'],\n 'color': request.POST['color'],\n 'size': size,\n 'time': time\n })\n request.session['words'] = wordslist\n return redirect('/session_words')\n\n\n# '/session_words/clear'\ndef clear(request):\n request.session['words'] = []\n return 
redirect('/session_words')","sub_path":"apps/words/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"298593017","text":"import pandas as pd\r\nimport numpy\r\nimport xlrd\r\n\r\ndef strip_punctuation(file):\r\n \r\n text = []\r\n punctuation_chars = [\"'\", '\"', \",\", \".\", \"!\", \":\", \";\", \"#\", \"@\", \"-\", \"(\", \")\", \"_\"]\r\n \r\n for list in file:\r\n for z in list:\r\n for y in punctuation_chars:\r\n if y in z:\r\n z = z.replace(y, '')\r\n text.append(z)\r\n\r\n tx = \" \".join(text)\r\n return tx.lower()\r\n\r\n\r\n\r\ndef get_pos(file):\r\n count = 0\r\n positive_word = []\r\n \r\n main_file = strip_punctuation(file)\r\n with open(r\"C:\\Users\\ankit\\OneDrive\\Desktop\\PYTHON\\PROJECT_SENTIMENT_ANALYSIS\\positivewords.txt\") as posi:\r\n for lin in posi:\r\n if lin[0] != ';' and lin[0] != '\\n':\r\n positive_word.append(lin.strip()) \r\n for i in main_file.split(\" \"):\r\n for j in positive_word:\r\n if (j==i):\r\n count+=1\r\n return count\r\n\r\n\r\n\r\n\r\ndef get_neg(file):\r\n count = 0\r\n negative_word = []\r\n main_file = strip_punctuation(file)\r\n \r\n with open(r\"C:\\Users\\ankit\\OneDrive\\Desktop\\PYTHON\\PROJECT_SENTIMENT_ANALYSIS\\negativewords.txt\") as negi:\r\n for lin in negi:\r\n if lin[0] != ';' and lin[0] != '\\n':\r\n negative_word.append(lin.strip())\r\n for i in main_file.split(\" \"): \r\n for j in negative_word: \r\n if j==i:\r\n count+=1\r\n return count\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef run(data):\r\n df = pd.DataFrame(columns=[\"Positive Score\", \"Negative Score\", \"Net Score\"])\r\n i = 1\r\n for sheet in data.sheets():\r\n list_of_lists=[]\r\n for row in range(sheet.nrows): \r\n for column in range (sheet.ncols):\r\n k = str(sheet.cell(row, column).value) \r\n line_list = k.split()\r\n list_of_lists.append(line_list)\r\n \r\n #Positive Score\r\n positive_score = get_pos(list_of_lists)\r\n \r\n #Negative Score\r\n negative_score = get_neg(list_of_lists)\r\n \r\n #Net Score\r\n net_score = positive_score - negative_score\r\n df.loc[i, ['Positive Score']] = positive_score\r\n df.loc[i, ['Negative Score']] = negative_score\r\n df.loc[i, ['Net Score']] = net_score \r\n i = i+1\r\n print(df)\r\n \r\nif __name__ == \"__main__\":\r\n \r\n data = xlrd.open_workbook(r\"C:\\Users\\ankit\\OneDrive\\Desktop\\PYTHON\\PROJECT_SENTIMENT_ANALYSIS\\sample_movie_data.xlsx\")\r\n run(data)\r\n ","sub_path":"data_modification_new.py","file_name":"data_modification_new.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"310122589","text":"##xmlファイルを読み込み、解析\n#\"\"\" xmlを読み込んで、新しいxmlに圧縮した値を作成\nimport xml.etree.ElementTree as ET\n\nFILE = 'pic_ (1).xml'\nfile = open(FILE)\ntree = ET.parse(file)\nroot = tree.getroot()\n\nall_list = []\n\n# 画像ファイル名を取得\nimg_name = root.find('filename').text\n\n# 画像ファイルのサイズ(幅・高さ)を取得\nimg_size = root.find('size')\nimg_w = int(img_size.find('width').text)\nimg_h = int(img_size.find('height').text)\n\nfor obj in root.iter('object'):\n cls = obj.find('name').text\n xmlbox = obj.find('bndbox')\n xmin = int(xmlbox.find('xmin').text)\n ymin = int(xmlbox.find('ymin').text)\n xmax = int(xmlbox.find('xmax').text)\n ymax = int(xmlbox.find('ymax').text)\n\n all_list.append([img_name] + [cls])\n\nprint(all_list)\nprint(\"画像サイズ width = {}, height = {}\".format(img_w,img_h))\n\nstring_ = '''\\\n\n {}\n \n original\n 
original\n XXX\n 0\n \n \n 0\n ?\n \n \n {}\n {}\n 3\n \n \n pudding\n Unspecified\n 1\n 0\n \n {}\n {}\n {}\n {}\n \n \n'''\n\nsize_changed = 500#画像変更サイズ\nx_ = size_changed / img_w#X軸方方向の圧縮倍率\ny_ = size_changed / img_h#X軸方方向の圧縮倍率\n\nre_img_w = round(img_w * x_)\nre_img_h = round(img_h * y_)\nre_xmin = round(xmin * x_)\nre_ymin = round(ymin * y_)\nre_xmax = round(xmax * x_)\nre_ymax = round(ymax * y_)\nprint(re_img_w,re_img_h, re_xmin, re_ymin, re_xmax, re_ymax)\n\nwith open('_9_pic_ (1)_pressed_value.xml', 'w') as f:\n f.write(string_.format(img_name,re_img_w, re_img_h, re_xmin,re_ymin,re_xmax,re_ymax))\n","sub_path":"Python_Make_xml/_9_End_To_End_original_xml_maker.py","file_name":"_9_End_To_End_original_xml_maker.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"283483950","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^post$', views.post, name='post'),\n url(r'^confirm_remove/(?P\\d+)$', views.confirm, name='confirm'),\n url(r'^remove/(?P\\d+)$', views.remove, name='remove'),\n url(r'^course/(?P\\d+)$', views.comment, name='comment'),\n url(r'^comment/(?P\\d+)$', views.postComment, name='postComment'),\n]\n","sub_path":"apps/courses/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"78039832","text":"#!/usr/bin/env python\n# framework for retrieving map related data through weewar API (www.weewar.com)\n# using minidom\n# written by Mike McConnell\n\nimport re\nimport sys\nimport urllib\nfrom xml.dom import minidom\n\n\nclass Map:\n\n def __init__(self, id):\n \"\"\"\n Processes a map through API.\n Takes one argument (map ID), and stores all map information provided\n through API. Also generates counts on terrain types, starting units\n (by player) and starting bases (by player).\n \"\"\"\n # make sure ID passed is integer\n self.id = self._validate_int(id)\n \n # import api url\n baseurl = 'http://weewar.com/api1/map/'\n apiurl = baseurl + str(id)\n try:\n self.map_dom = minidom.parse(urllib.urlopen(apiurl))\n except:\n print(\"Error. 
Bad map link.\")\n\n # set map variables\n self.name = self._get_str_from_tag(self.map_dom, 'name')\n self.initialCredits = self._get_int_from_tag(self.map_dom, 'initialCredits')\n self.perBaseCredits = self._get_int_from_tag(self.map_dom, 'perBaseCredits')\n self.width = self._get_int_from_tag(self.map_dom, 'width')\n self.height = self._get_int_from_tag(self.map_dom, 'height')\n self.maxPlayers = self._get_int_from_tag(self.map_dom, 'maxPlayers')\n self.url = self._get_str_from_tag(self.map_dom, 'url')\n self.thumbnail = self._get_str_from_tag(self.map_dom, 'thumbnail')\n self.preview = self._get_str_from_tag(self.map_dom, 'preview')\n self.revision = self._get_int_from_tag(self.map_dom, 'revision')\n self.creator = self._get_str_from_tag(self.map_dom, 'creator')\n self.creatorProfile = self._get_str_from_tag(self.map_dom, 'creatorProfile')\n\n # regex patterns for grabbing terrain information\n typepattern = re.compile('type=(\"[a-zA-Z]+\")')\n unitpattern = re.compile('startUnit=(\"[a-zA-Z]+\")')\n ownerpattern = re.compile('startUnitOwner=(\"[0-9]\")')\n factionpattern = re.compile('startFaction=(\"[0-9]\")')\n \n # create dictionary to hold all terrain information\n self.terrain = {}\n \n # create list of dictionaries to hold unit and base information\n self.unit = [{} for player in range(self.maxPlayers)]\n self.base = [{} for player in range(self.maxPlayers)]\n\n # for each hex on map, record terrain, unit and base (if present)\n for hex in self.map_dom.getElementsByTagName('terrain'):\n \n # format terrain type information\n cur_hex = hex.toxml()\n type = re.search(typepattern, cur_hex).group(0)\n type = self._strip_tag(type, 'type')\n \n # count terrain type into dictionary\n if self.terrain.has_key(type):\n self.terrain[type] = self.terrain[type] + 1\n else:\n self.terrain[type] = 1\n\n # add starting unit information (if present)\n if re.search(unitpattern, cur_hex) and \\\n re.search(ownerpattern, cur_hex):\n \n # format start unit information\n startunit = re.search(unitpattern, cur_hex).group(0)\n startunit = self._strip_tag(startunit, 'startUnit')\n startunitowner = re.search(ownerpattern, cur_hex).group(0)\n startunitowner = self._strip_tag(startunitowner, 'startUnitOwner')\n\n # make sure unit extracted is an integer\n owner = self._validate_int(startunitowner)\n \n # count unit type into player's dictionary\n if self.unit[owner].has_key(startunit):\n self.unit[owner][startunit] = self.unit[owner][startunit] + 1\n else:\n self.unit[owner][startunit] = 1\n\n # add starting base information (if present)\n if re.search(typepattern, cur_hex) and \\\n re.search(factionpattern, cur_hex):\n\n # format start base information\n startbase = re.search(typepattern, cur_hex).group(0)\n startbase = self._strip_tag(startbase, 'type')\n startbaseowner = re.search(factionpattern, cur_hex).group(0)\n startbaseowner = self._strip_tag(startbaseowner, 'startFaction')\n\n # make sure base extracted is an integer\n owner = self._validate_int(startbaseowner)\n\n # count base type into player's dictionary\n if self.base[owner].has_key(startbase):\n self.base[owner][startbase] = self.base[owner][startbase] + 1\n else:\n self.base[owner][startbase] = 1\n\n # calculate total number of terrain (for percentages)\n self.total_terrain = 0\n for amount in self.terrain.values():\n self.total_terrain = self.total_terrain + amount\n\n def get_terrain_count(self, type):\n \"\"\"\n Takes one argument (terrain type).\n Returns number of terrain units on map. 
If type does not exist, returns\n None.\n \"\"\"\n if self.terrain.has_key(type):\n return self.terrain[type]\n else:\n return None\n\n def get_terrain_percentage(self, type, format=False):\n \"\"\"\n Takes one argument (terrain type). Optional second argument will format\n results to percentage (0.1882 -> '18.82%').\n Returns percentage of terrain (compared to all terrain). If type does\n not exist, returns None.\n \"\"\"\n if self.terrain.has_key(type):\n if (format == False):\n return (float(self.terrain[type]) / self.total_terrain)\n else:\n return \"{0:.2f}%\".format(float(self.terrain[type]) / \n self.total_terrain * 100)\n else:\n return None\n\n def get_starting_unit(self, player):\n \"\"\"\n Takes one argument (player number; starts at 0).\n Returns dictionary containing units player starts with.\n \"\"\"\n if self._validate_int(player) != None:\n return self.unit[player]\n \n def get_starting_base(self, player):\n \"\"\"\n Takes one argument (player number; starts at 0).\n Returns dictionary containing units player starts with.\n \"\"\"\n if self._validate_int(player) != None:\n return self.base[player]\n\n def _get_str_from_tag(self, dom, tag, count=0):\n \"\"\"\n Takes tag in given position and strips out brackets.\n Does not handle non-ascii characters.\n Returns as string.\n \"\"\"\n attr = dom.getElementsByTagName(tag)[count].toxml()\n attr = attr.replace(('<' + tag + '>'),'').replace((''),'')\n try:\n return str(attr)\n except ValueError:\n print(attr, \"could not be formatted as string\")\n\n def _get_int_from_tag(self, dom, tag, count=0):\n \"\"\"\n Takes tag in given position and strips out brackets.\n Returns as integer.\n \"\"\"\n attr = dom.getElementsByTagName(tag)[count].toxml()\n attr = attr.replace(('<' + tag + '>'),'').replace((''),'')\n try:\n return int(attr)\n except ValueError:\n print(attr, \"could not be formatted as integer\")\n \n def _strip_tag(self, tag, text):\n \"\"\"\n Removes quotation marks and tag information for legibility. 
'text' is\n xml variable name.\n Returns as string.\n \"\"\"\n return str(tag.replace('\"', \"\").replace(text + '=', \"\"))\n\n def _validate_int(self, value):\n \"\"\"\n Returns value as integer.\n \"\"\"\n try:\n value = int(value)\n except ValueError:\n print(value, \"is not an integer.\")\n \n return value\n","sub_path":"weemap.py","file_name":"weemap.py","file_ext":"py","file_size_in_byte":7897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"43236913","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nControl Parser\n~~~~~~~~~~~~~~\nThis module handles parsing control statement, which add annotations and namespaces to the document.\n\nSee: https://wiki.openbel.org/display/BLD/Control+Records\n\"\"\"\n\nimport logging\nimport re\n\nfrom pyparsing import Suppress, MatchFirst\nfrom pyparsing import pyparsing_common as ppc\n\nfrom .baseparser import BaseParser, quote, delimitedSet, And, oneOf\nfrom .parse_exceptions import *\nfrom .utils import is_int\nfrom ..constants import BEL_KEYWORD_STATEMENT_GROUP, BEL_KEYWORD_CITATION, BEL_KEYWORD_EVIDENCE, BEL_KEYWORD_SUPPORT, \\\n BEL_KEYWORD_ALL, ANNOTATIONS\nfrom ..constants import CITATION_ENTRIES, EVIDENCE, CITATION_TYPES, BEL_KEYWORD_SET, BEL_KEYWORD_UNSET, CITATION\n\nlog = logging.getLogger('pybel')\n\n\nclass ControlParser(BaseParser):\n def __init__(self, annotation_dicts=None, annotation_expressions=None, citation_clearing=True):\n \"\"\"Builds parser for BEL valid_annotations statements\n\n :param annotation_dicts: A dictionary of {annotation: set of valid values} for parsing\n :type annotation_dicts: dict\n :param annotation_expressions: A dictionary of {annotation: regular expression string}\n :type annotation_expressions: dict\n :param citation_clearing: Should :code:`SET Citation` statements clear evidence and all annotations?\n :type citation_clearing: bool\n \"\"\"\n\n self.citation_clearing = citation_clearing\n\n self.valid_annotations = {} if annotation_dicts is None else annotation_dicts\n self.annotations_re = {} if annotation_expressions is None else annotation_expressions\n self.annotations_re_compiled = {k: re.compile(v) for k, v in self.annotations_re.items()}\n\n self.statement_group = None\n self.citation = {}\n self.evidence = None\n self.annotations = {}\n\n annotation_key = ppc.identifier('key').setParseAction(self.handle_annotation_key)\n\n self.set_statement_group = And([Suppress(BEL_KEYWORD_STATEMENT_GROUP), Suppress('='), quote('group')])\n self.set_statement_group.setParseAction(self.handle_set_statement_group)\n\n self.set_citation = And([Suppress(BEL_KEYWORD_CITATION), Suppress('='), delimitedSet('values')])\n self.set_citation.setParseAction(self.handle_set_citation)\n\n supporting_text_tags = oneOf([BEL_KEYWORD_EVIDENCE, BEL_KEYWORD_SUPPORT])\n self.set_evidence = And([Suppress(supporting_text_tags), Suppress('='), quote('value')])\n self.set_evidence.setParseAction(self.handle_set_evidence)\n\n set_command_prefix = And([annotation_key('key'), Suppress('=')])\n self.set_command = set_command_prefix + quote('value')\n self.set_command.setParseAction(self.handle_set_command)\n\n self.set_command_list = set_command_prefix + delimitedSet('values')\n self.set_command_list.setParseAction(self.handle_set_command_list)\n\n self.unset_command = annotation_key('key')\n self.unset_command.addParseAction(self.handle_unset_command)\n\n self.unset_evidence = supporting_text_tags(EVIDENCE)\n self.unset_evidence.setParseAction(self.handle_unset_evidence)\n\n 
self.unset_citation = Suppress(BEL_KEYWORD_CITATION)\n self.unset_citation.setParseAction(self.handle_unset_citation)\n\n self.unset_statement_group = Suppress(BEL_KEYWORD_STATEMENT_GROUP)\n self.unset_statement_group.setParseAction(self.handle_unset_statement_group)\n\n self.unset_list = delimitedSet('values')\n self.unset_list.setParseAction(self.handle_unset_list)\n\n self.unset_all = Suppress(BEL_KEYWORD_ALL)\n self.unset_all.setParseAction(self.handle_unset_all)\n\n set_tag = Suppress(BEL_KEYWORD_SET)\n unset_tag = Suppress(BEL_KEYWORD_UNSET)\n\n self.set_statements = set_tag + MatchFirst([\n self.set_statement_group,\n self.set_citation,\n self.set_evidence,\n self.set_command,\n self.set_command_list,\n ])\n\n self.unset_statements = unset_tag + MatchFirst([\n self.unset_all,\n self.unset_citation,\n self.unset_evidence,\n self.unset_statement_group,\n self.unset_command,\n self.unset_list\n ])\n\n self.language = self.set_statements | self.unset_statements\n\n BaseParser.__init__(self, self.language)\n\n def validate_annotation_key(self, key):\n if key not in self.valid_annotations and key not in self.annotations_re_compiled:\n raise UndefinedAnnotationWarning(key)\n\n def validate_value(self, key, value):\n if key in self.valid_annotations and value not in self.valid_annotations[key]:\n raise IllegalAnnotationValueWarning(value, key)\n elif key in self.annotations_re_compiled and not self.annotations_re_compiled[key].match(value):\n raise MissingAnnotationRegexWarning(value, key)\n\n def handle_annotation_key(self, s, l, tokens):\n \"\"\"Called on all annotation keys before parsing to validate that it's either enumerated or as a regex\"\"\"\n key = tokens['key']\n\n if self.citation_clearing and not self.citation:\n raise MissingCitationException(s)\n\n self.validate_annotation_key(key)\n return tokens\n\n def handle_set_statement_group(self, s, l, tokens):\n self.statement_group = tokens['group']\n return tokens\n\n def handle_set_citation(self, s, l, tokens):\n self.clear_citation()\n\n values = tokens['values']\n\n if not (3 <= len(values) <= 6):\n raise InvalidCitationException(s)\n\n if values[0] not in CITATION_TYPES:\n raise InvalidCitationType(values[0])\n\n if values[0] == 'PubMed' and not is_int(values[2]):\n raise InvalidPubMedIdentifierWarning(values[2])\n\n self.citation = dict(zip(CITATION_ENTRIES, values))\n\n return tokens\n\n def handle_set_evidence(self, s, l, tokens):\n self.evidence = tokens['value']\n return tokens\n\n def handle_set_command(self, s, l, tokens):\n key = tokens['key']\n value = tokens['value']\n\n self.validate_value(key, value)\n\n self.annotations[key] = value\n return tokens\n\n def handle_set_command_list(self, s, l, tokens):\n key = tokens['key']\n values = tokens['values']\n\n for value in values:\n self.validate_value(key, value)\n\n self.annotations[key] = set(values)\n return tokens\n\n def handle_unset_statement_group(self, s, l, tokens):\n if self.statement_group is None:\n raise MissingAnnotationKeyWarning(BEL_KEYWORD_STATEMENT_GROUP)\n self.statement_group = None\n return tokens\n\n def handle_unset_citation(self, s, l, tokens):\n if not self.citation:\n raise MissingAnnotationKeyWarning(BEL_KEYWORD_CITATION)\n\n self.clear_citation()\n\n return tokens\n\n def handle_unset_evidence(self, s, l, tokens):\n if self.evidence is None:\n raise MissingAnnotationKeyWarning(tokens[EVIDENCE])\n self.evidence = None\n return tokens\n\n def validate_unset_command(self, key):\n if key not in self.annotations:\n raise 
MissingAnnotationKeyWarning(key)\n\n def handle_unset_command(self, s, l, tokens):\n key = tokens['key']\n self.validate_unset_command(key)\n del self.annotations[key]\n return tokens\n\n def handle_unset_list(self, s, l, tokens):\n for key in tokens['values']:\n if key in {BEL_KEYWORD_EVIDENCE, BEL_KEYWORD_SUPPORT}:\n self.evidence = None\n else:\n self.validate_unset_command(key)\n del self.annotations[key]\n\n return tokens\n\n def handle_unset_all(self, s, l, tokens):\n self.clear()\n return tokens\n\n def get_annotations(self):\n \"\"\"\n\n :return: The currently stored BEL annotations\n :rtype: dict\n \"\"\"\n return {\n EVIDENCE: self.evidence,\n CITATION: self.citation.copy(),\n ANNOTATIONS: self.annotations.copy()\n }\n\n def clear_citation(self):\n self.citation.clear()\n\n if self.citation_clearing:\n self.evidence = None\n self.annotations.clear()\n\n def clear(self):\n \"\"\"Clears the statement_group, citation, evidence, and annotations\"\"\"\n self.statement_group = None\n self.citation.clear()\n self.evidence = None\n self.annotations.clear()\n","sub_path":"src/pybel/parser/parse_control.py","file_name":"parse_control.py","file_ext":"py","file_size_in_byte":8538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"467231781","text":"import json\nfrom uai.utils.logger import uai_logger\nfrom uai.operation.base_operation import BaseUaiServiceOp\nfrom uai.api.get_uai_available_env_pkg import GetUAIAvailableEnvPkgOp\nfrom uai.api.check_uai_base_img_exist import CheckUAIBaseImgExistOp\n\nUCLOUD_API_URL = 'http://api.ucloud.cn'\n\nclass UaiServiceCheckBaseImgExistOp(BaseUaiServiceOp):\n\n def __init__(self, parser):\n super(UaiServiceCheckBaseImgExistOp, self).__init__(parser)\n\n def _add_args(self, parser):\n super(UaiServiceCheckBaseImgExistOp, self)._add_args(parser)\n args_parser = parser.add_argument_group()\n\n args_parser.add_argument(\n '--os',\n type=str,\n default='ubuntu',\n required=False,\n help='the type of the docker os, default as ubuntu')\n args_parser.add_argument(\n '--python_v',\n type=str,\n default='python',\n required=False,\n help='the python version of the docker, such as python-2.7.6')\n\n if hasattr(self, 'platform') is True:\n args_parser.add_argument(\n '--ai_arch_v',\n type=str,\n default=self.platform,\n required=False,\n help='AI architecture and specific version')\n else:\n args_parser.add_argument(\n '--ai_arch_v',\n type=str,\n default='tensorflow',\n required=False,\n help='AI architecture and specific version, such as tensorflow-1.1.0')\n\n #add other params in subclasses#\n\n def _parse_args(self):\n super(UaiServiceCheckBaseImgExistOp, self)._parse_args()\n self.os_version = self.params['os'] if 'os' in self.params else ''\n self.python_version = self.params['python_v'] if 'python_v' in self.params else ''\n self.ai_frame_version = self.params['ai_arch_v'] if 'ai_arch_v' in self.params else ''\n # add other params in subclasses#\n\n def _translate_pkg_to_id(self, pkgtype, pkglist):\n resultlist = []\n uai_logger.info(\"Start translate {0} package to their id, packages: {1}\".format(pkgtype, pkglist))\n for avpkg in self.rsp['PkgSet']:\n for pkg in pkglist:\n if pkgtype == 'os' or pkgtype == 'python_v' or pkgtype == 'ai_arch_v':\n versionsplit = pkg.rfind('-')\n if versionsplit >= 0:\n if avpkg[\"PkgName\"] == pkg[:versionsplit] and (\n avpkg[\"PkgVersion\"] == \"\" or avpkg[\"PkgVersion\"] == pkg[versionsplit + 1:]):\n pkglist.remove(pkg)\n resultlist.append(avpkg[\"PkgId\"])\n elif 
versionsplit < 0:\n if avpkg[\"PkgName\"] == pkg:\n pkglist.remove(pkg)\n resultlist.append(avpkg[\"PkgId\"])\n else:\n if avpkg[\"PkgName\"] == pkg:\n pkglist.remove(pkg)\n resultlist.append(avpkg[\"PkgId\"])\n\n if len(pkglist) != 0:\n uai_logger.error(\"Some {0} package is not supported: {1}\".format(pkgtype, pkglist))\n raise RuntimeError(\"Some {0} package is not supported: {1}\".format(pkgtype, pkglist))\n\n uai_logger.info(\"End translate {0} package to their id, result: {1}\".format(pkgtype, resultlist))\n return resultlist\n\n def cmd_run(self, params):\n super(UaiServiceCheckBaseImgExistOp, self).cmd_run(params)\n envOp = GetUAIAvailableEnvPkgOp(public_key=self.public_key,\n private_key=self.private_key,\n project_id=self.project_id,\n region=self.region,\n zone=self.zone)\n succ, self.rsp = envOp.call_api()\n if self.rsp[\"RetCode\"] != 0:\n uai_logger.error(\"Fail: [checkBase][getEnv] {0}\".format(self.rsp))\n raise RuntimeError(\"Fail: [checkBase][getEnv] {0}\".format(self.rsp))\n\n self.os_version = self._translate_pkg_to_id('os', self.params['os'].split(','))[0]\n self.python_version = self._translate_pkg_to_id('python_v', self.params['python_v'].split(','))[0]\n self.ai_frame_version = self._translate_pkg_to_id('ai_arch_v', self.params['ai_arch_v'].split(','))[0]\n\n checkOp = CheckUAIBaseImgExistOp(public_key=self.public_key,\n private_key=self.private_key,\n project_id=self.project_id,\n region=self.region,\n zone=self.zone,\n os_version=self.os_version,\n python_version=self.python_version,\n ai_frame_version=self.ai_frame_version)\n succ, rsp = checkOp.call_api()\n return succ, rsp\n # add other params in subclasses#","sub_path":"uai/operation/checkbase.py","file_name":"checkbase.py","file_ext":"py","file_size_in_byte":5057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"228419709","text":"from django.contrib import admin\nfrom .models import ContactForm\n\n__author__ = \"Aniruddha Ravi\"\n__license__ = \"MIT\"\n__version__ = \"1.0.3\"\n__email__ = \"aniruddha.ravi@gmail.com\"\n__status__ = \"Development\"\n\n\nclass ContactFormAdmin(admin.ModelAdmin):\n class Meta:\n model = ContactForm\n\nadmin.site.register(ContactForm, ContactFormAdmin)\n","sub_path":"mvp/contact/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"442844023","text":"##############################################################################\n# FILE: ex4.py\n# WRITER: Gregory Pasternak griffonn 327148417\n# EXERCISE : intro2cs ex4 2013-2014\n# Description\n# Some useful functions to estimate on-retirement expenses, to get the best\n# home one can afford from these expenses. 
Exercising binary search and bubble\n# sort algorhythms - very basic and very important algorhythms.\n##############################################################################\n\n\n# Unmagic numbers\nNAME, PRICE, OPPONENTS = 0, 1, 2\n\n# Comebacks from ex3:\n\n\ndef variable_pension(salary, save, growth_rates):\n \"\"\"A function that calculates the value of a retirement savings each year\n based on the worker's salary, savings, and a list of growth rates values.\n Number of working years is equal to the length of growth_rates.\n\n Args:\n salary: the amount of money you earn each year - non-negative float.\n save: the percent of your salary to save in the investment account\n each working year - non-negative float between 0 and 100\n growth_rates: a list of annual growth percentages in your investment\n account - a list of floats larger than or equal to -100. The\n length of the list defines the number of years you plan to work.\n\n Returns: a list whose values are the size of your retirement account at\n the end of each year.\n\n In case of bad input - values are out of range:\n returns None\n\n You can assume that the types of the input arguments are correct.\"\"\"\n\n # If no years provided, nothing to talk about\n if type(growth_rates) == list:\n if len(growth_rates) == 0:\n return []\n else:\n return None\n\n # Validate right values\n if (salary < 0) or (save > 100) or (save < 0) or \\\n any([float(x) < -100 for x in growth_rates]):\n return None\n\n # Create result list and first element in it - first year formula\n F = [salary * save * 0.01]\n\n # Every next year\n for x in range(len(growth_rates) - 1):\n F.append(F[x] *\n (1 + float(growth_rates[x+1]) * 0.01) + salary * save * 0.01)\n\n return F\n\n\ndef post_retirement(savings, growth_rates, expenses):\n \"\"\"A function that calculates the account status after retirement,\n assulog constant expenses and no income.\n\n Args:\n savings: the initial amount of money in your savings account -\n positive float.\n growth_rates: a list of annual growth percentages in your investment\n account - a list of floats larger than or equal to -100.\n expenses: the amount of money you plan to spend each year during\n retirement - non-negative float\n\n Returns: a list of your retirement account value at the end of each year.\n\n Note: in case of a negative balance - the growth rate will change into\n rate on the debt\n \n In case of bad input - values are out of range:\n returns None\n\n You can assume that the types of the input arguments are correct.\"\"\"\n # Validate right values\n if (savings < 1) or (expenses < 0) or \\\n any([float(x) < -100 for x in growth_rates]):\n return None\n\n # If you think you lived enough and plan to die on the last work day\n if len(growth_rates) < 1:\n return []\n\n # Create result list and first element in it\n F = [savings * (1 + growth_rates[0] * 0.01) - expenses]\n\n # Every next year\n for x in range(len(growth_rates) - 1):\n F.append(F[x] * (1 + growth_rates[x + 1] * 0.01) - expenses)\n\n return F\n\n\ndef binary_like(fn, total, growth_rates, e, exp_list=None):\n \"\"\"\n Returns:\n If provided exp_list, returns index of the nearest item to expenses.\n Otherwise, returns float - needed expenses under provided\n circumstances so last object of fn() resulting list will be positive.\n\n @param fn: function callable, here assumed it is always post_retirement.\n @param total: positive int\n @param growth_rates: list of floats\n @param e: epsilon\n @param exp_list: list of floats\n\n Assuming all 
arguments are valid.\n \"\"\"\n affordable = 0.0 # default if no result found\n lo = 0 # lower bound\n if exp_list:\n hi = len(exp_list) # upper bound\n while hi > lo: # something between the bounds\n needle = (hi + lo) // 2\n expenses = exp_list[needle] # middle between the bounds\n if fn(total, growth_rates, expenses)[-1] > 0:\n affordable = expenses\n lo = needle + 1 # raise lower bound\n elif fn(total, growth_rates, expenses)[-1] < 0:\n hi = needle # drop upper bound\n else:\n break # stop when 0 < exp < epsilon\n else:\n hi = fn(total, growth_rates, 0)[-1] # upper bound\n while hi > lo: # something between the bounds\n expenses = (hi + lo) / 2 # middle between the bounds\n if fn(total, growth_rates, expenses)[-1] > e:\n lo = expenses # raise lower bound\n elif fn(total, growth_rates, expenses)[-1] < 0:\n hi = expenses # drop upper bound\n else:\n affordable = expenses\n break # stop when 0 < expenses < epsilon\n return affordable\n\n\ndef live_like_a_king(salary, save, pre_retire_growth_rates,\n post_retire_growth_rates, epsilon):\n \"\"\"Find the maximal expenses you may spend during your lifetime \n\n A function that calculates what is the maximal annual expenses you may\n spend each year and not enter into debts. You may calculate it using\n binary search or using arithmetic.\n Specify in your README in which method you've implemented the function.\n\n Args:\n salary: the amount of money you make each year - non-negative float\n save: the percent of your salary to save in the investment account\n each working year - non-negative float between 0 and 100\n pre_retire_growth_rates: a list of annual growth percentages in your\n investment account - list of floats larger than or equal to -100\n post_retire_growth_rates: a list of annual growth percentages\n on investments while you are retired - a list of floats larger\n than or equal to -100. In case of empty list return None\n epsilon: an upper bound on the money must remain in the account\n on the last year of retirement - float larger than 0\n\n Returns: maximal expenses value you found (such that the amount of\n money left in your account will be positive but smaller than epsilon)\n\n In case of bad input - values are out of range:\n returns None\n\n You can assume that the types of the input arguments are correct.\"\"\"\n\n # Shortcuts\n pre = [x for x in pre_retire_growth_rates]\n post = [x for x in post_retire_growth_rates]\n\n # Validate\n if salary < 0 or not (0 <= save < 100) or epsilon <= 0 or \\\n any([float(x) < -100 for x in pre]) or \\\n any([float(x) < -100 for x in post]) or len(post) < 1:\n return None\n\n # Didn't work or didn't save?\n if salary == 0 or save == 0 or len(pre) == 0:\n return 0.0\n\n # Get total savings on pension day\n savings = 0\n for year in range(len(pre)):\n savings = savings * (1 + pre[year] * 0.01) + (salary * save * 0.01)\n\n # # Uncomment this to see binary search in action:\n # return binary_like(post_retirement, savings, post, epsilon)\n\n # Using math - real magic:\n \"\"\"Every year value after used expenses eq. \"last year\" minus expenses\n divided by coefficient, that depends on growth rate of that same year.\n The very last year is is total changed money divided by total changed\n coefficient. 
+1s are there in order not to loose already stored value.\"\"\"\n coefficient = 0\n for year in range(len(post)):\n savings *= (1 + post[year] * 0.01) # Change savings according to year\n coefficient = 1 + coefficient * (1 + post[year] * 0.01)\n # This is yearly coefficient that shows something\n\n return savings / coefficient\n\n\ndef bubble_sort_2nd_value(tuple_list):\n \"\"\"\n Sort a list of tuples using bubble sort algorithm\n\n Args:\n tuples_list - a list of tuples, where each tuple is composed of a\n string value and a float value - ('house_1',103.4)\n\n Returns: a NEW list that is sorted by the 2nd value of the tuple,\n the numerical one. The sorting direction should be from the lowest to the\n largest. Sort should be stable (if values are equal, use original order)\n\n You can assume that the input is correct.\"\"\"\n\n tl = tuple_list.copy() # do not work with provided container, they said.\n changed = 1 # should start at least once\n while changed:\n changed = 0\n for x in range(len(tl) - 1):\n if tl[x][1] > tl[x + 1][1]: # if next is smaller, change places\n tl[x], tl[x + 1] = tl[x + 1], tl[x]\n changed = 1 # and note that\n\n return tl\n\n\ndef choosing_retirement_home(savings, growth_rates, retirement_houses):\n \"\"\"Find the most expensive retirement house one can afford.\n\n Find the most expensive, but affordable, retirement house.\n Implement the function using binary search.\n\n Args:\n savings: the initial amount of money in your savings account - \n non-negative integer\n growth_rates: a list of annual growth percentages in your\n investment account - list of floats larger than or equal to -100\n retirement_houses: a list of tuples of retirement_houses, where the\n first value is a string - the name of the house and the second is\n the annual rent of it - non-negative float\n\n Returns: a string - the name of the chosen retirement house\n Returns None if can't afford any house.\n\n You need to test the legality of savings and growth_rates\n but you can assume legal retirement_house list \n You can assume that the types of the input are correct\"\"\"\n\n # Validate\n if len(retirement_houses) < 1 or len(growth_rates) < 1:\n return None\n if savings < 1 or any([float(x) < -100 for x in growth_rates]):\n return None\n\n # Sort houses by price\n retirement_houses = bubble_sort_2nd_value(retirement_houses)\n house_prices = [house[PRICE] for house in retirement_houses] # price list\n\n # Get biggest price one can spend without entering into debts\n affordable_price = binary_like(post_retirement, savings,\n growth_rates, None, house_prices)\n\n # If found such a house, return its name, otherwise None.\n return retirement_houses[house_prices.index(affordable_price)][NAME]\\\n if affordable_price else None\n\n\ndef get_value_key(value=0):\n \"\"\"Returns a function that calculates the new value of a house\n\n Args:\n value: the value added per opponent - float, default value is 0\n\n This function returns a function that accepts triple containing\n (house, annual rent, number of opponents) and returns the new value of\n this house - annual_rent+value*opponents\n\n You can assume that the input is correct.\"\"\"\n return lambda house: house[PRICE] + value * house[OPPONENTS]\n\n\ndef choose_retirement_home_opponents(budget, key, retirement_houses):\n \"\"\"Find the best retirement house that is affordable and fun\n\n A function that returns the best retiremnt house to live in such that:\n * the house is affordable and\n * its value (annual_rent+value*opponents) is the 
highest\n\n Args:\n annual_budget: the amount of money you can expand per year - positive\n float\n key: a function of the type returned by get_value_key\n retirement_houses: a list of houses (tuples), where the first value\n is a string - the name of the house, the second is the annual rent\n on it - non-negative float, and the third is the number of\n battleship opponents the home hosts - non-negative int\n \n Returns: the name of the retirement home which provides the best value and\n which is affordable.\n\n You need to test the legality of annual_budget,\n but you can assume legal retirement_house list \n You can assume that the types of the input are correct\"\"\"\n\n # Validate\n if len(retirement_houses) < 1:\n return None\n if budget < 1:\n return None\n\n maxfun, best_house = 0, None # initialize\n # Get biggest price one can spend without entering into debts\n for house in retirement_houses:\n if budget > house[PRICE]: # affordable\n fun = key(house)\n if fun > maxfun: # and more fun\n maxfun = fun\n best_house = house # so write it down\n return best_house[NAME] if best_house else None\n","sub_path":"ex4/ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":12824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"94923814","text":"# -*- coding: utf-8 -*-\n\nfrom django.db import transaction\nfrom django.db import connection\n\nfrom . import models\nimport reversion\n\n\nclass UserStoriesService(object):\n @transaction.atomic\n def bulk_insert(self, project, user, data, callback_on_success=None):\n items = filter(lambda s: len(s) > 0,\n map(lambda s: s.strip(), data.split(\"\\n\")))\n\n for item in items:\n obj = models.UserStory.objects.create(subject=item, project=project, owner=user,\n status=project.default_us_status)\n if callback_on_success:\n callback_on_success(obj, True)\n\n @transaction.atomic\n def bulk_update_order(self, project, user, data):\n cursor = connection.cursor()\n\n sql = \"\"\"\n prepare bulk_update_order as update userstories_userstory set \"order\" = $1\n where userstories_userstory.id = $2 and\n userstories_userstory.project_id = $3;\n \"\"\"\n\n cursor.execute(sql)\n for usid, usorder in data:\n cursor.execute(\"EXECUTE bulk_update_order (%s, %s, %s);\",\n (usorder, usid, project.id))\n cursor.close()\n","sub_path":"taiga/projects/userstories/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"135635285","text":"import configparser\nimport random\nimport string\nimport base64\n\nclass Salting():\n\n def read_salt_policy(self):\n # reads in password policy, returns variables as a list. 
Written by KW\n # password_policy = open('password_policy.txt', 'r')\n policy = configparser.ConfigParser()\n policy.read('password_policy.txt')\n salt_lowercase = policy.getint('Policy', 'salt_lowercase')\n salt_uppercase = policy.getint('Policy', 'salt_uppercase')\n salt_numbers = policy.getint('Policy', 'salt_numbers')\n\n return [int(salt_lowercase), int(salt_uppercase), int(salt_numbers)]\n\n\n def generate_salt(self):\n\n # reading through the password policy and looping through to extract necessary values to check and generates the password\n policy_checklist = self.read_salt_policy()\n max_length = sum(policy_checklist)\n\n # Generating the password using random and string modules\n salt = \"\"\n for character in range(max_length):\n\n random_character = random.randint(1,4)\n\n if random_character == 1:\n salt += random.choice(string.ascii_lowercase)\n elif random_character == 3:\n salt += random.choice(string.ascii_uppercase)\n else:\n salt += random.choice(string.digits)\n\n return salt\n\n def generate_base64_salt(self, salt):\n salt_64 = base64.b64encode(salt.encode())\n salt_final = salt_64.decode()\n #print(\"This is the final salt\" + salt_final)\n return salt_final\n","sub_path":"Account-Generator/hashing/salting.py","file_name":"salting.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"362868462","text":"from django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom . import views\n\nurlpatterns = [\n # General links\n path('', views.index, name='homepage'),\n path('contact/', views.contact, name='contact'),\n path('index/', views.index, name='homepage'),\n\n # Course-based links\n path(\n 'course/create/',\n views.CoursesCreate.as_view(),\n name='course_create'),\n path(\n 'course//',\n views.course_detail,\n name='course_detail'\n ),\n path(\n 'course//update/',\n views.CoursesUpdate.as_view(),\n name='course_update'\n ),\n path(\n 'course//delete/',\n views.CoursesDelete.as_view(),\n name='course_delete'\n ),\n\n # JAKE - PATH for Project Detailview\n path(\n 'course//addproject/',\n views.ProjectCreate,\n name='ProjectCreate'\n ),\n\n # PREV:\n path(\n 'assignment//',\n views.assignment_detail,\n name='assignment_detail'\n ),\n path(\n 'assignment//update/',\n views.ProjectUpdate,\n name='ProjectUpdate'\n ),\n\n\n path(\n 'assignment//addsubmission/',\n views.model_form_upload,\n name='model_form_upload'\n ),\n path(\n '/viewsubmission/',\n views.submission_detail,\n name='submission_detail'\n ),\n path(\n 'deletesubmission/',\n views.SubmissionDelete.as_view(),\n name='submission_delete'\n ),\n path(\n 'downloadsubmission/',\n views.submission_download,\n name='submission_download'\n ),\n\n\n # Invite-based links\n path(\n 'course/invite/',\n views.create_invite,\n name='invite_create'\n ),\n path(\n 'invite//delete/',\n views.InviteDelete.as_view(),\n name='invite_delete'\n ),\n path(\n 'email/',\n views.email,\n name='email'\n ),\n\n #JAMES - Links for Help pages\n path(\n 'help/instructor/',\n views.instructor_help,\n name='instructor_help'\n ),\n]\n\nif settings.DEBUG:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"autograder/personal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"533562354","text":"# -*- coding: utf-8 -*-\n'''\nJust a simple 
test routine for checking if the integration scheme works properly.\n\n'''\n\nimport unittest\nimport copy\nimport numpy as np\nimport scipy as sp\n\nimport amfe\n\n#%%\n\nclass DynamicalSystem():\n\n def __init__(self, K, M, f_ext):\n self.q = []\n self.t = []\n self.K_int = K\n self.M_int = M\n self.D_int = M*0\n self.f_ext = f_ext\n\n def S_and_res(self, q, dq, ddq, dt, t, beta, gamma):\n S = self.K_int + 1/(beta*dt**2)*self.M_int\n f_ext = self.f_ext(q, dq, t)\n res = self.M_int @ ddq + self.K_int @ q - f_ext\n return S, res, f_ext\n\n def K(self):\n return self.K_int\n\n def M(self):\n return self.M_int\n\n def D(self):\n return self.D_int\n\n def write_timestep(self, t, q):\n self.t.append(t)\n self.q.append(q)\n\n def clear_timesteps(self):\n pass\n\n\nclass IntegratorTest(unittest.TestCase):\n def setUp(self):\n c1 = 10\n c2 = 20\n c3 = 10\n c4 = 0\n K = np.array([[c1 + c2,-c2,0],\n [-c2 , c2 + c3, -c3],\n [0, -c3, c3 + c4]])\n\n M = np.diag([3,1,2])\n\n omega = 2*np.pi*1\n amplitude = 5\n def f_ext(q, dq, t):\n return np.array([0, 0., amplitude*np.cos(omega*t)])\n\n\n self.my_system = DynamicalSystem(K, M, f_ext)\n\n self.q_start = np.array([1, 0, 2.])*0\n self.dq_start = np.zeros_like(self.q_start)\n\n self.T = np.arange(0,5,0.05)\n\n def test_linear_vs_nonlinear_integrator(self):\n dt = 1E-3\n alpha = 0.1\n system1 = self.my_system\n system2 = copy.deepcopy(self.my_system)\n\n amfe.integrate_nonlinear_system(system1, self.q_start, self.dq_start,\n self.T, dt, alpha)\n\n amfe.integrate_linear_system(system2, self.q_start, self.dq_start,\n self.T, dt, alpha)\n\n q_nl = sp.array(system1.q)\n t_nl = sp.array(system1.t)\n q_lin = sp.array(system2.q)\n t_lin = sp.array(system2.t)\n np.testing.assert_allclose(t_nl, t_lin, atol=1E-10)\n # why does that work and below not?\n assert(np.any(np.abs(q_nl - q_lin) < 1E-3))\n # np.testing.assert_allclose(q_nl, q_lin, rtol=1E-1, atol=1E-4)\n return q_nl, q_lin, t_lin\n\nif __name__ == '__main__':\n my_integrator_test = IntegratorTest()\n my_integrator_test.setUp()\n q_nl, q_lin, t = my_integrator_test.test_linear_vs_nonlinear_integrator()\n from matplotlib import pyplot\n pyplot.plot(t, q_nl)\n pyplot.plot(t, q_lin)\n\n\n #%%","sub_path":"tests/test_integrator.py","file_name":"test_integrator.py","file_ext":"py","file_size_in_byte":2627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"21933125","text":"import threading as thr\nimport time\nimport random\n\n\nclass Jorry(thr.Thread):\n def __init__(self, x, y):\n # Siempre debos inicializar el super cuando heredamos\n super().__init__()\n self.x = x\n self.y = y\n\n def mover(self):\n mov = int(input(\"Ingresa la cantidad de casillas: \"))\n self.x += mov\n self.y += mov\n print(f'Jorry se movió a {self.x}, {self.y}\\n')\n\n def run(self):\n for i in range(1):\n self.mover()\n time.sleep(1)\n\n\nclass MrMeeseeks(thr.Thread):\n def __init__(self, numero, x, y):\n super().__init__()\n self.x = x\n self.y = y\n self.numero = numero\n self.daemon = True # esto hace que sea un daemon_thread\n\n def mover(self):\n mov = random.randint(1, 2)\n self.x += mov\n self.y += mov\n print(f'El Mr Meeseeks{self.numero} se movió a {self.x}, {self.y}\\n')\n\n def run(self):\n for i in range(3):\n self.mover()\n time.sleep(1.5)\n\n\nif __name__ == \"__main__\":\n meeseeks1 = MrMeeseeks(\"1\", 0, 0)\n meeseeks2 = MrMeeseeks(\"2\", 0, 0)\n jorry = Jorry(0, 0)\n\n jorry.start()\n meeseeks1.start()\n 
meeseeks2.start()\n","sub_path":"Syllabus/Ayudantias/AY03/sol_daemons.py","file_name":"sol_daemons.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"447948842","text":"\"\"\"\npython extract_frame.py \n\"\"\"\nimport glob, sys, os\nimport numpy as np\nrng = np.random.RandomState(65537)\nstyles = [\"starrynight\", \"lamuse\", \"feathers\", \"composition\", \"candy\", \"udnie\", \"mosaic\"]\ncontents = ['sintel_ambush_1', 'sintel_bamboo_3', 'sintel_market_1', 'sintel_mountain_2', 'sintel_PERTURBED_shaman_1', 'sintel_tiger', 'sintel_temple_1', 'sintel_wall', 'sintel_cave_3', 'sintel_market_4', 'davis_man-bike', 'davis_slackline', 'davis_cats-car', 'davis_girl-dog', 'davis_helicopter', 'davis_guitar-violin', 'davis_subway', 'davis_gym', 'davis_horsejump-stick', 'davis_tandem']\n\nrng_dic = {c:{} for c in contents}\nfor c in contents:\n rng_dic[c] = {s:rng.randint(0, 1000000) for s in styles}\n\ndef find_rng(name):\n for c in contents:\n if c in name: break\n for s in styles:\n if s in name: break\n return rng_dic[c][s]\n\nin_dir, out_dir = sys.argv[1:]\nfiles = glob.glob(in_dir + \"/*.mp4\")\nfiles.sort()\n# temp dir\nos.system(\"mkdir temp\")\nbasecmd = \"ffmpeg -loglevel panic -i %s -qscale:v 2 temp/%s\"\nos.system(\"rm %s/*.jpg\" % out_dir)\nfor f in files:\n os.system(\"rm temp/*.jpg\")\n ind = f.rfind(\"/\")\n name = f[ind+1:]\n video_name = name.replace(\".mp4\", \"\")\n cmd = basecmd % (f, video_name)\n cmd = cmd + \"_%05d.jpg\"\n print(cmd)\n os.system(cmd)\n num = len(os.listdir(\"temp\"))\n ind = find_rng(name) % num\n print(\"mv temp/%s_%05d.jpg %s/\" % (video_name, ind, out_dir))\n os.system(\"mv temp/%s_%05d.jpg %s/\" % (video_name, ind, out_dir))\n","sub_path":"extract_frame.py","file_name":"extract_frame.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"309786625","text":"from urllib.request import urlopen as http_req\nfrom urllib.request import Request\nfrom bs4 import BeautifulSoup as Soup\nfrom urllib import parse\nimport time\nimport sys\n\n\ndef get_soup(search_term, start):\n search_term = parse.quote_plus(search_term)\n url = f'https://www.google.dk/search?q={search_term}&start={start}'\n\n req = Request(url, data=None, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'})\n\n u_client = http_req(req)\n page_html = u_client.read()\n u_client.close()\n page_soup = Soup(page_html, \"html.parser\")\n results = page_soup.find(\"body\")\n return results\n\n\ndef check_anchor(href, site, start):\n if href.startswith(site):\n if start == 0:\n start = 1\n else:\n start = 1 + int((start / 10))\n print(f\"Search term found on page: {start}\")\n sys.exit()\n\n\ndef start_scraping(search_term, start, site):\n soup = get_soup(search_term, start)\n\n results = soup.findAll(\"div\", {\"class\": 'g'})\n\n for result in results:\n anchor = None\n if result is not None:\n anchor = result.find(\"div\")\n if anchor is not None:\n anchor = anchor.find(\"div\", {\"class\": 'rc'})\n if anchor is not None:\n anchor = anchor.find(\"div\", {\"class\": 'r'})\n if anchor is not None:\n anchor = anchor.find(\"a\")\n if anchor is not None:\n if anchor['href'] is not None:\n check_anchor(anchor['href'], site, start)\n\n if len(results) == 0:\n sys.exit(\"Your search was not found\")\n\n if len(results) != 0:\n 
print(\"Sleeping 15 seconds\")\n time.sleep(15)\n print(f\"Start: {start}\")\n start_scraping(search_term, start + 10, site)\n","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"582520573","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\nGranny-Clanger:~ stichbury$ /Users/stichbury/anaconda/envs/py3/bin/spyder ; exit;\n\n\nThis is a temporary script file.\n\"\"\"\n\n# Imports\nimport pandas as pd\nimport deutils as de\nimport numpy as np\n\n# weights\nSparta_Core = 'WghtUniversal_Core'\n\n# Read DE15 data\nde15 = de.read_sparta_survey(6)\nde15_meta = de.meta(6)\n\n# Read DE14 data\nde14 = de.read_sparta_survey(5)\nde14_meta = de.meta(5)\n\n# Read DE13 data\nde13 = de.read_sparta_survey(4)\nde13_meta = de.meta(4)\n\n\n#GAM1: What types of platform do you develop games for?\ngam1 = de.dist(de15, de15_meta, 'GAM1', 'WghtUniversal_Game')\ngam1pc = de.calc_pct(gam1)\ngam1pc.to_clipboard()\n\n#GAM1\ngam1_14 = de.dist(de14, de14_meta, 'GAM1', 'WghtUniversal_Game')\ngam1_14pc = de.calc_pct(gam1_14)\ngam1_14pc.to_clipboard()\n\n#GAM1\ngam1_13 = de.dist(de13, de13_meta, 'GAM1', 'WghtUniversal_Game')\ngam1_13pc = de.calc_pct(gam1_13)\ngam1_13pc.to_clipboard()\n\n#GAM2: Which consoles do you target with your games?\ngam2 = de.dist(de15, de15_meta, 'GAM2', 'WghtUniversal_Game')\ngam2pc = de.calc_pct(gam2)\ngam2pc.to_clipboard()\n\n#GAM1: Professional Devs - What types of platform do you develop games for?\nde15['GAMES'] = de15[['CR2_8_1']].any(axis=1).astype(float).replace(0,np.nan)\npro_game_dev=de15.GAMES==1\n\ngam1PRO = de.dist(de15[pro_game_dev], de15_meta, 'GAM1', 'WghtUniversal_Game')\ngam1PROpc = de.calc_pct(gam1PRO)\ngam1PROpc.to_clipboard()\n\nde14['GAMES'] = de14[['CR2_8_1']].any(axis=1).astype(float).replace(0,np.nan)\npro_game_dev=de14.GAMES==1\n\ngam1PRO_14 = de.dist(de14[pro_game_dev], de14_meta, 'GAM1', 'WghtUniversal_Game')\ngam1PRO_14pc = de.calc_pct(gam1PRO_14)\ngam1PRO_14pc.to_clipboard()\n\nde13['GAMES'] = de13[['CR2_8_1']].any(axis=1).astype(float).replace(0,np.nan)\npro_game_dev=de13.GAMES==1\n\ngam1PRO_13 = de.dist(de13[pro_game_dev], de13_meta, 'GAM1', 'WghtUniversal_Game')\ngam1PRO_13pc = de.calc_pct(gam1PRO_13)\ngam1PRO_13pc.to_clipboard()\n\n#GAM2: Which consoles do you target with your games?\ngam2 = de.dist(de15, de15_meta, 'GAM2', 'WghtUniversal_Game')\ngam2pc = de.calc_pct(gam2)\ngam2pc.to_clipboard()\n\n\n#GAM2\ngam2_14 = de.dist(de14, de14_meta, 'GAM2', 'WghtUniversal_Game')\ngam2_14pc = de.calc_pct(gam2_14)\ngam2_14pc.to_clipboard()\n\n#GAM2\ngam2_13 = de.dist(de13, de13_meta, 'GAM2', 'WghtUniversal_Game')\ngam2_13pc = de.calc_pct(gam2_13)\ngam2_13pc.to_clipboard()\n\n#GAM3: Which Smart TVs or set-top boxes do you target with your games?\ngam3 = de.dist(de15, de15_meta, 'GAM3', 'WghtUniversal_Game')\ngam3pc = de.calc_pct(gam3)\ngam3pc.to_clipboard()\n\n#GAM3\ngam3_14 = de.dist(de14, de14_meta, 'GAM3', 'WghtUniversal_Game')\ngam3_14pc = de.calc_pct(gam3_14)\ngam3_14pc.to_clipboard()\n\n#GAM3\ngam3_13 = de.dist(de13, de13_meta, 'GAM3', 'WghtUniversal_Game')\ngam3_13pc = de.calc_pct(gam3_13)\ngam3_13pc.to_clipboard()\n\n#GAM5: Which programming languages do you use to write code that runs on the client side for your games?\ngam5 = de.dist(de15, de15_meta, 'GAM5', 'WghtUniversal_Game')\ngam5pc = de.calc_pct(gam5)\ngam5pc.to_clipboard()\n\n#GAM5\ngam5_14 = de.dist(de14, de14_meta, 'GAM5', 'WghtUniversal_Game')\ngam5_14pc = 
de.calc_pct(gam5_14)\ngam5_14pc.to_clipboard()\n\n#GAM5\ngam5_13 = de.dist(de13, de13_meta, 'GAM5', 'WghtUniversal_Game')\ngam5_13pc = de.calc_pct(gam5_13)\ngam5_13pc.to_clipboard()\n\n\n#GAM6 = \"Why are you working on games?\"\ngam6 = de.dist(de15, de15_meta, 'GAM6', 'WghtUniversal_Game')\ngam6pc = de.calc_pct(gam6)\ngam6pc.to_clipboard()\n\n# Now filter on pros, hobbyists and students\nde15['GAMES'] = de15[['CR2_8_3']].any(axis=1).astype(float).replace(0,np.nan)\nGame_dev=de15.GAMES==1\n\ngam6f = de.dist(de15[Game_dev],de15_meta,'GAM6','WghtUniversal_Game')\ngam6f_pc=de.calc_pct(gam6f)\ngam6f_pc.to_clipboard()\n\n#GAM7 = \"How do you monetise your games?\"\ngam7 = de.dist(de15, de15_meta, 'GAM7', 'WghtUniversal_Game')\ngam7pc = de.calc_pct(gam7)\ngam7pc.to_clipboard()\n\n# Filter out anything that is \"Other, don't know, or we don't want to make money\"\nGAM7_14_excl=de15.GAM7_14.isnull() & de15.GAM7_15.isnull()& de15.GAM7_16.isnull() & de15.GAM7_17.isnull()\n\n#Now I am applying the filter to my DE15 data\ngam7f=de.dist(de15[GAM7_14_excl],de15_meta,'GAM7','WghtUniversal_Game')\ngam7fpc = de.calc_pct(gam7f)\ngam7fpc.to_clipboard()\n\n#GAM8 = \"What's your monthly revenue?\"\ngam8 = de.dist(de15[de15.GAM8<10], de15_meta, 'GAM8', 'WghtUniversal_Game')\ngam8pc=de.calc_pct(gam8)\ngam8pc.to_clipboard()\n\n#GAM8 = \"What's your monthly revenue?\"\ngam8_14 = de.dist(de15[de15.GAM8<10], de15_meta, 'GAM8', 'WghtUniversal_Game')\ngam8_14pc=de.calc_pct(gam8_14)\ngam8_14pc.to_clipboard()\n\n#GAM8 = \"What's your monthly revenue?\"\ngam8_13 = de.dist(de15[de15.GAM8<10], de15_meta, 'GAM8', 'WghtUniversal_Game')\ngam8_13pc=de.calc_pct(gam8_13)\ngam8_13pc.to_clipboard()\n\n\n#GAM_PA \"What technologies?\"\n\ngam_pa = de.dist(de15, de15_meta, 'GAM_PA', 'WghtUniversal_Game')\ngam_pa_pc=de.calc_pct(gam_pa)\ngam_pa_pc.to_clipboard()\n\n#Filter GAM_PA by dev type\nde15['GAMES'] = de15[['CR2_8_3']].any(axis=1).astype(float).replace(0,np.nan)\nGame_dev=de15.GAMES==1\n\ngam_pa_f = de.dist(de15[Game_dev], de15_meta, 'GAM_PA', 'WghtUniversal_Game')\ngam_pa_f_pc=de.calc_pct(gam_pa_f)\ngam_pa_f_pc.to_clipboard()\n\n\n\n#CR2\ncr2 = de.dist(de15, de15_meta, 'CR2', 'WghtUniversal_Core')\ncr2_pc = de.calc_pct(cr2)\ncr2_pc.to_clipboard()\n\n# CR6: experience levels\ncr6 = de.dist(de15, de15_meta, 'CR6', 'WghtUniversal_Core')\ncr6_pc = de.calc_pct(cr6, pct_type='row')\ncr6_pc.to_clipboard()\n\n# Filter by games pros\n#de15['GAMES'] = de15[['CR2_8_1', 'CR2_8_2', 'CR2_8_3']].any(axis=1).astype(float).replace(0,np.nan)\nde15['GAMES'] = de15[['CR2_8_1']].any(axis=1).astype(float).replace(0,np.nan)\nGame_dev=de15.GAMES==1\n\ncr6games = de.dist(de15[Game_dev],de15_meta,'CR6','WghtUniversal_Core')\ncr6g_pc=de.calc_pct(cr6games,pct_type='row')\ncr6g_pc.to_clipboard()\n\n\n\n#Look at age levels\n# whole dev population\ncr_dev2 = de.dist(de15, de15_meta, 'CR_DEV2', 'WghtUniversal_Core')\ncr_dev2_pc = de.calc_pct(cr_dev2)\ncr_dev2_pc.to_clipboard()\n\n#Just game devs (filter)\n\nde15['GAMES'] = de15[['CR2_8_1', 'CR2_8_2', 'CR2_8_3']].any(axis=1).astype(float).replace(0,np.nan)\nGame_dev=de15.GAMES==1\n\ncr_dev2_games = de.dist(de15[Game_dev],de15_meta,'CR_DEV2','WghtUniversal_Core')\ncr_dev2_games_pc=de.calc_pct(cr_dev2_games)\ncr_dev2_games_pc.to_clipboard()\n\n#CR3: organisation sizes\ncr3 = de.dist(de15, de15_meta, 'CR3', 'WghtUniversal_Core')\ncr3_pc = de.calc_pct(cr3)\ncr3_pc.to_clipboard()\n\n#Just game devs (filter)\n\nde15['GAMES'] = de15[['CR2_8_1', 'CR2_8_2', 
'CR2_8_3']].any(axis=1).astype(float).replace(0,np.nan)\nGame_dev=de15.GAMES==1\n\ncr3g = de.dist(de15[Game_dev],de15_meta,'CR3','WghtUniversal_Core')\ncr3g_pc=de.calc_pct(cr3g)\ncr3g_pc.to_clipboard()\n\n#Just pro game devs (filter)\n\nde15['GAMES'] = de15[['CR2_8_1']].any(axis=1).astype(float).replace(0,np.nan)\nGame_dev=de15.GAMES==1\n\ncr3gp = de.dist(de15[Game_dev],de15_meta,'CR3','WghtUniversal_Core')\ncr3gp_pc=de.calc_pct(cr3gp)\ncr3gp_pc.to_clipboard()\n\n\n","sub_path":"scripts/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":6927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"423566631","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"contacts/\", views.contacts, name=\"contact\"),\n path(\"contacts/edit/\", views.edit, name=\"edit\"),\n path(\"contacts/delete/\", views.delete, name=\"delete\"),\n\n]","sub_path":"contactBookApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"501997126","text":"def move(message, n_letters):\n split_l = message[:n_letters]\n split_r = message[n_letters:]\n return split_r + split_l\n\ndef insert(message, index, value):\n split_l = message[:index]\n split_r = message[index:]\n return split_l + value + split_r\n\ndef changeall(message, substring, replacement):\n return message.replace(substring, replacement)\n\n\ndef enigma():\n message = input()\n\n data = input()\n while not data == \"Decode\":\n data = data.split('|')\n commad = data[0]\n\n if commad == \"Move\":\n n_letters = int(data[1])\n message = move(message, n_letters)\n\n elif commad == \"Insert\":\n index = int(data[1])\n value = data[2]\n message = insert(message, index, value)\n\n elif commad == \"ChangeAll\":\n substring = data[1]\n replacement = data[2]\n message = changeall(message, substring, replacement)\n\n data = input()\n return message\n\nprint(f\"The decrypted message is: {enigma()}\")","sub_path":"python-dev-EXAMS/programming-fundamentals/final-exam/pf_final_ex_15_aug_2020/01_the_imitation_game.py","file_name":"01_the_imitation_game.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"401018117","text":"def cipher(word):\n wordCiphered = ''\n for i in range(len(word)):\n if word[i].islower():\n wordCiphered = wordCiphered + chr(219-ord(word[i]))\n else:\n wordCiphered = wordCiphered + word[i]\n return wordCiphered\n\ndef decipher(word):\n wordDeciphered = ''\n for i in range(len(word)):\n if word[i].islower():\n wordDeciphered = wordDeciphered + chr(219-ord(word[i]))\n else:\n wordDeciphered = wordDeciphered + word[i]\n return wordDeciphered\n\nprint(cipher('KomachiLabNlpKnock'))\nprint(decipher('KlnzxsrLzyNokKmlxp'))","sub_path":"Mana/chapter01/knock08.py","file_name":"knock08.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"551412728","text":"import cv2\r\nimport numpy as np\r\nimport os\r\nimport sys\r\nimport glob\r\nimport tensorflow as tf\r\nfrom matplotlib import pyplot\r\n\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nEPOCHS = 10\r\nIMG_WIDTH = 30\r\nIMG_HEIGHT = 30\r\nNUM_CATEGORIES = 43\r\nTEST_SIZE = 0.3\r\n\r\n\r\ndef main():\r\n\r\n # Check command-line arguments\r\n if 
len(sys.argv) not in [2, 3]:\r\n sys.exit(\"Usage: python traffic.py data_directory [model.h5]\")\r\n\r\n # Get image arrays and labels for all image files\r\n images, labels = load_data(sys.argv[1])\r\n\r\n # Split data into training and testing sets\r\n labels = tf.keras.utils.to_categorical(labels)\r\n x_train, x_test, y_train, y_test = train_test_split(\r\n np.array(images), np.array(labels), test_size=TEST_SIZE\r\n )\r\n\r\n # Get a compiled neural network\r\n model = get_model()\r\n\r\n # Fit model on training data\r\n history = model.fit(x_train, y_train, epochs=EPOCHS)\r\n\r\n # Evaluate neural network performance\r\n train_acc = model.evaluate(x_train, y_train, verbose=2)\r\n test_acc = model.evaluate(x_test, y_test, verbose=2) \r\n \r\n # Save model to file\r\n if len(sys.argv) == 3:\r\n filename = sys.argv[2]\r\n model.save(filename)\r\n print(f\"Model saved to {filename}.\")\r\n \r\n # plot loss during training\r\n pyplot.subplot(211)\r\n pyplot.title('Loss')\r\n pyplot.plot(history.history['loss'], label='train')\r\n pyplot.legend()\r\n # plot accuracy during training\r\n pyplot.subplot(212)\r\n pyplot.title('Accuracy')\r\n pyplot.plot(history.history['accuracy'], label='train')\r\n pyplot.tight_layout()\r\n pyplot.legend()\r\n pyplot.show()\r\n\r\n\r\ndef load_data(data_dir):\r\n \"\"\"\r\n Load image data from directory `data_dir`.\r\n\r\n Assume `data_dir` has one directory named after each category, numbered\r\n 0 through NUM_CATEGORIES - 1. Inside each category directory will be some\r\n number of image files.\r\n\r\n Return tuple `(images, labels)`. `images` should be a list of all\r\n of the images in the data directory, where each image is formatted as a\r\n numpy ndarray with dimensions IMG_WIDTH x IMG_HEIGHT x 3. `labels` should\r\n be a list of integer labels, representing the categories for each of the\r\n corresponding `images`.\r\n \"\"\"\r\n #Initiate lists that will be returned as tuple\r\n images=[]\r\n labels=[]\r\n \r\n #Go in directoty and find all images within the directory\r\n #loop over all directories:\r\n for i in range(NUM_CATEGORIES-1):\r\n #add the directory to data-dir and get all the files:\r\n wildcard = os.path.join(data_dir, str(i), \"*\")\r\n files = glob.glob(wildcard) \r\n #loop over each file \r\n for j in files:\r\n #use cv2 to read image as np.ndarray with RGB colours (default)\r\n img = cv2.imread(j)\r\n \r\n #check size\r\n set_size=(IMG_WIDTH,IMG_HEIGHT)\r\n img2=cv2.resize(img,set_size)\r\n\r\n #append label (set to i), and image img2 \r\n labels.append(i)\r\n images.append(img2)\r\n \r\n #return tuple(images,labels) \r\n return images,labels\r\n\r\ndef get_model():\r\n \"\"\"\r\n Returns a compiled convolutional neural network model. Assume that the\r\n `input_shape` of the first layer is `(IMG_WIDTH, IMG_HEIGHT, 3)`.\r\n The output layer should have `NUM_CATEGORIES` units, one for each category.\r\n \"\"\"\r\n model = tf.keras.models.Sequential([\r\n\r\n # Convolutional layer. 
Learn 32 filters using a 3x3 kernel\r\n tf.keras.layers.Conv2D(\r\n 60, (5, 5), activation=\"relu\", input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)),\r\n \r\n #tf.keras.layers.Conv2D(\r\n # 60, (3, 3), activation=\"relu\"),\r\n\r\n # Max-pooling layer, using 2x2 pool size\r\n tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),\r\n\r\n # Flatten units\r\n tf.keras.layers.Flatten(), \r\n\r\n # Add a hidden layer with dropout\r\n tf.keras.layers.Dense(NUM_CATEGORIES*10, activation=\"relu\"),\r\n tf.keras.layers.Dropout(0.25), \r\n \r\n # Add a hidden layer with dropout\r\n tf.keras.layers.Dense(NUM_CATEGORIES*10, activation=\"relu\"),\r\n tf.keras.layers.Dropout(0.25), \r\n\r\n # Add an output layer with output units for all num categories\r\n tf.keras.layers.Dense(NUM_CATEGORIES-1, activation=\"sigmoid\")\r\n ])\r\n \r\n # Train neural network\r\n opti=tf.keras.optimizers.RMSprop(learning_rate=4e-4) \r\n model.compile(optimizer=opti, loss=\"categorical_crossentropy\", metrics=[\"accuracy\"])\r\n\r\n return model\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"Week5_NeuralNetworks/traffic/traffic.py","file_name":"traffic.py","file_ext":"py","file_size_in_byte":4556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"266795291","text":"#!/usr/bin/python\n\n#This script takes a string and returns all permutations of it. \n#if you use perms.perms(\"apple\") you will get all permutations of Apple\n#in order to get all dictionary words, you will need to use perms.getRealWords(yourString)\n#Note that this is very processor intensive, so it is best to stick to 16 or fewer characters. \n#aeiousadfe\n\nimport itertools\n\n\ndef getPerms(somestring, length):\n return itertools.permutations(somestring, length)\n \ndef permsToList(allperms):\n listOfPerms=set()\n while allperms.next:\n try:\n tempstring=\"\"\n nextHolder=allperms.next()\n for charPOS in range(len(nextHolder)):\n tempstring=tempstring+nextHolder[charPOS]\n listOfPerms.add(tempstring)\n except StopIteration:\n break\n return listOfPerms\n\ndef perms(aString):\n allperms=[]\n for length in range(len(aString)+1):\n allperms.append(permsToList(getPerms(aString, length)))\n return allperms\n \ndef getWordList():\n words=set()\n wordFile=open('/home/jlmarks/words.txt','r')\n for word in wordFile:\n words.add(word[:-1])\n wordFile.close()\n return words\n \ndef getRealWords(aString):\n actualWords=set()\n words=getWordList()\n allPerms=perms(aString)\n for wordLength in range(1, len(allPerms)):\n actualWords=actualWords.union(words&allPerms[wordLength])\n return actualWords\n\ndef example():\n a=getRealWords(\"aeeiouysadtrghb\")\n","sub_path":"scripts/python/anagrams/perms.py","file_name":"perms.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"227797584","text":"# coding:utf-8\n\nimport csv\n\n\ndef read_attrs(path):\n file = open(path, 'r')\n reader = csv.reader(file)\n pulsars = open('pulsars.csv', 'w', newline='')\n writer = csv.writer(pulsars)\n for i, line in enumerate(reader):\n data = list()\n data.append(i+1)\n print(line[0])\n data.append(line[0])\n data.append((line[1]))\n data.append((line[2]))\n data.append(line[3])\n data.append(line[4])\n print(len(data))\n # if line[3] != '*':\n # data.append(float(line[3]))\n # else:\n # data.append(line[3])\n writer.writerow(data)\n pulsars.close()\n file.close()\n\n\n# Looking for missing values in ra\ndef missing_value(path):\n file = 
open(path, 'r')\n reader = csv.reader(file)\n num = list()\n for line in reader:\n num.append(int(float(line[1])//1))\n\n start = num[0]\n missing = list()\n for i in range(len(num)):\n if num[i] - start > 1:\n for j in range(start+1, num[i]):\n missing.append(j)\n start = num[i]\n print(missing)\n print(len(missing))\n\nif __name__ == '__main__':\n path = 'G:\\\\WeChatFile\\\\WeChat Files\\\\he124573325\\\\Files\\\\脉冲星候选体匹配程序\\\\脉冲星候选体匹配程序\\\\Matching\\\\ATNF.csv'\n read_attrs(path)\n # missing_value(path)\n","sub_path":"read_known_pulsars.py","file_name":"read_known_pulsars.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"145848224","text":"import unittest\nfrom Domain.Discipline import Discipline\nfrom Domain.Exceptions import DisciplineException\nfrom Repository.DisciplineRepository import DisciplineRepository\nfrom Domain.Student import Student\nfrom Domain.Exceptions import StudentException\nfrom Repository.StudentRepository import StudentRepository\n\nclass DisciplineRepositoryTestCase(unittest.TestCase):\n '''\n unit test for DisciplineRepository\n '''\n def setUp(self):\n self.repo = DisciplineRepository()\n \n d1 = Discipline(\"maths\", \"Andrea\")\n d2 = Discipline(\"physics\", \"Columban\")\n \n self.repo.add(d1)\n self.repo.add(d2)\n \n def testAdd(self):\n d = Discipline(\"chemistry\", \"Baiazid\")\n self.repo.add(d)\n self.assertEqual(len(self.repo), 3)\n \n self.assertRaises(DisciplineException, self.repo.add, d)\n \n def testFindByName(self):\n d = self.repo.findByName(\"maths\")\n self.assertEqual(d, Discipline(\"maths\", \"Andrea\"))\n\n d = self.repo.findByName(\"js\")\n self.assertEqual(d, None)\n self.assertTrue(d == None)\n\n def testUpdate(self):\n upD = Discipline(\"physics\", \"Huber\")\n self.repo.update(\"physics\",\"Huber\")\n d = self.repo.findByName(\"physics\")\n self.assertEqual(d, upD)\n\n def testRemove(self):\n self.repo.remove(\"maths\")\n self.assertEqual(len(self.repo), 1)\n\n self.assertRaises(DisciplineException, self.repo.remove, \"chemistry\")\n \nclass StudentRepositoryTestCase(unittest.TestCase):\n '''\n unit test for StudentRepository\n '''\n def setUp(self):\n self.repo = StudentRepository()\n \n s1 = Student(1, \"1\")\n s2 = Student(2, \"2\")\n \n self.repo.add(s1)\n self.repo.add(s2)\n \n def testAdd(self):\n s = Student(3, \"3\")\n self.repo.add(s)\n self.assertEqual(len(self.repo), 3)\n \n self.assertRaises(StudentException, self.repo.add, s)\n \n def testFindByID(self):\n s = self.repo.findByID(1)\n self.assertEqual(s, Student(1, \"1\"))\n\n s = self.repo.findByID(169)\n self.assertEqual(s, None)\n self.assertTrue(s == None)\n\n def testUpdate(self):\n upS = Student(2, \"8\")\n self.repo.update(2,\"8\")\n s = self.repo.findByID(2)\n self.assertEqual(s, upS)\n\n def testRemove(self):\n self.repo.remove(1)\n self.assertEqual(len(self.repo), 1)\n\n self.assertRaises(StudentException, self.repo.remove, 1)\n\nif __name__ == '__main__':\n unittest.main(verbosity=2)\n\n","sub_path":"Tests/RepositoryTest.py","file_name":"RepositoryTest.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"395613266","text":"#!/usr/bin/python\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'community'\n}\n\nDOCUMENTATION = '''\n---\nmodule: loginmsg\n\nshort_description: This is my test module\n\nversion_added: 
\"2.4\"\n\ndescription:\n - \"This is my longer description explaining my test module\"\n\noptions:\n name:\n description:\n - This is the message to send to the test module\n required: true\n new:\n description:\n - Control to demo if the result of this module is changed or not\n required: false\n\nauthor:\n - Your Name (@Jeroenvdl)\n'''\n\nEXAMPLES = '''\n---\n- name: set up login messages\n  hosts: all\n  collections:\n   - .loginmsg\n  become: yes\n  tasks:\n  - name: put a logon message before logging in\n    loginmsg:\n      text: Hello, you are entering a Hackathon Machine!\n      when: before\n      fqdn: true\n      state: present\n \n  - name: put a logon message after logging in\n    loginmsg:\n      text: Hi there, welcome in a Hackathon Machine!\n      when: after\n      fqdn: true\n'''\n\nRETURN = '''\noriginal_message:\n description: The original name param that was passed in\n type: str\n returned: always\nmessage:\n description: The output message that the test module generates\n type: str\n returned: always\n'''\n\nimport socket\nimport os\nfrom ansible.module_utils.basic import AnsibleModule\n\ndef run_module():\n # define available arguments/parameters a user can pass to the module\n module_args = dict(\n text=dict(type='str', required=True),\n when=dict(type='str', required=True, choices=['before', 'after']),\n fqdn=dict(type='bool', required=False, default=False),\n state=dict(type='str', required=False, default='present', choices=['absent', 'present'])\n\n )\n\n # seed the result dict in the object\n # we primarily care about changed and state\n # change is if this module effectively modified the target\n # state will include any data that you want your module to pass back\n # for consumption, for example, in a subsequent task\n result = dict(\n changed=False\n )\n\n # the AnsibleModule object will be our abstraction working with Ansible\n # this includes instantiation, a couple of common attr would be the\n # args/params passed to the execution, as well as if the module\n # supports check mode\n module = AnsibleModule(\n argument_spec=module_args,\n supports_check_mode=True\n ) \n\n # if the user is working with this module in only check mode we do not\n # want to make any changes to the environment, just return the current\n # state with no modifications\n if module.params['when'] == \"before\":\n filename = '/etc/issue'\n elif module.params['when'] == \"after\":\n filename = '/etc/motd'\n else:\n module.exit_json(**result)\n\n if module.params['state'] == 'present' and module.params['fqdn'] == True:\n with open(filename, 'w') as file_object:\n message = module.params['text'] + \" \" + socket.getfqdn()\n file_object.write(message)\n \n result['changed'] = True\n else:\n with open(filename, 'w') as file_object:\n message = module.params['text']\n file_object.write(message)\n \n result['changed'] = True\n \n if module.params['state'] == 'absent':\n try:\n with open(filename) as file_object:\n pass\n os.remove(filename)\n result['changed'] = True\n except FileNotFoundError:\n print(filename + \"Not Found.\")\n result['changed'] = False\n\n # in the event of a successful module execution, you will want to\n # simple AnsibleModule.exit_json(), passing the key/value results\n module.exit_json(**result)\n\ndef main():\n run_module()\n\nif __name__ == '__main__':\n main()","sub_path":"plugins/modules/loginmsg.py","file_name":"loginmsg.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} 
+{"seq_id":"646945333","text":"import os\nimport logging\nfrom common.get_path import BASIC_PATH\nfrom common.get_config import config_data\n\n\ndef login_info(name, level, log_name, lf_level, ls_level):\n log = logging.getLogger(name)\n log.setLevel(level)\n\n formats = \"%(asctime)s - [%(filename)s-->line:%(lineno)d] - %(levelname)s: %(message)s\"\n log_format = logging.Formatter(formats)\n\n lf = logging.FileHandler(os.path.join(BASIC_PATH, log_name), encoding=\"utf-8\")\n lf.setLevel(lf_level)\n log.addHandler(lf)\n lf.setFormatter(log_format)\n\n ls = logging.StreamHandler()\n ls.setLevel(ls_level)\n log.addHandler(ls)\n ls.setFormatter(log_format)\n\n return log\n\n\nlog = login_info(name=config_data.get(\"login\", \"name\"),\n level=config_data.get(\"login\", \"level\"),\n log_name=config_data.get(\"login\", \"log_name\"),\n lf_level=config_data.get(\"login\", \"fh_level\"),\n ls_level=config_data.get(\"login\", \"sh_level\"))\n\n\n","sub_path":"common/login_info.py","file_name":"login_info.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"634048022","text":"##############################################################\n# Python file header\n__author__ = \"Thiago Lopes\"\n__GitHubPage__ = \"https://github.com/lopesth\"\n__email__ = \"lopes.th.o@gmail.com\"\n__date__ = \"Friday, May 25 2018 08:57:06\"\n\n'''Description: Class to calculate the mass center of a molecule'''\n###############################################################\n\nclass MassCenterOfSet (object):\n\n def __init__(self, massList, posList):\n self.__massList = massList\n self.__numberOfElements = len(massList)\n posXlist = []\n posYlist = []\n posZlist = []\n for atomPos in posList:\n posXlist.append(atomPos[0])\n posYlist.append(atomPos[1])\n posZlist.append(atomPos[2])\n self.__xCoord = self.__massCenterAxis(posXlist)\n self.__yCoord = self.__massCenterAxis(posYlist)\n self.__zCoord = self.__massCenterAxis(posZlist)\n\n def __massCenterAxis (self, axisValueList):\n rUp = 0\n rDown = 0 \n for i in range(0, self.__numberOfElements):\n rUp = rUp + (self.__massList[i] * axisValueList[i])\n rDown = rDown + self.__massList[i]\n return '{:15.7f}'.format((rUp / rDown))\n \n def returnMassCenter(self):\n return [float(self.__xCoord), float(self.__yCoord), float(self.__zCoord)]","sub_path":"massCenter.py","file_name":"massCenter.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"336648626","text":"from data_loader import DataLoader\nfrom itertools import permutations\n\nclass FullSwitchSet:\n @staticmethod\n def call(set, h):\n total_time = sum(x[0] for x in set)\n due_date = int(h * total_time)\n minn = min([\n (FullSwitchSet.eval_result(perm, due_date), perm) for perm in permutations(set)\n ])\n return minn[0], list(minn[1])\n\n @staticmethod\n def eval_result(set, due_date):\n current_time = 0\n total_penelty = 0\n for [len, early, late] in set:\n current_time += len\n if current_time < due_date:\n total_penelty += early * (due_date - current_time)\n else:\n total_penelty += late * (current_time - due_date)\n return total_penelty\n","sub_path":"full_switch_set.py","file_name":"full_switch_set.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"125628249","text":"'''\n-----------------------------------------------\nLab Ten, 
Task 1\n-----------------------------------------------\nAuthor: Nishant Tewari\nID: 190684430\nEmail: tewa4430@mylaurier.ca\n__updated__ = \"2021-07-04\"\n-----------------------------------------------\n'''\n#Task 1 \ndef customer_record(fh, n):\n \"\"\"\n -------------------------------------------------------\n Find the n-th record in a comma-delimited sequential file.\n Records are numbered starting with 0.\n Use: result = customer_record(fh, n)\n -------------------------------------------------------\n Parameters:\n fh - file to search (file handle - already open for reading)\n n - the number of the record to return (int > 0)\n Returns:\n result - a list of the fields of the n-th record if it exists,\n an empty list otherwise (list)\n -------------------------------------------------------\n \"\"\"\n fh.seek(0)\n result = []\n line = fh.readline()\n counter = 0\n \n while line != \"\" and counter != n: \n line = fh.readline()\n counter += 1 \n if line != \"\" :\n line = line.strip()\n result = line.split(',')\n \n return result","sub_path":"Lab 10/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"425161901","text":"import RPi.GPIO as GPIO\nimport Keypad\nfrom LCDDisplay import LCDDisplay\n\nfrom time import sleep, strftime\nfrom datetime import datetime\n\nROWS = 4\nCOLS = 4\nkeys = [\n '1', '2', '3', 'A',\n '4', '5', '6', 'B',\n '7', '8', '9', 'C',\n '*', '0', '#', 'D'\n]\nrowsPins = [12, 16, 18, 22] # connect to the row pinouts of the keypad\ncolsPins = [19, 15, 13, 11] # connect to the column pinouts of the keypad\n\ndef loop():\n lcd.start()\n\n keypad = Keypad.Keypad(keys, rowsPins, colsPins, ROWS, COLS)\n keypad.setDebounceTime(50)\n\n keyString = \"\"\n\n while(lcd.RUNNING):\n key = keypad.getKey()\n if(key != keypad.NULL):\n if len(keyString) >= 32:\n keyString = keyString[-31:]\n keyString += key\n firstLine = \"\"\n secondLine = \"\"\n if(len(keyString) > 16):\n firstLine = keyString[:16]\n secondLine = keyString[16:]\n else:\n firstLine = keyString\n stringMessage = firstLine + \"\\n\" + secondLine\n lcd.showMessage(stringMessage)\n print('----------------')\n print(stringMessage)\n sleep(lcd.DELAY)\n\nif __name__ == '__main__':\n print('Program starting...')\n lcd = LCDDisplay(False, 1)\n try:\n loop()\n except KeyboardInterrupt:\n GPIO.cleanup()\n lcd.stop()\n lcd.destroy()\n","sub_path":"screenpad/screenPad.py","file_name":"screenPad.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"180501526","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# author: bigfoolliu\n\n\n\"\"\"\n堆的实现\n\"\"\"\n\n\nclass MaxHeap(object):\n \"\"\"大根堆的实现\"\"\"\n\n def __init__(self, array=None):\n \"\"\"初始化\"\"\"\n if array:\n self.heap = self._max_heapify(array)\n else:\n self.heap = []\n\n def _max_heapify(self, array):\n \"\"\"大根堆化\"\"\"\n for i in range(int(len(array) / 2), -1, -1):\n self._sink(array, i)\n return array\n\n def _sink(self, array, i):\n \"\"\"\n 向下调整,将节点向下移,递归的操作\n 即 '下沉'\n \"\"\"\n # move node down the tree\n left, right = 2 * i + 1, 2 * i + 2\n max_index = i\n # 比较两个子节点然后决定和哪个交换\n if left < len(array) and right < len(array):\n flag = array[left] > array[right]\n else:\n flag = True\n if left < len(array) and array[left] > array[max_index] and flag:\n max_index = left\n if right < len(array) and array[right] > array[max_index] and 
not flag:\n max_index = right\n if max_index != i:\n array[i], array[max_index] = array[max_index], array[i]\n self._sink(array, max_index)\n\n def _swim(self, array, i):\n \"\"\"\n 向上调整,将节点向上移,递归的操作\n 即 '上浮'\n \"\"\"\n # move node up the tree\n if i == 0:\n return\n father = (i - 1) / 2\n if array[father] < array[i]:\n array[father], array[i] = array[i], array[father]\n self._swim(array, father)\n\n def push(self, item):\n \"\"\"\n 堆的核心api\n 向外暴露的接口,推入元素\n \"\"\"\n self.heap.append(item)\n self._swim(self.heap, len(self.heap) - 1)\n\n def pop(self):\n \"\"\"\n 堆的核心api\n 向外暴露的接口,弹出元素,弹出的是堆的堆顶元素,大根堆则返回最大元素\n \"\"\"\n self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]\n item = self.heap.pop()\n self._sink(self.heap, 0)\n return item\n\n\ndef main():\n my_heap = MaxHeap(list(range(10)))\n for i in range(10):\n print(my_heap.pop())\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"dataStructure/heap/heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"303281341","text":"from pprint import pprint\r\nimport boto3\r\nfrom boto3.dynamodb.conditions import Key, Attr\r\nimport argparse\r\nimport time\r\nfrom decimal import *\r\n\r\ndef scan_movies(YearToFind,GenreToFind):\r\n region=boto3.session.Session().region_name\r\n dynamodb = boto3.resource('dynamodb', region_name=region) #low-level Client\r\n table = dynamodb.Table('movies') #define which dynamodb table to access\r\n\r\n recordcount = 0\r\n recordscannedcount = 0\r\n\r\n scanreturn = table.scan( # perform first scan\r\n FilterExpression=Key('year').eq(YearToFind) & Attr(\"genre\").eq(GenreToFind)\r\n )\r\n recordcount += scanreturn['Count']\r\n recordscannedcount += scanreturn['ScannedCount']\r\n while 'LastEvaluatedKey' in scanreturn.keys(): # if lastevaluatedkey is present, we need to keep scanning and adding to our counts until everything is scanned\r\n scanreturn = table.scan(\r\n FilterExpression=Key('year').eq(YearToFind) & Attr(\"genre\").eq(GenreToFind),\r\n ExclusiveStartKey = scanreturn['LastEvaluatedKey']\r\n )\r\n recordcount += scanreturn['Count']\r\n recordscannedcount += scanreturn['ScannedCount']\r\n return [recordcount, recordscannedcount]\r\n\r\nif __name__ == '__main__':\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"Qyear\", help=\"Search by year and genre.. will return number of movies with that year and genre\")\r\n parser.add_argument(\"Qgenre\", help=\"Search by year and genre.. will return number of movies with that year and genre\")\r\n args = parser.parse_args()\r\n queryyear = Decimal(args.Qyear)\r\n querygenre = (args.Qgenre) #section to collect argument from command line\r\n\r\n start = time.time()\r\n movies = scan_movies(queryyear, querygenre) #scan_movies returns our total counts as two items of a list\r\n end = time.time()\r\n print(\"Count is \", movies[0]) # print the count of items returned by the scan\r\n print(\"ScannedCount is \", movies[1]) # print the count of items that had to be scanned to process the scan\r\n print('Total time: {} sec'.format(end - start))\r\n","sub_path":"lab_reference_scripts/MoviesScanYG.py","file_name":"MoviesScanYG.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"262659597","text":"from django.conf.urls import url\n\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n url(r'^kmeans/', views.kmeans, name='kmeans'),\n url(r'^extraction/', views.extraction, name='extraction'),\n url(r'^reinit/', views.reinit, name='reinit'),\n\n]","sub_path":"SpamDetector/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"499027324","text":"from common import *\n\n\nclass Conf(Singleton):\n __slot__ = (\n 'MYSQL_TABLE_PREFIX', 'HTTP_PORT', 'DB_CONFIG', 'THREAD_NUM'\n 'LOG_DIR', 'LOG_PATH', 'REDIS_TABLE_EXPIRE', 'REDIS_KEY',\n 'REDIS_PERSIST', 'REDIS_CACHE','MAX_POOL_SIZE', 'CALL_LIMIT',\n )\n\n LOG_DIR = \".\"\n LOG_PATH = \"web_analysis.log\"\n MYSQL_TABLE_PREFIX = 'web_analyse_'\n HTTP_PORT = 8888\n DB_CONFIG = {\n 'host' :'localhost',\n 'user' :'root',\n 'password' :'root',\n 'db' :'web_analysis',\n 'port' :3306,\n 'charset' :\"utf8\"\n }\n THREAD_NUM = 5\n MAX_POOL_SIZE = 5\n REDIS_TABLE_EXPIRE = 3600\n REDIS_KEY = DotDict({\n \"all_tables\" :\"all_tables\",\n \"failed_table_name\" :\"failed_table:{0}\",\n \"last_table\" :\"last_table:{0}\",\n \"last_upload_time\": \"last_upload_time\",\n })\n REDIS_PERSIST = {\n \"host\" :\"localhost\",\n \"port\" :\"63380\",\n \"db\" :0\n }\n REDIS_CACHE = {\n \"host\" :\"localhost\",\n \"port\" :\"6379\",\n \"db\" :0\n }\n\n CALL_LIMIT = {#seconds\n 'upload_interval': 150,\n }\n\nconf = Conf()\n","sub_path":"conf.py","file_name":"conf.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"540668171","text":"# encoding: utf-8\n\n\"\"\"\n@version: 1.0\n@author: LeungJain\n@time: 2017/12/26 9:43\n\"\"\"\nimport pandas as pd\nfrom urllib2 import urlopen\nfrom pandas_datareader import data as pdr\n\n\nclass RealData:\n \"\"\"\n 从各个行情服务器读取实时数据\n \"\"\"\n # 新浪A股返回的数据结构\n columns = {0: 'stock_name', 1: 'open', 2: 'last_close', 3: 'price', 4: 'high',\n 5: 'low', 6: 'buy_', 7: 'sell_', 8: 'volume', 9: 'amount',\n 10: 'B1_V', 11: 'B1', 12: 'B2_V', 13: 'B2', 14: 'B3_V', 15: 'B3',\n 16: 'B4_V', 17: 'B4', 18: 'B5_V', 19: 'B5', 20: 'B1_V',\n 21: 'S1', 22: 'S2_V', 23: 'S2', 24: 'S3_V', 25: 'S3', 26: 'S4_V', 27: 'S4',\n 28: 'S5_V', 29: 'S5', 30: 'datetime', 31: 'time'}\n\n @classmethod\n def market_judge(cls, stock_code):\n \"\"\"\n 根据股票的代码判断其所属市场,并返回带市场标示前缀的股票代码\n :param stock_code:\n :return:\n \"\"\"\n if stock_code[0:1] == '6':\n stock_code = 'sh' + stock_code\n else:\n stock_code = 'sz' + stock_code\n return stock_code\n\n @classmethod\n def get_stock_data(cls, stock_code):\n \"\"\"\n 读取一止股票的实时数据\n :param stock_code:\n :return:\n \"\"\"\n _code = RealData.market_judge(stock_code)\n html = urlopen('http://hq.sinajs.cn/list={}'.format(_code)).read()\n data_l = html.decode('gbk').split('\\n')\n i = 0\n res = dict()\n for data in data_l:\n if len(data):\n d = data.split('=\"')\n key = stock_code\n i = i + 1\n res[key] = d[1][:-2].split(',')\n\n # print(res, len(res['601088']))\n return res\n\n @classmethod\n def get_stocks_data(cls, stocks_code):\n \"\"\"\n 根据所给的股票代码列表,从新浪接口读取实时数据并打包成一个df\n 注意:在交易时间之外价格数据可能不符合实际\n :param stocks_code:\n :return:\n \"\"\"\n try:\n _codes = ['sh' + c if c[0:1] == '6' else 'sz' + c for c in stocks_code]\n # _codes = [RealData.market_judge(x) for x in stocks_code]\n _codes = ','.join(_codes)\n html = urlopen('http://hq.sinajs.cn/list={}'.format(_codes)).read()\n data_l = html.decode('gbk').split('\\n')\n i = 0\n res = dict()\n for 
data in data_l:\n if len(data):\n d = data.split('=\"')\n key = stocks_code[i]\n i += 1\n res[key] = d[1][:-2].split(',')\n data = pd.DataFrame(res).T\n data[30] = data[30] + ' ' + data[31]\n data = data.rename(columns=RealData.columns)\n data['datetime'] = pd.to_datetime(data.datetime)\n data['stock_code'] = data.index\n # print(data.T)\n return data\n except Exception as e:\n print(e)\n return None\n\n @classmethod\n def yahoo_stock_data(cls, stock_code):\n \"\"\"\n\n :param stock_code:\n :return:\n \"\"\"\n html = urlopen('http://finance.yahoo.com/d/quotes.csv?s=XOM+BBDb.TO+JNJ+MSFT&f=snd1l1yr').read()\n data_l = html.decode('gbk').split('\\n')\n print(data_l)\n\n @classmethod\n def usa_stock_data(cls, stock_code):\n \"\"\"\n 获取美股的实时数据\n :param stock_code:\n :return:\n \"\"\"\n try:\n data = pdr.DataReader(stock_code, 'iex-last')\n data = data.T\n if len(data) > 1:\n data = data.T\n elif len(data) == 1:\n pass\n else:\n return None\n data = data.rename(columns={'symbol': 'stock_code', 'time': 'datetime'})\n data['datetime'] = pd.to_datetime(data.datetime, unit='ms', utc=True)\n data['datetime'] = data.datetime + pd.Timedelta(hours=-4)\n data['datetime'] = data.datetime.map(lambda d: d.replace(tzinfo=None))\n return data\n except Exception as e:\n print(e)\n return None\n \n @classmethod\n def hk_stock_data(cls, stock_code):\n try:\n if isinstance(stock_code, str):\n _code = 'hk' + stock_code\n stock_code = [stock_code]\n elif isinstance(stock_code, list):\n _code = ['hk' + c for c in stock_code]\n _code = ','.join(_code)\n else:\n raise TypeError('This stock code type must in (str, list)')\n html = urlopen('http://hq.sinajs.cn/list={}'.format(_code)).read()\n data_l = html.decode('gbk').split('\\n')\n i = 0\n res = dict()\n for data in data_l:\n if len(data):\n d = data.split('=\"')\n key = stock_code[i]\n res[i] = [stock_code[i]] + d[1][:-2].split(',')\n i += 1\n data = pd.DataFrame(res).T\n data[18] = data[18] + ' ' + data[19]\n data = data.loc[:, [0, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 18]]\n columns = {0: 'stock_code', 2: 'stock_name', 3: 'open', 4: 'last_close',\n 5: 'price', 6: 'low', 7: 'high', 9: 'gains', 10: 'B1', 11: 'S1',\n 12: 'amount', 13: 'volume', 18: 'datetime'}\n data = data.rename(columns=columns)\n data['datetime'] = pd.to_datetime(data.datetime)\n # data['stock_code'] = data.index\n # print(data.T)\n return data\n except Exception as e:\n print(e)\n return None\n\n# realdata.get_stocks_data(['601088', '002427'])\n# RealData.yahoo_stock_data('aa')\n# print(RealData.hk_stock_data('00700'))","sub_path":"analusis/Calf/data/realdata.py","file_name":"realdata.py","file_ext":"py","file_size_in_byte":5869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"211118085","text":"import argparse\nimport json\nimport os\nimport nltk\nimport torch\nimport numpy as np\nimport tensorflow as tf\nfrom model import dcn_plus_model\nfrom nltk.tokenize.moses import MosesDetokenizer\nfrom preprocessing.cove_encoder import MTLSTM as CoveEncoder\n\ndef load_glove(filename):\n vocab_dict = {}\n embedding = []\n file = open(filename, 'r')\n for id, line in enumerate(file.readlines()):\n row = line.strip().split(' ')\n if len(row) != 301:\n continue\n vocab_dict[row[0]] = id\n embedding.append([float(i) for i in row[1:]])\n file.close()\n embedding.append([0] * len(embedding[0]))\n return vocab_dict, embedding\n\n\ndef get_vocab_id(word, vocab_dict):\n if vocab_dict.get(word) is None:\n return len(vocab_dict)\n else:\n return 
vocab_dict[word]\n\n\ndef pad_ids(id_list, vocab_dict, max_sequence_length):\n if len(id_list) >= max_sequence_length:\n return id_list[:max_sequence_length]\n else:\n return id_list + [len(vocab_dict)] * (max_sequence_length - len(id_list))\n\n\ndef pad_tokens(tokens, max_sequence_length):\n if len(tokens) >= max_sequence_length:\n return tokens[:max_sequence_length]\n else:\n pad_token = \"\".encode('utf-8')\n return tokens + [pad_token] * (max_sequence_length - len(tokens))\n\n\ndef document_to_tensor(document, vocab_dict, embedding, max_sequence_length, cove_encoder):\n tokens = [token.replace(\"``\", '\"').replace(\n \"''\", '\"') for token in nltk.word_tokenize(document)]\n length = [min(len(tokens), max_sequence_length)]\n tokens = pad_tokens(tokens, max_sequence_length)\n ids = pad_ids([get_vocab_id(token, vocab_dict)\n for token in tokens], vocab_dict, max_sequence_length)\n tensor = [embedding[id] for id in ids]\n if cove_encoder is not None:\n inputs = torch.autograd.Variable(\n torch.LongTensor(np.asarray(ids))).unsqueeze(0).cuda()\n length = torch.LongTensor(np.asarray(length)).cuda()\n document_tensor, document_cove = cove_encoder(inputs, length)\n\n if document_cove.shape[1] < 600:\n document_cove = torch.cat([document_cove, torch.autograd.Variable(\n torch.zeros(1, 600 - document_cove.shape[1], 600)).cuda()], 1)\n document_tensor = torch.cat(\n [document_tensor, document_cove], 2).squeeze(0).data.cpu().numpy()\n\n for i in range(max_sequence_length):\n if ids[i] == len(vocab_dict):\n document_tensor[i] = np.zeros(900)\n tensor = document_tensor\n #document = tf.transpose(tf.constant(\n # np.expand_dims(np.array(tensor), axis=0)), [0, 2, 1])\n document = tf.constant(np.expand_dims(np.array(tensor), axis=0))\n length = tf.constant(np.expand_dims(np.array(length), axis=0))\n return document, length\n\n\ndef input_fn(context, question, context_length, question_length, context_tokens):\n \"\"\"\n features = [OrderedDict([('context', context), ('question', question), ('context_length', context_length),\n ('question_length', question_length), ('context_tokens', tf.constant(context_tokens)), ('id', tf.constant(np.array(\"id\")))])]\n dtypes = OrderedDict([('context', tf.float32), ('question', tf.float32), ('context_length', tf.int64),\n ('question_length', tf.int64), ('context_tokens', tf.string), ('id', tf.string)])\n shapes = OrderedDict([('context', context.shape), ('question', question.shape), ('context_length', context_length.shape),\n ('question_length', question_length.shape), ('context_tokens', tf.constant(context_tokens).shape), ('id', tf.constant(np.array(\"id\")).shape)])\n train_data = tf.data.Dataset.from_generator(lambda: (feature for feature in features), dtypes, shapes)\n \"\"\"\n context_tokens = np.expand_dims(context_tokens, axis=0)\n features = {'context': context, 'question': question, 'context_length': context_length, 'question_length': question_length, 'context_tokens': tf.constant(context_tokens), 'id': tf.constant(np.array([\"id\"]))}\n train_data = tf.data.Dataset.from_tensors(features)\n iterator = train_data.make_one_shot_iterator()\n return iterator.get_next()\n\n\nif __name__ == '__main__':\n params = json.load(open('params.json'))\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n max_sequence_length = params['model']['max_sequence_length']\n parser = argparse.ArgumentParser()\n parser.add_argument('--glove_file')\n parser.add_argument('--use_cove', action='store_true')\n parser.add_argument('--model_dir', nargs='?', default='pretrained', type=str)\n args = 
parser.parse_args()\n\n glove_file = args.glove_file\n if glove_file is None:\n print(\"Glove file needed\")\n\n else:\n context = input(\"Context: \")\n question = input(\"Question: \")\n vocab_dict, embedding = load_glove(glove_file)\n\n cove_encoder = None\n if args.use_cove:\n cove_encoder = CoveEncoder(n_vocab=len(\n embedding), vectors=torch.FloatTensor(embedding), residual_embeddings=False)\n cove_encoder.cuda()\n\n context_tokens = [token.replace(\"``\", '\"').replace(\n \"''\", '\"') for token in nltk.word_tokenize(context)]\n context_embedding, context_length = document_to_tensor(\n context, vocab_dict, embedding, max_sequence_length, cove_encoder)\n question_embedding, question_length = document_to_tensor(\n question, vocab_dict, embedding, max_sequence_length, cove_encoder)\n dcn_estimator = tf.estimator.Estimator(\n model_fn=dcn_plus_model, params=params['model'], model_dir=args.model_dir)\n prediction = dcn_estimator.predict(input_fn=lambda: input_fn(\n context_embedding, question_embedding, context_length, question_length, context_tokens))\n prediction = list(prediction)[0]\n detokenizer = nltk.tokenize.moses.MosesDetokenizer()\n prediction = detokenizer.detokenize([token.decode(\n 'utf-8') for token in prediction['context_tokens'][prediction['start']:prediction['end']+1]], return_str=True)\n print(\"Answer: {}\".format(prediction))\n","sub_path":"interactive.py","file_name":"interactive.py","file_ext":"py","file_size_in_byte":5820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"169627085","text":"#\n# MIT License\n#\n# Copyright (c) 2020 Airbyte\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n\n\nimport json\nimport pathlib\nimport random\n\nimport requests\nfrom requests.auth import HTTPBasicAuth\n\n\ndef create():\n source_directory = pathlib.Path(__file__).resolve().parent.parent.parent\n configs_path = source_directory.joinpath(\"secrets/config.json\")\n with open(configs_path) as json_configs:\n configs = json.load(json_configs)\n auth = HTTPBasicAuth(configs.get(\"email\"), configs.get(\"api_token\"))\n base_api_url = f'https://{configs.get(\"domain\")}/rest/api/3/issue'\n\n headers = {\"Accept\": \"application/json\", \"Content-Type\": \"application/json\"}\n\n projects = [\"EX\", \"IT\", \"P2\", \"TESTKEY1\"]\n issue_types = [\"10001\", \"10002\", \"10004\"]\n\n for index in range(1, 76):\n payload = json.dumps(\n {\n \"fields\": {\n \"project\": {\"key\": random.choice(projects)},\n \"issuetype\": {\"id\": random.choice(issue_types)},\n \"summary\": f\"Test {index}\",\n \"description\": {\n \"type\": \"doc\",\n \"version\": 1,\n \"content\": [{\"type\": \"paragraph\", \"content\": [{\"type\": \"text\", \"text\": f\"Test description {index}\"}]}],\n },\n }\n }\n )\n\n requests.request(\"POST\", base_api_url, data=payload, headers=headers, auth=auth)\n\n\nif __name__ == \"__main__\":\n create()\n","sub_path":"airbyte-integrations/connectors/source-jira/integration_tests/fixtures/create_issues.py","file_name":"create_issues.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"572797194","text":"from . 
import goog, label\n\ndef stratnsefoticker (ticker, cacheid, stratid, gdrv, maxhold=15, stoploss_p=1, mintarget_p=2, label_p=False, dbgmsg=False):\n import pandas as pd, numpy as np, datetime as dt, os\n \n if (gdrv != ''): #gdrive, need to fetch strat first\n goog.syncfolder_gdrive2local(gdrv, gdrv.CreateFile({'id': stratid})['title'], stratid, dbgmsg)\n stratfile = gdrv.CreateFile({'id': stratid})['title'] + '/' + 'strat.csv'\n cachefol = gdrv.CreateFile({'id': cacheid})['title']\n else:\n stratfile = stratid\n cachefol = cacheid\n \n sf = pd.read_csv(stratfile)\n for f in os.listdir(cachefol):\n if (f.find(ticker) != 0 or f[-5:] != '_S.h5'): continue\n idf = pd.read_hdf(cachefol + '/' + f, key=ticker, mode='r', format='table', complevel=9, complib='blosc', fletcher32=True)\n df = {}\n odf = {}\n window = 2*maxhold + 1\n for tdate in idf.index.normalize().unique().sort_values():#iter over each day in df\n print('STRAT processing date: ', tdate)\n df = idf[idf.index.normalize() == tdate]#idf is intraday df of trades\n zstrikes = sorted(idf[ticker + '_F1_CLOSE'].dropna().round(decimals=-2).astype(int).unique())\n #zstrikes.append(zstrikes[-1] + 100)\n #zstrikes.append(zstrikes[0] - 100)\n zstrikes = sorted(zstrikes)\n for zs in zstrikes:\n #print('working on strike: ', zs)\n for strat in sf.itertuples():\n df[ticker + '_STRAT_' + strat.STRAT + '_' + str(zs)] = np.nan # create empty column for strat\n for leg in range(strat.NLEG):\n if (0 == leg):\n df[ticker + '_STRAT_' + strat.STRAT + '_' + str(zs)] = df[ticker + '_' + str(eval('strat.SDIST_LEG' + str(leg)) + zs) + '_' + eval('strat.CT_LEG' + str(leg)) + '_CLOSE'] * eval('strat.LOT_LEG' + str(leg))\n #print(df[ticker + '_STRAT_' + strat.STRAT + '_ZS_' + str(zs)])\n else:\n df[ticker + '_STRAT_' + strat.STRAT + '_' + str(zs)] += df[ticker + '_' + str(eval('strat.SDIST_LEG' + str(leg)) + zs) + '_' + eval('strat.CT_LEG' + str(leg)) + '_CLOSE'] * eval('strat.LOT_LEG' + str(leg))\n #print(df[ticker + '_STRAT_' + strat.STRAT + '_ZS_' + str(zs)])\n\n # dropna the strat series with copy\n strat_df = df[ticker + '_STRAT_' + strat.STRAT + '_' + str(zs)].copy().dropna()\n if (dbgmsg==True): print('length of strat_df = ', strat_df.shape)\n \n ## pad left with bfill, right with ffill; rolling func will only consider full 'windowsize' windows for label\n for m in range(1, int((window-1)/2)): strat_df = strat_df.append(pd.Series(strat_df[-1], index=[pd.Timestamp(strat_df.index[-1]) + dt.timedelta(minutes=1)])) # ffill on right will not falsely trigger rolling func\n #if (dbgmsg==True): print('length of strat_df = ', strat_df.shape)\n for m in range(1, int((window-1)/2)): strat_df = strat_df.append(pd.Series(strat_df[ 0], index=[pd.Timestamp(strat_df.index[ 0]) - dt.timedelta(minutes=m)])) # bfill on left is dropped in rolling func\n #if (dbgmsg==True): print('length of strat_df = ', strat_df.shape)\n strat_df.sort_index(inplace=True) # make monotonic\n if (dbgmsg==True): print('length of strat_df = ', strat_df.shape)\n \n #rolling window to label exits\n label_df = strat_df.rolling(center=True, window=window, min_periods=0, closed='neither').apply(func=label.labelstratrolling, kwargs={'window': window, 'stoploss_p': stoploss_p, 'mintarget_p': mintarget_p, 'label_p': label_p}).dropna()\n if (dbgmsg==True): print('length of label_df = ', label_df.shape)\n #print(label_df)\n \n df[ticker + '_STRAT_' + strat.STRAT + '_' + str(zs)] = strat_df\n df[ticker + '_STRATLABEL_' + strat.STRAT + '_' + str(zs)] = label_df\n if (dbgmsg==True): print('length of df = ', 
df.shape)\n \n if (0 == len(odf)):\n odf = df\n else:\n odf = pd.concat([odf, df], join='outer', axis=1)\n odf.to_csv(cachefol+'/'+ticker+'_'+f[-13:-5]+'_SL.csv')\n odf.to_hdf(cachefol+'/'+ticker+'_'+f[-13:-5]+'_SL.h5', key=ticker, mode='w', format='table', complevel=9, complib='blosc', fletcher32=True)\n if (gdrv != ''): #gdrive, need to upload\n goog.syncfolder_local2gdrive(gdrv, cachefol, cacheid, '.*'+f[-13:-5]+'_SL.h5', dbgmsg)\n goog.syncfolder_local2gdrive(gdrv, cachefol, cacheid, '.*'+f[-13:-5]+'_SL.csv', dbgmsg)\n return('IEOD NSEFO Strat Done!')\n\n","sub_path":"func/strat.py","file_name":"strat.py","file_ext":"py","file_size_in_byte":4923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"255539518","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 5 09:26:33 2019\n\n@author: Tristan O'Hanlon - University of Auckland, Samual Crookes & Jonathan Rogers\n\n\n\"\"\"\nfrom pprint import pprint\nimport time\nimport numpy as np\nimport os\nfrom pyhdf import SD\nimport h5py\nimport matplotlib.pyplot as plt\nfrom scipy import interpolate\nimport constants\nfrom scipy import stats\nimport cartopy.crs as ccrs\n\n###############################################################################\nstart = time.time()\n\n#set location\n\nlocation = constants.home\n#lat = constants.lat\n\naltitude_types = [ 'Irradiance layer center height profile', 'Layer center height profile (clouds and aerosol)', 'Irradiance level height profile' ]\n\nclass DataSet:\n def __init__( self, type_name, altitude_type ):\n self.type_name = type_name\n self.altitude_type = altitude_type\n self.data = np.zeros(( constants.lat.size, constants.alt.size ))\n self.data_counts = np.zeros( (constants.lat.size, constants.alt.size ))\n \n\n# class grid_DataSet:\n# def __init__( self, type_name ):\n# self.type_name = type_name\n# self.data = np.zeros(( constants.lat.size, constants.lon.size ))\n# self.data_counts = np.zeros( (constants.lat.size, constants.lon.size ))\n\n\ndata_sets = [\n DataSet('Cloud fraction profile', 1 ),\n DataSet('Liquid water content profile used', 0 ), # kg/m3\n DataSet('Ice water content profile used', 0 ), # kg/m3\n DataSet('Temperature profile', 2 ),\n DataSet('Pressure profile', 2 ) \n] \n\n# grid_data_sets = [\n# grid_DataSet('Cloud free area percent coverage (CALIPSO-CloudSat)' ),\n# grid_DataSet('Liquid water content profile used' ),\n# grid_DataSet('Ice water content profile used' ),\n# ] \n\n# The directory where your HDF files are stored\nos.chdir('E:/CCCM/test') # Home PC\n\n\n# Load every file in the directory\nfor filename in os.listdir():\n if os.path.isdir( filename ):\n continue\n pprint( filename )\n # Load the file\n f = SD.SD(filename)\n raw_lon = f.select('Longitude of subsatellite point at surface at observation').get()\n raw_lat = 90 - f.select('Colatitude of subsatellite point at surface at observation').get()\n altitudes = []\n for altitude_type in altitude_types:\n altitudes.append( f.select(altitude_type).get() )\n altitudes[0] /= 1000\n altitudes[2] /= 1000\n \n\n for data_set in data_sets:\n sel = f.select( data_set.type_name )\n data = sel.get()\n\n fill_value = sel.attributes()['_FillValue']\n for l_index, l in enumerate( raw_lat ):\n if l <= constants.min_lat or l >= constants.max_lat:\n continue\n lat_bin = int( ( l - constants.min_lat ) / constants.lat_division)\n for a_index, a in enumerate( altitudes[data_set.altitude_type] ):\n if a <= constants.min_alt or a >= constants.max_alt:\n continue\n alt_bin = 
int( ( a - constants.min_alt ) / constants.alt_division )\n #print( lat_bin, alt_bin )\n val = data[l_index, a_index]\n if val == fill_value:\n continue\n data_set.data[ lat_bin, alt_bin ] += val\n data_set.data_counts[ lat_bin, alt_bin ] += 1\n\n# for data_set in grid_data_sets:\n# sel = f.select( data_set.type_name )\n# data = sel.get()\n# fill_value = sel.attributes()['_FillValue']\n# data[ data == fill_value ] = None #set fill values to nan\n# if data_set.type_name == 'Cloud free area percent coverage (CALIPSO-CloudSat)':\n# data = (100 - data) / 100\n# else:\n# data = np.nansum(data, axis = 1)\n# for l_index, ( la, lo ) in enumerate( zip( raw_lat, raw_lon ) ):\n# if la <= constants.min_lat or la >= constants.max_lat:\n# continue\n \n# lat_bin = int( ( la - constants.min_lat ) / constants.lat_division)\n# lon_bin = int( lo - 1 )\n \n# #print( lat_bin, alt_bin )\n# val = data[l_index]\n# if val == None:\n# continue\n# data_set.data[ lat_bin, lon_bin ] += val\n# data_set.data_counts[ lat_bin, lon_bin ] += 1\n \n# for data_set in grid_data_sets:\n# data_set.data /= data_set.data_counts\n# # pprint( data_set.data.shape )\n \n# clt_lat_lon = grid_data_sets[0].data\n# clwvi_lat_lon = grid_data_sets[1].data\n# clivi_lat_lon = grid_data_sets[2].data\n\nfor data_set in data_sets:\n data_set.data /= data_set.data_counts\n# pprint( data_set.data.shape )\n \ncl_alt_lat = data_sets[0].data # 0-1\nfull_clwc_alt_lat = data_sets[1].data # g/m^3\nclic_alt_lat = data_sets[2].data # g/m^3\nfull_ta_alt_lat = data_sets[3].data # K\nfull_p_alt_lat = data_sets[4].data # hPa\n\n\n# convert to cloud water mass fractions\n#https://www.translatorscafe.com/unit-converter/en-US/calculator/altitude/\n\ndensity_alt_lat = ( ( full_p_alt_lat * 100 ) / ( 287.052 * full_ta_alt_lat ) ) * 1000 # g/m^3\nfull_clw_alt_lat = full_clwc_alt_lat / density_alt_lat\ncli_alt_lat = clic_alt_lat / density_alt_lat\n\n#---create reduced altitude liquid mass fraction and temperatures---#\n\nta_alt_lat = full_ta_alt_lat[:,:constants.liq_alt_confine]\nclw_alt_lat = full_clw_alt_lat[:,:constants.liq_alt_confine]\nclwc_alt_lat = full_clwc_alt_lat[:,:constants.liq_alt_confine]\n\n#---create fractions---#\n\nfull_clw_frac_alt_lat = ( full_clw_alt_lat / ( full_clw_alt_lat + cli_alt_lat ) ) * cl_alt_lat\ncli_frac_alt_lat = ( cli_alt_lat / ( full_clw_alt_lat + cli_alt_lat ) ) * cl_alt_lat\nclw_frac_alt_lat = full_clw_frac_alt_lat[:,:constants.liq_alt_confine]\n\n\n#---create liquid and ice fractions---#\n\n# clt = np.nanmean( clt_lat_lon, axis = -1 )\n# clwvi = np.nanmean( clwvi_lat_lon, axis = -1 )\n# clivi = np.nanmean( clivi_lat_lon, axis = -1 )\n\n\n#---create southern ocean and global datasets---#\n \ncl_so = constants.globalalt_latMean(np.transpose(cl_alt_lat[constants.so_idx_1:constants.so_idx_2]), constants.lat[constants.so_idx_1:constants.so_idx_2])\nfull_clw_so = constants.globalalt_latMean(np.transpose(full_clw_alt_lat[constants.so_idx_1:constants.so_idx_2]), constants.lat[constants.so_idx_1:constants.so_idx_2])\ncli_so = constants.globalalt_latMean(np.transpose(cli_alt_lat[constants.so_idx_1:constants.so_idx_2]), constants.lat[constants.so_idx_1:constants.so_idx_2])\n\nclwc_so = constants.globalalt_latMean(np.transpose(clwc_alt_lat[constants.so_idx_1:constants.so_idx_2]), constants.lat[constants.so_idx_1:constants.so_idx_2])\nclic_so = constants.globalalt_latMean(np.transpose(clic_alt_lat[constants.so_idx_1:constants.so_idx_2]), constants.lat[constants.so_idx_1:constants.so_idx_2])\n\nfull_clw_frac_so = ( full_clw_so / ( full_clw_so 
+ cli_so ) ) * cl_so\ncli_frac_so = ( cli_so / ( full_clw_so + cli_so ) ) * cl_so\nclw_frac_so = full_clw_frac_so[:constants.liq_alt_confine]\nclw_so = full_clw_so[:constants.liq_alt_confine]\n\ncl_g = constants.globalalt_latMean(np.transpose(cl_alt_lat[8:172]), constants.lat[8:172])\nfull_clw_g = constants.globalalt_latMean(np.transpose(full_clw_alt_lat[8:172]), constants.lat[8:172])\ncli_g = constants.globalalt_latMean(np.transpose(cli_alt_lat[8:172]), constants.lat[8:172])\n\nclwc_g = constants.globalalt_latMean(np.transpose(clwc_alt_lat[8:172]), constants.lat[8:172])\nclic_g = constants.globalalt_latMean(np.transpose(clic_alt_lat[8:172]), constants.lat[8:172])\n\nfull_clw_frac_g = ( full_clw_g / ( full_clw_g + cli_g ) ) * cl_g\ncli_frac_g = ( cli_g / ( full_clw_g + cli_g ) ) * cl_g\nclw_frac_g = full_clw_frac_g[:constants.liq_alt_confine]\nclw_g = full_clw_g[:constants.liq_alt_confine]\n\n#----------------------------#\n\n ######## Binned Temperature Data ########\n# values to bin: full_clw_alt_lat and full_ta_alt_lat\n# binned into constants.ta_g array\n# values in each bin to be summed\n# call the summed values clw_t_g\n\nstat = 'mean'\nclwc_t_g, bin_edges, binnumber = stats.binned_statistic(full_ta_alt_lat.flatten(), full_clwc_alt_lat.flatten(), stat, bins=constants.ta.size, range=(constants.min_ta, constants.max_ta))\nclwc_t_so, bin_edges, binnumber = stats.binned_statistic(full_ta_alt_lat[constants.so_idx_1:constants.so_idx_2].flatten(), full_clwc_alt_lat[constants.so_idx_1:constants.so_idx_2].flatten(), stat, bins=constants.ta.size, range=(constants.min_ta, constants.max_ta))\n\nclw_t_g, bin_edges, binnumber = stats.binned_statistic(full_ta_alt_lat.flatten(), full_clw_alt_lat.flatten(), stat, bins=constants.ta.size, range=(constants.min_ta, constants.max_ta))\nclw_t_so, bin_edges, binnumber = stats.binned_statistic(full_ta_alt_lat[constants.so_idx_1:constants.so_idx_2].flatten(), full_clw_alt_lat[constants.so_idx_1:constants.so_idx_2].flatten(), stat, bins=constants.ta.size, range=(constants.min_ta, constants.max_ta))\n\ncli_t_g, bin_edges, binnumber = stats.binned_statistic(full_ta_alt_lat.flatten(), cli_alt_lat.flatten(), stat, bins=constants.ta.size, range=(constants.min_ta, constants.max_ta))\ncli_t_so, bin_edges, binnumber = stats.binned_statistic(full_ta_alt_lat[constants.so_idx_1:constants.so_idx_2].flatten(), cli_alt_lat[constants.so_idx_1:constants.so_idx_2].flatten(), stat, bins=constants.ta.size, range=(constants.min_ta, constants.max_ta))\n\ncl_t_g, bin_edges, binnumber = stats.binned_statistic(full_ta_alt_lat.flatten(), cl_alt_lat.flatten(), stat, bins=constants.ta.size, range=(constants.min_ta, constants.max_ta))\ncl_t_so, bin_edges, binnumber = stats.binned_statistic(full_ta_alt_lat[constants.so_idx_1:constants.so_idx_2].flatten(), cl_alt_lat[constants.so_idx_1:constants.so_idx_2].flatten(), stat, bins=constants.ta.size, range=(constants.min_ta, constants.max_ta))\n\nclw_frac_t_g = ( clw_t_g / ( clw_t_g + cli_t_g ) ) * cl_t_g\nclw_frac_t_so = ( cli_t_g / ( clw_t_g + cli_t_g ) ) * cl_t_g\n\n# fig, ax = plt.subplots()\n# ax.plot( constants.ta, clw_t_g )\n# ax.plot( constants.ta, clw_t_so )\n# ax.axvline(x=273, label = '273K', color = 'black', linestyle='--')\n# plt.grid(True)\n# plt.show()\n######################\n\n#----------------------------#\n\nos.chdir( location + '/climate-analysis/reduced_data' )\n\nsave_filename = 'Jun_2006_Apr_2011_CCCM.h5'\n\nwith h5py.File(save_filename, 'w') as p:\n\n # p.create_dataset('clt', data=clt)\n # p.create_dataset('clwvi', 
data=clwvi)\n # p.create_dataset('clivi', data=clivi)\n # p.create_dataset('clt_lat_lon', data=clt_lat_lon)\n # p.create_dataset('clwvi_lat_lon', data=clwvi_lat_lon)\n # p.create_dataset('clivi_lat_lon', data=clivi_lat_lon)\n\n p.create_dataset('density_alt_lat', data=np.transpose( density_alt_lat ) ) # global layer total cloud fraction corresponding to alt\n p.create_dataset('full_p_alt_lat', data=np.transpose( full_p_alt_lat ) ) # global layer total cloud fraction corresponding to alt\n\n p.create_dataset('cl_g', data=cl_g) # global layer total cloud fraction corresponding to alt\n p.create_dataset('clw_g', data=clw_g) # global layer cloud liquid water mass fraction in air ( kg/kg ) corresponding to liq_alt\n p.create_dataset('cli_g', data=cli_g) # global layer cloud ice water mass fraction in air ( kg/kg ) corresponding to alt\n p.create_dataset('clwc_g', data=clwc_g) # global layer cloud liquid water content ( g/m^3 ) corresponding to liq_alt\n p.create_dataset('clic_g', data=clic_g) # global layer cloud ice water content ( g/m^3 ) corresponding to alt\n p.create_dataset('clw_frac_g', data=clw_frac_g) # global layer cloud liquid water fraction corresponding to liq_alt\n p.create_dataset('cli_frac_g', data=cli_frac_g) # global layer cloud ice water fraction corresponding to alt\n \n p.create_dataset('cl_so', data=cl_so) # southern ocean layer total cloud fraction corresponding to alt\n p.create_dataset('clw_so', data=clw_so) # southern ocean layer cloud liquid water mass fraction in air ( kg/kg ) corresponding to liq_alt\n p.create_dataset('cli_so', data=cli_so) # southern ocean layer cloud ice water mass fraction in air ( kg/kg ) corresponding to alt\n p.create_dataset('clwc_so', data=clwc_so) # southern ocean layer cloud liquid water content ( g/m^3 ) corresponding to liq_alt\n p.create_dataset('clic_so', data=clic_so) # southern ocean layer cloud ice water content ( g/m^3 ) corresponding to alt\n p.create_dataset('clw_frac_so', data=clw_frac_so) # southern ocean layer cloud liquid water fraction corresponding to liq_alt\n p.create_dataset('cli_frac_so', data=cli_frac_so) # southern ocean layer cloud ice water fraction corresponding to alt\n\n p.create_dataset('cl_t_g', data=cl_t_g) # global layer cloud liquid water fraction corresponding to ta\n p.create_dataset('cl_t_so', data=cl_t_so) # global layer cloud liquid water fraction corresponding to ta\n\n p.create_dataset('clwc_t_g', data=clwc_t_g) # global layer cloud liquid water content ( g/m^3 ) corresponding to ta\n p.create_dataset('clwc_t_so', data=clwc_t_so) # global layer cloud liquid water content ( g/m^3 ) corresponding to ta\n\n p.create_dataset('clw_t_g', data=clw_t_g) # global layer cloud liquid water mass fraction in air ( kg/kg ) corresponding to ta\n p.create_dataset('clw_t_so', data=clw_t_so) # global layer cloud liquid water mass fraction in air ( kg/kg ) corresponding to ta\n\n p.create_dataset('clw_frac_t_g', data=clw_frac_t_g) # global layer cloud liquid water fraction corresponding to ta\n p.create_dataset('clw_frac_t_so', data=clw_frac_t_so) # global layer cloud liquid water fraction corresponding to ta\n\n p.create_dataset('full_clwc_alt_lat', data= np.transpose( full_clwc_alt_lat ) ) # cloud liquid water content ( g/m^3 ) corresponding to alt and lat\n p.create_dataset('clic_alt_lat', data= np.transpose( clic_alt_lat ) ) # cloud ice water content ( g/m^3 ) corresponding to alt and lat\n\n p.create_dataset('full_clw_frac_alt_lat', data= np.transpose( full_clw_frac_alt_lat ) ) # cloud liquid water fraction 
corresponding to alt and lat\n p.create_dataset('clw_frac_alt_lat', data= np.transpose( clw_frac_alt_lat ) ) # cloud liquid water fraction corresponding to liq_alt and lat\n p.create_dataset('cli_frac_alt_lat', data= np.transpose( cli_frac_alt_lat ) ) # cloud ice water fraction corresponding to alt and lat\n\n p.create_dataset('ta_alt_lat', data= np.transpose( ta_alt_lat ) ) # temperature corresponding to liq_alt and lat\n p.create_dataset('cl_alt_lat', data= np.transpose( cl_alt_lat ) ) # total cloud fraction corresponding to alt and lat\n p.create_dataset('clw_alt_lat', data= np.transpose( clw_alt_lat ) ) # cloud liquid water mass fraction in air ( kg/kg ) corresponding to liq_alt and lat\n p.create_dataset('cli_alt_lat', data= np.transpose( cli_alt_lat ) ) # cloud ice water mass fraction in air ( kg/kg ) corresponding to alt and lat\n\n p.create_dataset('full_ta_alt_lat', data= np.transpose( full_ta_alt_lat ) ) # temperature corresponding to alt and lat\n p.create_dataset('full_clw_alt_lat', data= np.transpose( full_clw_alt_lat ) ) # tcloud liquid water mass fraction in air ( kg/kg ) corresponding to alt and lat\n\n p.close()\n\nend = time.time()\nprint('Create dataset took:', end - start, 's')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"backup/15.01.2019/create_reduced_cccm_dataset.py","file_name":"create_reduced_cccm_dataset.py","file_ext":"py","file_size_in_byte":15188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"277497372","text":"import http.server\nimport socketserver\nimport threading\nimport socketserver\n\ndef create_proxy_server():\n class RedirectServer(http.server.SimpleHTTPRequestHandler):\n def do_GET(self):\n self.send_response(301)\n self.send_header(\"Location\",\"http://www.meatspin.com\")\n self.end_headers()\n\n return RedirectServer\n\n\ndef set_up_proxy():\n print(\"Created port\")\n redirectHandler = create_proxy_server()\n handler = socketserver.TCPServer((\"127.0.0.1\", 8000), redirectHandler)\n print(\"serving at port 8000\")\n handler.serve_forever()\n\n\nif __name__ == \"__main__\":\n main_thread = threading.Thread(target=set_up_proxy)\n main_thread.setDaemon(False)\n main_thread.start()\n","sub_path":"bones.py","file_name":"bones.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"503743905","text":"\"\"\"\r\nAssignment 4B - Machine Learning\r\nBy: David Walesby - 000732130\r\nPurpose: To implement a multilayer perceptron classifier\r\n\"\"\"\r\nfrom sklearn.neural_network import MLPClassifier\r\nimport csv\r\nimport numpy as np\r\nimport random\r\nfrom sklearn import tree\r\nfrom sklearn.preprocessing import normalize\r\n\r\n## Reads in the datafile and returns the arrays\r\ndef ReadFile(fileName):\r\n trainingData = []\r\n trainingLabels = []\r\n testingData = []\r\n testingLabels = []\r\n\r\n with open(fileName) as file:\r\n csv_reader = csv.reader(file , delimiter=\",\")\r\n line_count = 0\r\n for row in csv_reader:\r\n randomNumber = random.randint(1 , 101)\r\n if randomNumber > 25:\r\n testingData.append(row)\r\n else:\r\n trainingData.append(row)\r\n line_count += 1\r\n print(f'Processed {line_count} lines.')\r\n\r\n trainingData = np.array(trainingData, dtype=np.float32)\r\n testingData = np.array(testingData, dtype=np.float32)\r\n trainingLabels = trainingData[:,-1]\r\n testingLabels = testingData[:,-1]\r\n trainingData = np.delete(trainingData,-1, 
axis=1)\r\n testingData = np.delete(testingData,-1, axis=1)\r\n return trainingData, trainingLabels, testingData, testingLabels\r\n\r\n## Runs the data for classification through a decision tree classifier and a Multi Layer Perceptron classifier and displays the results\r\ndef RunTests(normalizedTrainingData, trainingLabels, normalizedTestingData, testingLabels, fileName):\r\n clf = tree.DecisionTreeClassifier()\r\n clf = clf.fit(normalizedTrainingData, trainingLabels)\r\n decisionPrediction = clf.predict(normalizedTestingData)\r\n decisionCorrect = (decisionPrediction == testingLabels).sum()\r\n decisionTreeAccuracy = decisionCorrect/len(decisionPrediction)*100\r\n\r\n mlpPerceptron = MLPClassifier(hidden_layer_sizes= 15,max_iter=250, learning_rate_init=0.17)\r\n mlpPerceptron.fit(normalizedTrainingData,trainingLabels)\r\n mlpPrediction = mlpPerceptron.predict(normalizedTestingData)\r\n mlpCorrect = (mlpPrediction == testingLabels).sum()\r\n mlpAccuracy = mlpCorrect/len(mlpPrediction)*100\r\n print()\r\n print(f'{fileName}')\r\n print(\"------------------------------------------------------------------------\")\r\n print(f'Accuracy Tree: {round(decisionTreeAccuracy,1)}%')\r\n print(f'Accuracy MLP: {round(mlpAccuracy,1)}%')\r\n print(f'{mlpPerceptron.get_params()}')\r\n print()\r\n\r\nclf = tree.DecisionTreeClassifier()\r\n\r\n## Store file information\r\ntrainingData1, trainingLabels1, testingData1, testingLabels1 = ReadFile(\"000732130_1.csv\")\r\ntrainingData2, trainingLabels2, testingData2, testingLabels2 = ReadFile(\"000732130_2.csv\")\r\ntrainingData3, trainingLabels3, testingData3, testingLabels3 = ReadFile(\"000732130_3.csv\")\r\ntrainingData4, trainingLabels4, testingData4, testingLabels4 = ReadFile(\"000732130_4.csv\")\r\ntrainingData5, trainingLabels5, testingData5, testingLabels5 = ReadFile(\"dexter.csv\")\r\n\r\n## Normalize the training data\r\nnormalizedTrainingData1 = normalize(trainingData1, axis=0, norm='max')\r\nnormalizedTrainingData2 = normalize(trainingData2, axis=0, norm='max')\r\nnormalizedTrainingData3 = normalize(trainingData3, axis=0, norm='max')\r\nnormalizedTrainingData4 = normalize(trainingData4, axis=0, norm='max')\r\nnormalizedTrainingData5 = normalize(trainingData5, axis=0, norm='max')\r\n\r\n## Normalize the testing data\r\nnormalizedTestingData1 = normalize(testingData1, axis=0, norm='max')\r\nnormalizedTestingData2 = normalize(testingData2, axis=0, norm='max')\r\nnormalizedTestingData3 = normalize(testingData3, axis=0, norm='max')\r\nnormalizedTestingData4 = normalize(testingData4, axis=0, norm='max')\r\nnormalizedTestingData5 = normalize(testingData5, axis=0, norm='max')\r\n\r\n## Run tests\r\nRunTests(normalizedTrainingData1, trainingLabels1, normalizedTestingData1, testingLabels1,\"000732130_1.csv\")\r\nRunTests(normalizedTrainingData2, trainingLabels2, normalizedTestingData2, testingLabels2,\"000732130_2.csv\")\r\nRunTests(normalizedTrainingData3, trainingLabels3, normalizedTestingData3, testingLabels3,\"000732130_3.csv\")\r\nRunTests(normalizedTrainingData4, trainingLabels4, normalizedTestingData4, testingLabels4,\"000732130_4.csv\")\r\nRunTests(normalizedTrainingData5, trainingLabels5, normalizedTestingData5, testingLabels5,\"dexter.csv\")\r\n","sub_path":"assignment4b.py","file_name":"assignment4b.py","file_ext":"py","file_size_in_byte":4303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"366594310","text":"from django import http\nfrom django.shortcuts import render\n\n# Create your views 
here.\nfrom django.views import View\nfrom django.core.paginator import Paginator\nfrom indexs.models import VideoModel\nimport random\nfrom indexs.models import ClassificationModel\n\nfrom indexs.models import MovieDelailModel\n\n\nclass VideoView(View):\n def get(self, request, view_id):\n try:\n video_delail = MovieDelailModel.objects.get(video_id=view_id)\n except:\n video_delail = ''\n try:\n video = VideoModel.objects.get(id=view_id)\n categroy = ClassificationModel.objects.filter(video=view_id)\n categroy = [i.categroy for i in categroy]\n reco = ClassificationModel.objects.filter(categroy__in=categroy)\n reco_video = [res.video for res in reco]\n video_order = VideoModel.objects.all().order_by(\"-activate\")[:8]#7376-1\n\n except Exception as e:\n return http.HttpResponseNotFound(e)\n page = Paginator(reco_video,10)\n pagesize = page.num_pages\n intpage = random.randint(0,pagesize)\n reco_video = [] if intpage==0 else page.page(intpage)\n\n\n context = {\n \"video\":video,\n \"categroy\":categroy,\n \"reco_video\":reco_video,\n \"video_order\":video_order,\n \"video_delail\":video_delail,\n }\n return render(request, \"views.html\",context=context)","sub_path":"iQIYI/iQIYI/apps/videoviews/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"45433920","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nThis class is part of the MacGyver challenge\ninitiated with class variables to use them in the different methods\nall the methods have a quick explanation of their utility and usage\n\"\"\"\n# generic python libraries\nimport pygame as pg\nimport sys\n# own libraries\nimport classes.gameData as gd\nimport config.config as config\nimport classes.imgLoader as imgl\n\nclass GameLogic():\n\n def __init__(self):\n self.collected = 0\n # loading the game from file\n # and initializing game board\n self.board = gd.GameData()\n self.board.get_maps_to_list()\n self.board.find_walls()\n self.board.find_path()\n # randomly distribute objects in the board\n # free path\n self.board.distribute_object()\n\n # some generic values to update the game settings more easily\n # game window title\n pg.display.set_caption(config.SCREEN_TITLE)\n\n # sets the size of the screen of pygamge\n self.screen = pg.display.set_mode((config.SCREEN_W, config.SCREEN_H))\n\n # sets values of the background\n self.screen.fill(config.BACKGR_COLOR)\n\n # sets a font to call it\n pg.font.init()\n objectstxt = pg.font.SysFont('Comic Sans MS', 25, bold=1)\n hypodermictxt = pg.font.SysFont('Comic Sans MS', 25, italic=1)\n self.toptxt = objectstxt.render('Vous avez collecté ces objets ->', False, config.TXT_COLOR)\n self.undertxt = hypodermictxt.render('Vous avez fabriqué la seringue', False, config.TXT_COLOR)\n\n def initiator(self):\n # loads to screen all resources to start\n self.load_resource()\n self.static_to_screen(self.board.wallpositions, self.walls.image)\n self.static_to_screen(self.board.pathpositions, self.path.image)\n self.collectables_to_screen()\n self.macgyver_to_screen()\n self.enemy_to_screen()\n # refresh display\n pg.display.flip()\n\n def load_resource(self):\n # -------------- loads images\n # caracters\n self.macgyver = imgl.ImageLoader(config.MACGYVER_PX)\n self.macgyver_pos = self.board.find_object(config.MACGYVER_POS)\n\n self.enemy = imgl.ImageLoader(config.ENEMY_PX)\n self.enemy_pos = self.board.find_object(config.ENEMY_POS)\n\n # walls\n self.walls = 
imgl.ImageLoader(config.WALL_PX)\n\n # ground\n self.path = imgl.ImageLoader(config.GROUND_PX)\n\n # Collectables\n self.tube = imgl.ImageLoader(config.TUBE_PX)\n self.needle = imgl.ImageLoader(config.NEEDLE_PX)\n self.aether = imgl.ImageLoader(config.AETHER_PX)\n\n # Final object\n self.hypodermic = imgl.ImageLoader(config.HYPODERMIC_PX)\n\n # -------------- creates lists\n # we add some elements to a list so we can iterate through\n # objects in order to display them\n self.gameObjects = []\n self.gameObjects.append(self.tube)\n self.gameObjects.append(self.needle)\n self.gameObjects.append(self.aether)\n\n '''\n use list and distribute it through the windows using\n coordinates from list multiplied by the size of the\n tile or wall structure\n '''\n # displays repetitive elements to the screen such\n # as walls, pathwalks\n def static_to_screen(self, listofobj, image):\n # display list of objects given\n for pos in listofobj:\n x_px = pos[0] * config.TILE_SIZE\n y_px = pos[1] * config.TILE_SIZE\n self.screen.blit(image, (x_px, y_px))\n\n # displays collectables to the screen\n def collectables_to_screen(self):\n # distributes objects into the maze\n\n for pos, coord in enumerate(self.board.objects_pos):\n x_px = coord[0] * config.TILE_SIZE\n y_px = coord[1] * config.TILE_SIZE\n self.screen.blit(self.gameObjects[pos].image, (x_px, y_px))\n\n # displays macgyver to the screen\n def macgyver_to_screen(self):\n self.screen.blit(\n self.macgyver.image, (\n self.macgyver_pos[0] * config.TILE_SIZE,\n self.macgyver_pos[1] * config.TILE_SIZE))\n\n # displays enemy to the screen\n def enemy_to_screen(self):\n self.screen.blit(\n self.enemy.image, (\n self.enemy_pos[0] * config.TILE_SIZE,\n self.enemy_pos[1] * config.TILE_SIZE))\n\n def getPlayerinput(self):\n # gets keys pressed by user and returns them as tuple\n self.up = pg.key.get_pressed()[pg.K_UP]\n self.down = pg.key.get_pressed()[pg.K_DOWN]\n self.left = pg.key.get_pressed()[pg.K_LEFT]\n self.right = pg.key.get_pressed()[pg.K_RIGHT]\n\n # method to move MacGyver in the maze and update screen\n def setPlayerposition(self):\n self.getPlayerinput()\n if self.macgyver_pos in self.board.pathpositions:\n if self.right and self.board.checkwall((self.macgyver_pos[0]+1, self.macgyver_pos[1])):\n self.macgyver_pos = (self.macgyver_pos[0] + 1, self.macgyver_pos[1])\n\n elif self.left and self.board.checkwall((self.macgyver_pos[0]-1, self.macgyver_pos[1])):\n self.macgyver_pos = (self.macgyver_pos[0] - 1, self.macgyver_pos[1])\n\n elif self.up and self.board.checkwall((self.macgyver_pos[0], self.macgyver_pos[1]-1)):\n self.macgyver_pos = (self.macgyver_pos[0], self.macgyver_pos[1]-1)\n\n elif self.down and self.board.checkwall((self.macgyver_pos[0], self.macgyver_pos[1]+1)):\n self.macgyver_pos = (self.macgyver_pos[0], self.macgyver_pos[1]+1)\n\n if self.collected >= 3:\n self.enemy.image = self.hypodermic.image\n \n if self.macgyver_pos in self.board.objects_pos:\n index = self.board.objects_pos.index(self.macgyver_pos)\n self.board.objects_pos[index] = (10+self.collected, 0)\n self.collected += 1\n #self.board.objects_pos.remove(self.macgyver_pos)\n elif self.macgyver_pos == self.enemy_pos:\n if self.collected >= 3:\n print(\"tous les objects ont été ramassées et l'enemi endormi, jeux terminé !\")\n sys.exit()\n else:\n print(\"Vous avez été tué ! 
Too bad.\")\n sys.exit()\n\n # render objects\n self.static_to_screen(self.board.wallpositions, self.walls.image)\n self.static_to_screen(self.board.pathpositions, self.path.image)\n self.macgyver_to_screen()\n self.enemy_to_screen()\n self.collectables_to_screen()\n self.screen.blit(self.toptxt, (0, 0))\n\n if self.collected >= 3:\n self.screen.blit(self.undertxt, (0, 30))\n","sub_path":"classes/gameLogic.py","file_name":"gameLogic.py","file_ext":"py","file_size_in_byte":6722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"110230258","text":"from datetime import datetime\nfrom kombu import Connection\n\n\nwith Connection('amqp://guest:guest@localhost:5672//') as connection:\n simple_queue = connection.SimpleQueue('SIMPLE_QUEUE')\n message = str.format(\n '[@] Hello, World! send at {}',\n datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n )\n simple_queue.put(message)\n print(str.format('[~] Send: {}', message))\n simple_queue.close()\n","sub_path":"packages/message_broker_packages/rabbit_mq/kombu_package/userguide/examples/hello_publisher.py","file_name":"hello_publisher.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"200550880","text":"#!/usr/bin/env python\n\n# encoding: utf-8\n\n# @author: Durand Wang\n\n# @file: FootprintMRC.py\n\n# @time: 2020/3/18 19:28\n\nimport numpy as np\n\n\nclass FootprintMRC:\n def __init__(self, n_max=1000000):\n self.m = None # 不同文件的数量m\n self.rt = [0] * n_max # 统计reuse time次数 算法中的r_1记录在rt[0] 依此类推\n self.first = {} # 以文件名为键, 记录该文件第一次访问的位置 算法中位置是从1开始数的 这里从0开始\n self.last_access_time = {} # 以文件名为键, 记录该文件上一次访问的位置 算法中位置是从1开始数的 这里从0开始\n self.i = 0 # 记录当前请求的位置\n self.first_array = None\n self.last_array = None\n\n def handle_requests(self, fid):\n if fid in self.last_access_time.keys(): # 不是第一次出现\n reuse_time = self.i - self.last_access_time[fid]\n self.rt[reuse_time] += 1\n else:\n self.first[fid] = self.i\n self.last_access_time[fid] = self.i\n self.i += 1\n\n def set(self): # !!!!在计算MRC前调用一次该函数!!!!\n self.m = len(self.first.keys())\n self.first_array = np.array(list(self.first.values())) + 1\n self.last_array = self.i - np.array(list(self.last_access_time.values()))\n\n def afp(self, omega):\n first_array_omega = self.first_array - omega\n last_array_omega = self.last_array - omega\n fi_sum = np.int64((first_array_omega[first_array_omega > 0])).sum()\n li_sum = np.int64((last_array_omega[last_array_omega > 0])).sum()\n rt_sum = 0\n for t in range(omega + 1, self.i):\n rt_sum += (t - omega) * self.rt[t - 1]\n return self.m - 1 / (self.i - omega + 1) * (fi_sum + li_sum + rt_sum)\n\n def mrc(self, size):\n total = 0\n for t in range(1, size): # 当t小于size的时候, 肯定有afp(t) <= size\n total += self.rt[t]\n for t in range(size, self.i):\n if self.afp(t) <= size:\n total += self.rt[t]\n else:\n break\n return 1 - total / self.i\n","sub_path":"FootprintMRC.py","file_name":"FootprintMRC.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"198504753","text":"# ---------------------------------------------------------\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# ---------------------------------------------------------\n\n# pylint: disable=protected-access\n\n\"\"\"Contains functionality for sending telemetry to Application Insights via OpenCensus Azure Monitor Exporter.\"\"\"\n\nimport logging\n\n# import platform\nfrom os import getenv\n\n# from opencensus.ext.azure.log_exporter import AzureLogHandler\n\n# from azure.ai.ml._user_agent import USER_AGENT\n\n\nAML_INTERNAL_LOGGER_NAMESPACE = \"azure.ai.ml._telemetry\"\n\n# vienna-sdk-unitedstates\nINSTRUMENTATION_KEY = \"71b954a8-6b7d-43f5-986c-3d3a6605d803\"\n\nAZUREML_SDKV2_TELEMETRY_OPTOUT_ENV_VAR = \"AZUREML_SDKV2_TELEMETRY_OPTOUT\"\n\n# application insight logger name\nLOGGER_NAME = \"ApplicationInsightLogger\"\n\nSUCCESS = True\nFAILURE = False\n\nTRACEBACK_LOOKUP_STR = \"Traceback (most recent call last)\"\n\n# extract traceback path from message\nreformat_traceback = True\n\ntest_subscriptions = [\n \"b17253fa-f327-42d6-9686-f3e553e24763\",\n \"test_subscription\",\n \"6560575d-fa06-4e7d-95fb-f962e74efd7a\",\n \"b17253fa-f327-42d6-9686-f3e553e2452\",\n \"74eccef0-4b8d-4f83-b5f9-fa100d155b22\",\n \"4faaaf21-663f-4391-96fd-47197c630979\",\n \"00000000-0000-0000-0000-000000000\",\n]\n\n\nclass CustomDimensionsFilter(logging.Filter):\n \"\"\"Add application-wide properties to AzureLogHandler records\"\"\"\n\n def __init__(self, custom_dimensions=None): # pylint: disable=super-init-not-called\n self.custom_dimensions = custom_dimensions or {}\n\n def filter(self, record):\n \"\"\"Adds the default custom_dimensions into the current log record\"\"\"\n custom_dimensions = self.custom_dimensions.copy()\n custom_dimensions.update(getattr(record, \"custom_dimensions\", {}))\n record.custom_dimensions = custom_dimensions\n\n return True\n\n\ndef in_jupyter_notebook() -> bool:\n \"\"\"\n Checks if user is using a Jupyter Notebook. 
This is necessary because logging is not allowed in\n non-Jupyter contexts.\n\n Adapted from https://stackoverflow.com/a/22424821\n \"\"\"\n try: # cspell:ignore ipython\n from IPython import get_ipython\n\n if \"IPKernelApp\" not in get_ipython().config:\n return False\n except ImportError:\n return False\n except AttributeError:\n return False\n return True\n\n\ndef is_telemetry_collection_disabled():\n telemetry_disabled = getenv(AZUREML_SDKV2_TELEMETRY_OPTOUT_ENV_VAR)\n if telemetry_disabled and (telemetry_disabled.lower() == \"true\" or telemetry_disabled == \"1\"):\n return True\n if not in_jupyter_notebook:\n return True\n return False\n\n\n# def get_appinsights_log_handler(\n# user_agent,\n# *args, # pylint: disable=unused-argument\n# instrumentation_key=None,\n# component_name=None,\n# **kwargs\n# ):\n# \"\"\"Enable the OpenCensus logging handler for specified logger and instrumentation key to send info to AppInsights.\n\n# :param user_agent: Information about the user's browser.\n# :type user_agent: Dict[str, str]\n# :param instrumentation_key: The Application Insights instrumentation key.\n# :type instrumentation_key: str\n# :param component_name: The component name.\n# :type component_name: str\n# :param args: Optional arguments for formatting messages.\n# :type args: list\n# :param kwargs: Optional keyword arguments for adding additional information to messages.\n# :type kwargs: dict\n# :return: The logging handler.\n# :rtype: opencensus.ext.azure.log_exporter.AzureLogHandler\n# \"\"\"\n# try:\n# if instrumentation_key is None:\n# instrumentation_key = INSTRUMENTATION_KEY\n\n# if is_telemetry_collection_disabled():\n# return logging.NullHandler()\n\n# if not user_agent or not user_agent.lower() == USER_AGENT.lower():\n# return logging.NullHandler()\n\n# if \"properties\" in kwargs and \"subscription_id\" in kwargs.get(\"properties\"):\n# if kwargs.get(\"properties\")[\"subscription_id\"] in test_subscriptions:\n# return logging.NullHandler()\n\n# child_namespace = component_name or __name__\n# current_logger = logging.getLogger(AML_INTERNAL_LOGGER_NAMESPACE).getChild(child_namespace)\n# current_logger.propagate = False\n# current_logger.setLevel(logging.CRITICAL)\n\n# custom_properties = {\"PythonVersion\": platform.python_version()}\n# custom_properties.update({\"user_agent\": user_agent})\n# if \"properties\" in kwargs:\n# custom_properties.update(kwargs.pop(\"properties\"))\n# handler = AzureLogHandler(connection_string=f'InstrumentationKey={instrumentation_key}')\n# current_logger.addHandler(handler)\n# handler.addFilter(CustomDimensionsFilter(custom_properties))\n\n# return handler\n# except Exception: # pylint: disable=broad-except\n# # ignore exceptions, telemetry should not block\n# return logging.NullHandler()\n","sub_path":"sdk/ml/azure-ai-ml/azure/ai/ml/_telemetry/logging_handler.py","file_name":"logging_handler.py","file_ext":"py","file_size_in_byte":4913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"498527786","text":"'''\nGiven an integer array nums and an integer k, return the k most frequent elements. 
You may return the answer in any order.\n'''\nfrom time import time\nfrom typing import List\n\n\nclass Solution:\n def topKFrequent(self, nums: List[int], k: int) -> List[int]:\n res, freq = [], {}\n for n in nums:\n freq[n] = 1 + freq.get(n, 0)\n\n for _ in range(k):\n # Get number corresponding to max count then delete\n num = max(freq, key=freq.get)\n res.append(num)\n del freq[num]\n\n return res\n\n def reference(self, nums: List[int], k: int) -> List[int]:\n count = {}\n freq = [[] for i in range(len(nums) + 1)]\n\n for n in nums:\n count[n] = 1 + count.get(n, 0)\n for n, c in count.items():\n freq[c].append(n)\n\n res = []\n for i in range(len(freq) - 1, 0, -1):\n for n in freq[i]:\n res.append(n)\n if len(res) == k:\n return res\n\n def quantify(self, test_cases, runs=50000):\n sol_start = time()\n for i in range(runs):\n for case in test_cases:\n if i == 0:\n print(self.topKFrequent(*case))\n else:\n self.topKFrequent(*case)\n print(f'Runtime for our solution: {time() - sol_start}\\n')\n\n ref_start = time()\n for i in range(0, runs):\n for case in test_cases:\n if i == 0:\n print(self.reference(*case))\n else:\n self.reference(*case)\n print(f'Runtime for reference: {time() - ref_start}')\n\n\nif __name__ == '__main__':\n test = Solution()\n test_cases = [([1, 1, 1, 2, 2, 3], 2), ([1], 1)]\n test.quantify(test_cases)\n","sub_path":"Blind 75/01 - Arrays and Hashing/347-top-k-frequent-elements.py","file_name":"347-top-k-frequent-elements.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"152089468","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('usuario', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='usuario',\n name='avatar',\n field=models.ImageField(default=datetime.datetime(2015, 10, 15, 13, 31, 43, 387558, tzinfo=utc), upload_to=b'usuarios/avatar/'),\n preserve_default=False,\n ),\n ]\n","sub_path":"server/usuario/migrations/0002_usuario_avatar.py","file_name":"0002_usuario_avatar.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"47697334","text":"import cv2\r\nimport numpy as np\r\n\r\nimg = cv2.imread('panda.jpg',cv2.IMREAD_COLOR)\r\ncv2.line(img,(0,0),(150,150),(255,0,0),15) #drawing line\r\ncv2.rectangle(img,(250,250),(750,500),(0,0,255),10) #drawing rectangle\r\ncv2.circle(img,(640,320),20,(0,0,255),-1) #drawing circle thickness -1 fills up the circle\r\n\r\npts= np.array([[50,100],[250,100],[960,500],[50,500],[750,250]],np.int32) # points of polygon\r\ncv2.polylines(img,[pts],True,(255,255,0),3) #drawing a polygon\r\n\r\nfont = cv2.FONT_HERSHEY_SIMPLEX # font\r\ncv2.putText(img,'TEST',(750,600),font,3,(0,255,255),6,cv2.LINE_AA) #writing text\r\n\r\ncv2.imshow('image',img)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","sub_path":"Test/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"53654862","text":"import os\n\nimport matplotlib\nimport matplotlib.pyplot as plt\nmatplotlib.use('Agg')\n\nimport torch\nfrom torch.nn import functional as F\nfrom torch import tensor\n\nfrom nn.models import Encoder, Decoder, 
Classifier\nfrom plots.scatter_plot_test import plot\nimport constants_cmap\nfrom datasets import datasets\nimport numpy as np\nfrom sklearn.decomposition import PCA\n\nimport matplotlib.cm as cm\nimport matplotlib.colors as ml_colors\nfrom matplotlib.lines import Line2D\nfrom sklearn.neighbors import KNeighborsClassifier\n\nfrom sklearn.model_selection import train_test_split\n\ndef knn(X_train,y_train, X_test, y_test):\n neigh = KNeighborsClassifier(n_neighbors=10)\n neigh.fit(X_train, y_train)\n score=neigh.score(X_test, y_test)\n print(score)\n\n return score\n\n\ndef plot_bu(model, test_loader, device, suffix, path_to_save, dataset_names, colormap, bg_color):\n zs=tensor([])\n mus=tensor([])\n logvars=tensor([])\n labels=tensor([]).long()\n\n for batch_idx, (data, label) in enumerate(test_loader):\n data = data.to(device)\n z, mu, logvar, _ = model(data)\n zs=torch.cat((zs, z), 0)\n mus=torch.cat((mus, mu), 0)\n logvars=torch.cat((logvars, logvar), 0)\n labels=torch.cat((labels, label), 0)\n\n # xs,ys=list(zip(*zs.cpu().numpy()))\n zs=zs.cpu().numpy()\n labels=labels.cpu().numpy()\n\n # np.save(os.path.join(path_to_save, \"latent_features{}.npy\".format(suffix)), np.hstack([zs, labels.reshape(-1,1), [[dataset_names[a]] for a in labels]]))\n X_pca = zs # PCA(n_components=2).fit_transform(zs)\n\n limit=10\n labels=np.array([labels[i] for i, a in enumerate(zs) if np.abs(a[0]) 9:\r\n byte2Int = byte2Int - 87\r\n print(\"rxd[%d]:\" % (i) + str(byte2Int), end=\"\\t\")\r\n tmp[i] = byte2Int\r\n if i == num - 1:\r\n print(' len: %d' % (len(mainPage.y_temp)))\r\n\r\n if num != 0:\r\n mainPage.x.append(len(mainPage.y_temp))\r\n mainPage.y_temp.append(tmp[2] * 16 + tmp[3] * 1)\r\n mainPage.y_rop.append(tmp[4] * 16 + tmp[5] * 1)\r\n\r\n print(\"-------------------------\")\r\n ser.close() # 关闭串口\r\n self.ui.editTime.setText(\"\")\r\n\r\n plt.subplots_adjust(wspace=0.5, hspace=0) # 调整子图间距\r\n fig1 = plt.subplot(121)\r\n plt.plot(mainPage.y_temp)\r\n plt.title('Temperature', fontsize=20)\r\n plt.xlabel('Time / s', fontsize=12)\r\n plt.ylabel('Temperature / °C', fontsize=12)\r\n fig2 = plt.subplot(122)\r\n plt.plot(mainPage.y_rop)\r\n plt.title('Optical', fontsize=20)\r\n plt.xlabel('Time / s', fontsize=12)\r\n plt.ylabel('Optical / Lux', fontsize=12)\r\n plt.show()\r\n\r\n def about(self):\r\n # self.ui.close()\r\n SI.aboutPage = aboutPage()\r\n SI.aboutPage.ui.show()\r\n\r\n def exit(self):\r\n self.ui.close()\r\n\r\n\r\napp = QApplication([])\r\nSI.mainPage = mainPage()\r\nSI.mainPage.ui.show()\r\napp.exec_()\r\n# pyinstaller AppLcj.py --noconsole --hidden-import PySide2.QtXml","sub_path":"Python-based Host Progra/AppLcj.py","file_name":"AppLcj.py","file_ext":"py","file_size_in_byte":5152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"308936967","text":"from setuptools import setup\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(name='fhem',\n version='0.5.5',\n description='Python API for FHEM home automation server',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'License :: OSI Approved :: MIT License',\n ],\n keywords='fhem home automation',\n url='http://github.com/domschl/python-fhem',\n author='Dominik Schloesser',\n author_email='dsc@dosc.net',\n license='MIT',\n packages=['fhem'],\n 
zip_safe=False)\n","sub_path":"fhem/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"435712690","text":"\"\"\"School URL Configuration\r\n\r\nThe `urlpatterns` list routes URLs to views. For more information please see:\r\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\r\nExamples:\r\nFunction views\r\n 1. Add an import: from my_app import views\r\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\r\nClass-based views\r\n 1. Add an import: from other_app.views import Home\r\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\r\nIncluding another URLconf\r\n 1. Import the include() function: from django.urls import include, path\r\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\r\n\"\"\"\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\nfrom django.contrib.auth import views as auth_views\r\n\r\nfrom users import views\r\n\r\nurlpatterns = [\r\n path('admin/', admin.site.urls),\r\n path('', views.home, name='home'),\r\n path('register/', views.register, name='register'),\r\n\r\n path(\"login/\", views.login_request, name='login'),\r\n path('logout/', views.logout_request, name='logout'),\r\n\r\n path('welcome/', views.welcome_page, name='welcome'),\r\n path('quiz/', views.quiz_page, name='quiz'),\r\n path('result/', views.result, name='result'),\r\n\r\n path('full_result/', views.full_result, name='full_result')\r\n]","sub_path":"School/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"296242676","text":"import numpy as np\nimport struct\n\n#initialize:\n\ncell_size= 1 * [0.]\nLevel_array=1 * [0.]\nx = 1 * [0.]\ny = 1 * [0.]\nz = 1 * [0.]\nVx = 1 * [0.]\nVy = 1 * [0.]\nVz = 1 * [0.]\nDen_array= 1 * [0.]\nTem_array= 1 * [0.]\nZSNII_array= 1 * [0.]\n\ni=1\nwith open('FL633_G016.a0.115.dat', 'rb') as f:\n\n read_data = f.read()\n N=int(len(read_data)/(13*4)) # number of bytes in the file\n print (N, ' number of bytes in the file')\n\n for i in range(1,N):\n f.seek(i*13*4+4)\n bytes = f.read(16)\n cxyz = struct.unpack('>ffff', bytes)\n#\n f.seek(i*13*4+20)\n bytes = f.read(12)\n velo= struct.unpack('>fff', bytes) \n#\n f.seek(i*13*4+32)\n bytes = f.read(4)\n Den = struct.unpack('>f', bytes)\n#\n f.seek(i*13*4+36)\n bytes = f.read(4)\n T = struct.unpack('>f', bytes)\n#\n f.seek(i*13*4+40)\n bytes = f.read(4)\n ZSNII = struct.unpack('>f', bytes)\n print (Den, T, ZSNII)\n# \n print (i, cxyz, Den, T)\n print (i, cxyz[0]) # cell_size\n print (i, cxyz[1]) # x\n print (i, cxyz[2])# y\n print (i, cxyz[3]) # z\n print (i, velo[0]) # Vx\n print (i, velo[1]) # Vy\n print (i, velo[2]) # Vz\n print (i, Den) # density\n print (i, T) # temperature\n print (i, ZSNII) # SNII metallicity mass fraction\n\n# stop\n\n if i==1:\n cell_max_reso=cxyz[0]\n cell_size[0]= cxyz[0]\n Level_array[0]= 1.\n x[0] = cxyz[1]\n y[0] = cxyz[2]\n z[0] = cxyz[3]\n Den_array[0]= Den\n Tem_array[0]= T\n ZSNII_array[0]=ZSNII\n else:\n cell_size.append( cxyz[0] )\n Level_array.append( np.log2(cxyz[0]/cell_max_reso) ) \n x.append( cxyz[1] )\n y.append( cxyz[2] )\n z.append( cxyz[3] )\n Den_array.append ( Den )\n Tem_array.append ( T )\n ZSNII_array.append(ZSNII)\n \n i=i+1\n\nLevel_max=np.log2(cxyz[0]/cell_max_reso)\n \nprint (i-1) \n\nLevel_array= [ Level_max - j for j in Level_array] \nprint 
(Level_array[0], Level_array[i-2])\n\n \n \n","sub_path":"read_outputs.py","file_name":"read_outputs.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"131936425","text":"#\n# Copyright (c) 2016, Novartis Institutes for BioMedical Research Inc.\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met: \n#\n# * Redistributions of source code must retain the above copyright \n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following \n# disclaimer in the documentation and/or other materials provided \n# with the distribution.\n# * Neither the name of Novartis Institutes for BioMedical Research Inc. \n# nor the names of its contributors may be used to endorse or promote \n# products derived from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#\n# Created by Nadine Schneider, June 2016\n\n\nimport numpy as np\nimport pandas as pd\nimport copy\nimport re\nfrom rdkit.Chem import PandasTools\nfrom IPython.display import SVG\n\n# generate an HTML table of the svg images to visulize them nicely in the Jupyter notebook \nPandasTools.RenderImagesInAllDataFrames(images=True)\ndef drawSVGsToHTMLGrid(svgs, cssTableName='default', tableHeader='', namesSVGs=[], size=(150,150), numColumns=4, numRowsShown=2, noHeader=False):\n rows=[]\n names=copy.deepcopy(namesSVGs)\n rows = [SVG(i).data if i.startswith(' 0:\n rows+=['']*(numColumns-x)\n d+=1\n if len(names)>0:\n names+=['']*(numColumns-x)\n rows=np.array(rows).reshape(d,numColumns)\n finalRows=[]\n if len(names)>0:\n names = np.array(names).reshape(d,numColumns)\n for r,n in zip(rows,names):\n finalRows.append(r)\n finalRows.append(n)\n d*=2\n else:\n finalRows=rows\n\n headerRemove = int(max(numColumns,d))\n df=pd.DataFrame(finalRows)\n\n style = '\\n'\n if not noHeader:\n style += '
'+str(tableHeader)+'
\\n'\n style += '
\\n'\n dfhtml=style+df.to_html()+'\\n
\\n'\n dfhtml=dfhtml.replace('class=\"dataframe\"','class=\"'+cssTableName+'\"')\n dfhtml=dfhtml.replace('','')\n for i in range(0,headerRemove):\n dfhtml=dfhtml.replace(''+str(i)+'','')\n return dfhtml\n\n# build an svg grid image to print\ndef SvgsToGrid(svgs, labels, svgsPerRow=4,molSize=(250,150),fontSize=12):\n \n matcher = re.compile(r'^(<.*>\\n)(\\n)(.*)',re.DOTALL) \n hdr='' \n ftr='' \n rect='' \n nRows = len(svgs)//svgsPerRow \n if len(svgs)%svgsPerRow : nRows+=1 \n blocks = ['']*(nRows*svgsPerRow)\n labelSizeDist = fontSize*5\n fullSize=(svgsPerRow*(molSize[0]+molSize[0]/10.0),nRows*(molSize[1]+labelSizeDist))\n print(fullSize)\n\n count=0\n for svg,name in zip(svgs,labels):\n h,r,b = matcher.match(svg).groups()\n if not hdr: \n hdr = h.replace(\"width='\"+str(molSize[0])+\"px'\",\"width='%dpx'\"%fullSize[0])\n hdr = hdr.replace(\"height='\"+str(molSize[1])+\"px'\",\"height='%dpx'\"%fullSize[1])\n if not rect: \n rect = r\n legend = '\\n'\n legend += ''+name.split('|')[0]+'\\n'\n if len(name.split('|')) > 1:\n legend += ''+name.split('|')[1]+'\\n'\n legend += '\\n'\n blocks[count] = b + legend\n count+=1\n\n for i,elem in enumerate(blocks): \n row = i//svgsPerRow \n col = i%svgsPerRow \n elem = rect+elem \n blocks[i] = '%s'%(col*(molSize[0]+molSize[0]/10.0),row*(molSize[1]+labelSizeDist),elem) \n res = hdr + '\\n'.join(blocks)+ftr \n return res \n","sub_path":"ChemTopicModel/utilsDrawing.py","file_name":"utilsDrawing.py","file_ext":"py","file_size_in_byte":5777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"568713820","text":"inset1 = {'pycharm','ipython','vscode','codepen','codesandbox'}\ninset2 = {'jsfiddle','eclipse','git','aws'}\ncount = 0\nfor i in inset1: \n for j in inset2: \n if i != j:\n j =+1\n else:\n count = +1\n j = +1\nif count == 0:\n print (\"It is disjoint set\")\nelse:\n print (\"It is not disjoint set\")\n\n\n \n \n","sub_path":"submissions/sp_009_gayathri/week_12/day_4/session_2/check_disjoint.py","file_name":"check_disjoint.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"263269663","text":"'''\nROC plot of a dataset\n'''\nimport argparse\nimport pickle\nimport numpy as np\nfrom scipy import interp\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc\n\ndef roc_process(roc_list, mean_fpr):\n '''\n Average the data over folds\n '''\n tprs = []\n aucs = [] \n i = 0\n for roc in roc_list:\n fpr = roc['fpr']\n tpr = roc['tpr']\n auc_value = roc['auc']\n tprs.append(interp(mean_fpr, fpr, tpr))\n aucs.append(auc_value)\n tprs[-1][0] = 0.0\n #plt.plot(fpr, tpr, lw=1, alpha=0.3, label='ROC fold %d (AUC = %0.2f)' % (i, auc))\n i+=1\n mean_tpr = np.mean(tprs, axis=0)\n mean_tpr[-1] = 1.0\n mean_auc = auc(mean_fpr, mean_tpr)\n return mean_tpr, mean_auc \n\n\nif __name__==\"__main__\":\n result_dir_0 = './results/roc_erneg_use_structual_features_True_p_value_0.05.pickle' \n result_dir_1 = './results/roc_erpos_use_structual_features_True_p_value_0.05.pickle'\n \n result_file_0 = open(result_dir_0, 'rb')\n roc_list_0 = pickle.load(result_file_0) \n result_file_1 = open(result_dir_1, 'rb')\n roc_list_1 = pickle.load(result_file_1)\n \n mean_fpr = np.linspace(0, 1, 100)\n mean_tpr_0, mean_auc_0 = roc_process(roc_list_0, mean_fpr)\n mean_tpr_1, mean_auc_1 = roc_process(roc_list_1, mean_fpr)\n\n # plot roc curve of random\n\n plt.plot(mean_fpr, mean_tpr_1, dashes = [6, 1, 1, 1, 1, 1], color='g', 
label='ER+', lw=2, alpha=.8)\n plt.plot(mean_fpr, mean_tpr_0, dashes = [6, 1, 1, 1], color='b', label='ER-', lw=2, alpha=.8)\n plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', label='Random', alpha=.8)\n print('AUC for ER+:', mean_auc_1)\n print('AUC for ER-:', mean_auc_0)\n\n plt.xlim([-0.05, 1.05])\n plt.ylim([-0.05, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic')\n plt.legend(loc=\"lower right\")\n plt.show()","sub_path":"p_value_classification/roc_machailidou.py","file_name":"roc_machailidou.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"540917336","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('catalog', '0003_auto_20150502_1639'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='category',\n options={'ordering': ('position',), 'verbose_name': 'категория', 'verbose_name_plural': 'категории'},\n ),\n migrations.AlterModelOptions(\n name='tattoo',\n options={'ordering': ('position',), 'verbose_name': 'тату', 'verbose_name_plural': 'тату'},\n ),\n migrations.AddField(\n model_name='tattoo',\n name='is_main',\n field=models.BooleanField(verbose_name='для главной', default=False),\n ),\n migrations.AlterField(\n model_name='category',\n name='position',\n field=models.PositiveSmallIntegerField(verbose_name='cортировка', help_text='Индекс сортировки (по возрастанию).', default=0),\n ),\n migrations.AlterField(\n model_name='tattoo',\n name='position',\n field=models.PositiveSmallIntegerField(verbose_name='cортировка', help_text='Индекс сортировки (по возрастанию).', default=0),\n ),\n ]\n","sub_path":"catalog/migrations/0004_auto_20150512_1251.py","file_name":"0004_auto_20150512_1251.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"28370439","text":"\"\"\"cofouter URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.8/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Add an import: from blog import urls as blog_urls\n 2. 
Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))\n\"\"\"\nfrom django.conf.urls import include, url, patterns\nfrom django.conf import settings\nfrom django.contrib import admin\nfrom django.contrib.staticfiles.urls import staticfiles_urlpatterns\nfrom cofouter import views\nfrom wiki.urls import get_pattern as get_wiki_pattern\nfrom django_nyt.urls import get_pattern as get_nyt_pattern\n\nurlpatterns = [\n url(r'^$', views.landing, name=\"landing\"),\n url(r'^register/$', views.register, name=\"register\"),\n url(r'^about/$', views.about, name=\"about\"),\n url(r'^evesso.*', views.ssologin, name=\"evesso\"),\n url(r'^admin/', include(admin.site.urls)),\n url(r'^applications/', include('applications.urls', namespace='applications')),\n url(r'^srp/', include('srp.urls', namespace='srp')),\n url(r'^reddit/', include('subreddit.urls', namespace='subreddit')),\n url(r'^hipchat/', include('hipchat.urls', namespace='hipchat')),\n url(r'^timerboard/', include('timerboard.urls', namespace='timerboard')),\n url(r'^corpmarket/', include('corpmarket.urls', namespace=\"corpmarket\")),\n url(r'^helpdesk/', include('helpdesk.urls', namespace=\"helpdesk\")),\n url(r'^skillchecker/', include('skillchecker.urls', namespace=\"skillchecker\")),\n url(r'^wikinotifications/', get_nyt_pattern()),\n url(r'^wiki/', get_wiki_pattern()),\n url(r'^', include('core.urls', namespace='core')),\n]\n\nif settings.DEBUG:\n urlpatterns += staticfiles_urlpatterns()\n urlpatterns += patterns('',\n url(r'^media/(?P.*)$',\n 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT,\n }),\n )","sub_path":"cofouter/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"484535975","text":"endereco = \"Rua da Flores 72, apartamento 1002, Laranjeiras, Rio de Janeiro, RJ, 23440-120\"\n\nimport re # Regular Expression -- ReqEx\n\n# 5 Dígitos + hifen (opcional) + 3 dígitos\n\npadrao = re.compile(\"[0-9]{5}[-]{0,1}[0-9]{3}\")\nbusca = padrao.search(endereco) # Match\nif busca:\n cep = busca.group()\n print(cep)\n\n","sub_path":"Formação-Python/String-em-Python-aula4/extrator-url/extrator_cep.py","file_name":"extrator_cep.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"270395771","text":"from ortools.linear_solver import pywraplp\nimport networkx as nx\nfrom shapely.geometry import Point, LineString, MultiLineString\nimport os\nimport geojson\n\n\nclass RouteOptimizer():\n def __init__(self, trail_network, mindist = 0, maxdist = 100):\n \"\"\"\n This is a mixed-integer linear program. 
It will maximize distance\n such that each node is gone through symetrically from either side\n \"\"\"\n # Make Path object a more callable object -- Fix all this\n self.trail_network = trail_network\n self.mindist = mindist\n self.maxdist = maxdist\n self.variables = {}\n self.path_groups = {}\n self.group_vars = {}\n self.group_list = []\n self.starting_trails = {}\n self.constraints = {}\n self.solver = None\n self.objective = None\n self.results = None\n self.node_variables = {}\n self.edge_limit = {}\n \n\n def set_trip_length(self, mindist, maxdist):\n self.mindist = mindist\n self.maxdist = maxdist\n self.set_distance_constraint()\n \n\n def setup_solver(self):\n self.solver = pywraplp.Solver('Backpack Trip Planner',\n pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)\n \n self.objective = self.solver.Objective()\n self.objective.SetMaximization()\n \n\n def setup_variables(self):\n \"\"\"\n Each path is setup as an integer variable. It can either be 0 or 1.\n Paths can go from Origin_to_Destination, or Destnation_to_Origin\n \"\"\"\n \n self.set_distance_constraint()\n\n start = self.constraints[\"start_node\"] = self.solver.Constraint(0, 1) \n for path in self.trail_network.edges(data=True):\n pathwaycons = self.constraints[path[2][\"name\"]] = self.solver.Constraint(0, 1)\n pathd = path[2][\"length\"]\n constraint = self.constraints[\"Trip Distance\"]\n forward = (path[0],path[1], path[2][\"name\"])\n reverse = (path[1],path[0], path[2][\"name\"])\n \n # Add the node variables\n if path[0] not in self.node_variables:\n node1 = self.node_variables[path[0]] = self.solver.IntVar(0,1,\"node_var\"+str(path[0]))\n start.SetCoefficient(node1, 1)\n \n if path[1] not in self.node_variables:\n node2 = self.node_variables[path[1]] = self.solver.IntVar(0,1,\"node_var\"+str(path[1]))\n start.SetCoefficient(node2, 1)\n \n \n \n #Had previously set values at 2, not sure why?\n self.variables[forward] = self.solver.IntVar(0, 1, \"forward_\"+str(forward))\n self.variables[reverse] = self.solver.IntVar(0, 1, \"reverse_\"+str(reverse))\n \n # Add constraints so a pathway can go either forward or backward\n pathwaycons.SetCoefficient(self.variables[forward], 1)\n pathwaycons.SetCoefficient(self.variables[reverse], 1)\n\n # Add distances to the total distance constraint\n constraint.SetCoefficient(self.variables[forward], pathd)\n constraint.SetCoefficient(self.variables[reverse], pathd)\n \n # Add distances to objective function\n self.objective.SetCoefficient(self.variables[forward], pathd)\n self.objective.SetCoefficient(self.variables[reverse], pathd)\n \n\n def set_node_constraints(self):\n \"\"\"\n Each Pathway represents leaving a node or joining a node.\n All nodes must stay at 0, otherwise it is impossible to return to\n your origin\n \"\"\"\n \n # Have each node be a variable (Start Node) <-- Done: X\n # Constraint: Only have 1 start-node\n # Node Coefficient: 1 for Node Variable\n #Pathway Constraints Below can be 0 or 1\n # Start constraint prevents a -1\n # Pathway in single direction prevents doubling back\n \n if not self.variables:\n raise Exception(\"Pathway variables need to be setup first\")\n\n for pathway in self.variables:\n intvar = self.variables[pathway]\n\n \n if pathway[0] not in self.constraints:\n self.constraints[pathway[0]] = self.solver.Constraint(0, 1)\n edge1 = self.edge_limit[pathway[0]] = self.solver.Constraint(0,2)\n \n \n if pathway[1] not in self.constraints:\n self.constraints[pathway[1]] = self.solver.Constraint(0, 1)\n edge2 = self.edge_limit[pathway[1]] = 
self.solver.Constraint(0,2)\n \n node1 = self.constraints[pathway[0]]\n node2 = self.constraints[pathway[1]]\n edge1 = self.edge_limit[pathway[0]]\n edge2 = self.edge_limit[pathway[1]]\n \n \n node1.SetCoefficient(intvar, 1)\n node2.SetCoefficient(intvar, -1)\n edge1.SetCoefficient(intvar, 1)\n edge2.SetCoefficient(intvar, 1)\n \n # Allow start_condition to add a +1\n node1.SetCoefficient(self.node_variables[pathway[0]],1)\n node2.SetCoefficient(self.node_variables[pathway[1]],1)\n \n\n def set_distance_constraint(self):\n if \"Distance\" not in self.constraints:\n self.constraints[\"Trip Distance\"] = self.solver.Constraint(self.mindist, self.maxdist)\n else:\n self.constraints[\"Trip Distance\"].SetBounds(self.mindist, self.maxdist)\n \n \n def establish_groups(self):\n \"\"\"\n Create list that keeps track of which group \n (connnected component) each node belongs to\n \"\"\"\n d = list(self.trail_network.subgraph(c) for c in nx.connected_components(self.trail_network))\n for i, group in enumerate(d):\n for node in group:\n self.path_groups[node] = i\n self.group_list.append(i)\n \n ''' \n to return more than one trail, we could adjust the number of unique_starts, but we need to\n figure out what the solver is doing exactly and how to best optimize it\n '''\n def set_grouping_constraint(self, unique_starts = 1): \n \"\"\"\n A Constraint that allows only a number of networks equal to [unique_starts] chosen\n in a given area\n \"\"\"\n if not self.path_groups:\n self.establish_groups()\n \n grp_constraint = self.constraints[\"Trail Groups\"] = self.solver.Constraint(0, unique_starts)\n for group in self.group_list:\n grp_id = self.group_vars[group] = self.solver.IntVar(0,1,str(group))\n grp_constraint.SetCoefficient(grp_id, 1)\n \n for path_key in self.variables:\n \"\"\"\n Allows a path to be selected if it falls in the same group as the\n chosen hiking group\n \"\"\"\n grp_id = self.path_groups[path_key[0]]\n identifier = \"constraint_%s\" % str(grp_id)\n \n cons = self.group_vars[identifier] = self.solver.Constraint(0,self.solver.infinity())\n path_var = self.variables[path_key]\n grp_var = self.group_vars[grp_id]\n \n cons.SetCoefficient(path_var,-1)\n cons.SetCoefficient(grp_var, 1)\n \n \n def setup_lp(self):\n self.setup_solver()\n self.setup_variables()\n self.set_node_constraints()\n \n\n def solve(self):\n result_status = self.solver.Solve()\n return result_status\n \n\n def get_results(self):\n results = []\n print(\"Total Trip Length: %s km\" % self.objective.Value())\n for key in self.variables:\n intvar = self.variables[key]\n if intvar.solution_value() > 0:\n results.append(key)\n \n self.results = results\n return results\n\n\n def save_geojson(self,path_object):\n if not self.results:\n self.get_results()\n \n results = self.results\n \n lines = []\n for path_name in results:\n pointlist = []\n path = path_object.get(path_name).points\n if path.type == 'LineString':\n points = path.coords\n else:\n points = path[0].coords\n \n for coord in points:\n pointlist.append(Point([coord[0], coord[1]]))\n lines.append(LineString(pointlist))\n \n geom_in_geojson = geojson.Feature(geometry=MultiLineString(lines), properties={})\n return geojson.dumps(geom_in_geojson)\n \n\n\n \n \n","sub_path":"app/app/tripopt/tripopt.py","file_name":"tripopt.py","file_ext":"py","file_size_in_byte":8798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"627997238","text":"import math, collections\nfrom collections import defaultdict\n\nclass 
BackoffModel:\n\n def __init__(self, corpus):\n \"\"\"Initialize your data structures in the constructor.\"\"\"\n self.unigramCounts = defaultdict(lambda: 0)\n self.table = defaultdict(lambda: defaultdict(int))\n self.words = set([])\n self.total = 0\n self.train(corpus)\n\n def train(self, corpus):\n \"\"\" Takes a corpus and trains your language model.\n Compute any counts or other corpus statistics in this function.\n \"\"\"\n for sentence in corpus.corpus:\n prevWord = None\n for datum in sentence.data:\n token = datum.word\n self.table[prevWord][token] = self.table[prevWord][token] + 1\n self.unigramCounts[token] = self.unigramCounts[token] + 1\n self.total += 1\n self.words.add(token)\n prevWord = token\n\n def score(self, sentence):\n \"\"\" Takes a list of strings as argument and returns the log-probability of the\n sentence using your language model. Use whatever data you computed in train() here.\n \"\"\"\n score = 0.0\n prevWord = None\n vocab = len(self.words)\n for token in sentence:\n occurances = self.table[prevWord][token]\n countPrev = self.unigramCounts[prevWord]\n\n probability = float(occurances) / (float(countPrev) + vocab)\n\n #Test results of bigram\n if probability > 0:\n score += math.log(probability)\n else: #Back off to unigram\n count = self.unigramCounts[token]\n if count > 0:\n score += math.log(count)\n score -= math.log(self.total)\n\n prevWord = token\n return abs(score)\n","sub_path":"hw1/BackoffModel.py","file_name":"BackoffModel.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"421082667","text":"import django\ndjango.setup()\nfrom core.bo.curso import get_cursos\nfrom core.bo.sala import get_salas\nfrom django.test import TestCase\nfrom core.tests.povoar_testes import criar_dados, remover_dados\nfrom core.dao.centro_dao import get_centro_by_id, get_centros\nfrom core.dao.componente_dao import get_componentes_by_depto\nfrom core.dao.departamento_dao import get_depto_by_id, get_departamentos\n\n\nclass DAOTests(TestCase):\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n print('\\nDAOTests')\n criar_dados()\n\n @classmethod\n def tearDownClass(cls):\n super().tearDownClass()\n remover_dados()\n\n def test_get_centros(self):\n centros = get_centros()\n\n self.assertIsNotNone(centros, 'Testando centros')\n self.assertTrue(len(centros) > 0, 'Testando centros')\n\n def test_get_ceres(self):\n id_centro = 9999\n codigo = 9999\n sigla = 'CTESTE'\n nome = 'Centro de Teste'\n endereco = 'Rua Joaquim Gregório, Penedo, Caicó - RN'\n site = 'https://www.ceres.ufrn.br/'\n\n centro = get_centro_by_id(9999)\n\n self.assertEqual(id_centro, centro.id_unidade, 'Testando Id Unidade')\n self.assertEqual(codigo, centro.codigo, 'Testando Código')\n self.assertEqual(sigla, centro.sigla, 'Testando Sigla')\n self.assertEqual(nome, centro.nome, 'Testando Nome')\n self.assertEqual(endereco, centro.endereco, 'Testando Endereço')\n self.assertEqual(site, centro.site, 'Testando Site')\n\n centro = get_centro_by_id(6666)\n self.assertIsNone(centro)\n\n def test_get_centro(self):\n id_centro = 9999\n codigo = 9999\n sigla = 'CTESTE'\n nome = 'Centro de Teste'\n endereco = 'Rua Joaquim Gregório, Penedo, Caicó - RN'\n site = 'https://www.ceres.ufrn.br/'\n\n centro = get_centro_by_id(id_centro)\n\n self.assertEqual(id_centro, centro.id_unidade, 'Testando Id Unidade')\n self.assertEqual(codigo, centro.codigo, 'Testando Código')\n self.assertEqual(sigla, centro.sigla, 'Testando 
Sigla')\n self.assertEqual(nome, centro.nome, 'Testando Nome')\n self.assertEqual(endereco, centro.endereco, 'Testando Endereço')\n self.assertEqual(site, centro.site, 'Testando Site')\n\n def test_get_deptos_centro(self):\n deptos = get_departamentos()\n\n self.assertIsNotNone(deptos, 'Testando departamentos dos centros')\n self.assertTrue(len(deptos) > 0, 'Testando qtd departamentos')\n\n def test_get_componentes_by_depto(self):\n depto = get_depto_by_id(9998)\n ccs = get_componentes_by_depto(depto)\n\n self.assertEqual(4, len(ccs), 'Testando componentes')\n\n def test_get_salas(self):\n salas = get_salas()\n\n self.assertIsNotNone(salas, 'Testando salas')\n self.assertTrue(len(salas) > 0, 'Testando salas')\n\n def test_get_salas(self):\n cursos = get_cursos()\n\n self.assertIsNotNone(cursos, 'Testando cursos')\n self.assertTrue(len(cursos) > 0, 'Testando cursos')\n","sub_path":"core/tests/test_dao.py","file_name":"test_dao.py","file_ext":"py","file_size_in_byte":3063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"441787645","text":"\"\"\"P-Roc hardware platform devices.\"\"\"\nimport logging\n\nfrom mpf.platforms.interfaces.switch_platform_interface import SwitchPlatformInterface\nfrom mpf.platforms.interfaces.driver_platform_interface import DriverPlatformInterface\nfrom mpf.platforms.interfaces.gi_platform_interface import GIPlatformInterface\nfrom mpf.platforms.interfaces.matrix_light_platform_interface import MatrixLightPlatformInterface\nfrom mpf.core.utility_functions import Util\n\n\nclass PROCSwitch(SwitchPlatformInterface):\n\n \"\"\"P-ROC switch object which is use to store the configure rules and config.\"\"\"\n\n def __init__(self, config, number, notify_on_nondebounce):\n \"\"\"Initialise P-ROC switch.\"\"\"\n super().__init__(config, number)\n self.log = logging.getLogger('PROCSwitch')\n self.notify_on_nondebounce = notify_on_nondebounce\n self.hw_rules = {\"closed_debounced\": [],\n \"closed_nondebounced\": [],\n \"open_debounced\": [],\n \"open_nondebounced\": []}\n\n\nclass PROCDriver(DriverPlatformInterface):\n\n \"\"\"A P-ROC driver/coil.\n\n Base class for drivers connected to a P3-ROC. 
This class is used for all\n drivers, regardless of whether they're connected to a P-ROC driver board\n (such as the PD-16 or PD-8x8) or an OEM driver board.\n\n \"\"\"\n\n def __init__(self, number, config, platform):\n \"\"\"Initialise driver.\"\"\"\n self.log = logging.getLogger('PROCDriver')\n super().__init__(config, number)\n self.proc = platform.proc\n self.machine = platform.machine\n self.pdbconfig = getattr(platform, \"pdbconfig\", None)\n\n self.log.debug(\"Driver Settings for %s\", self.number)\n\n def get_board_name(self):\n \"\"\"Return board of the driver.\"\"\"\n if not self.pdbconfig:\n return \"P-Roc\"\n else:\n return \"P-Roc Board {}\".format(str(self.pdbconfig.get_coil_bank(self.config['number'])))\n\n @classmethod\n def get_pwm_on_ms(cls, coil):\n \"\"\"Find out the pwm_on_ms for this driver.\"\"\"\n # figure out what kind of enable we need:\n if coil.config['hold_power']:\n pwm_on_ms, pwm_off_ms = (Util.pwm8_to_on_off(coil.config['hold_power']))\n del pwm_off_ms\n return pwm_on_ms\n\n elif coil.config['pwm_on_ms'] and coil.config['pwm_off_ms']:\n return int(coil.config['pwm_on_ms'])\n else:\n return 0\n\n @classmethod\n def get_pwm_off_ms(cls, coil):\n \"\"\"Find out the pwm_off_ms for this driver.\"\"\"\n # figure out what kind of enable we need:\n if coil.config['hold_power']:\n pwm_on_ms, pwm_off_ms = (Util.pwm8_to_on_off(coil.config['hold_power']))\n del pwm_on_ms\n return pwm_off_ms\n\n elif coil.config['pwm_on_ms'] and coil.config['pwm_off_ms']:\n return int(coil.config['pwm_off_ms'])\n else:\n return 0\n\n def get_pulse_ms(self, coil):\n \"\"\"Find out the pulse_ms for this driver.\"\"\"\n if coil.config['pulse_ms']:\n return int(coil.config['pulse_ms'])\n else:\n return self.machine.config['mpf']['default_pulse_ms']\n\n def disable(self, coil):\n \"\"\"Disable (turn off) this driver.\"\"\"\n del coil\n self.log.debug('Disabling Driver')\n self.proc.driver_disable(self.number)\n\n def enable(self, coil):\n \"\"\"Enable (turn on) this driver.\"\"\"\n if self.get_pwm_on_ms(coil) and self.get_pwm_off_ms(coil):\n self.log.debug('Enabling. 
Initial pulse_ms:%s, pwm_on_ms: %s'\n 'pwm_off_ms: %s',\n self.get_pwm_on_ms(coil),\n self.get_pwm_off_ms(coil),\n self.get_pulse_ms(coil))\n\n self.proc.driver_patter(self.number,\n self.get_pwm_on_ms(coil),\n self.get_pwm_off_ms(coil),\n self.get_pulse_ms(coil), True)\n else:\n self.log.debug('Enabling at 100%')\n\n if not coil.config['allow_enable']:\n raise AssertionError(\"Received a command to enable this coil \"\n \"without pwm, but 'allow_enable' has not been\"\n \"set to True in this coil's configuration.\")\n\n self.proc.driver_schedule(number=self.number, schedule=0xffffffff,\n cycle_seconds=0, now=True)\n\n def pulse(self, coil, milliseconds):\n \"\"\"Enable this driver for `milliseconds`.\n\n ``ValueError`` will be raised if `milliseconds` is outside of the range\n 0-255.\n \"\"\"\n del coil\n\n self.log.debug('Pulsing for %sms', milliseconds)\n self.proc.driver_pulse(self.number, milliseconds)\n\n return milliseconds\n\n def state(self):\n \"\"\"Return a dictionary representing this driver's current configuration state.\"\"\"\n return self.proc.driver_get_state(self.number)\n\n\nclass PROCGiString(GIPlatformInterface):\n\n \"\"\"A P-ROc GI hardware device.\"\"\"\n\n def __init__(self, number, proc_driver, config):\n \"\"\"Initialise GI.\"\"\"\n self.log = logging.getLogger('PROCGiString')\n self.number = number\n self.proc = proc_driver\n self.config = config\n\n def on(self, brightness=255):\n \"\"\"Turn on GI to `brightness`.\n\n A brightness of 0 will turn it off. For values between 0 and 255 hardware pulse patter is used.\n \"\"\"\n if brightness > 255:\n brightness = 255\n\n # run the GIs at 50Hz\n duty_on = int(brightness / 12.75)\n duty_off = 20 - duty_on\n self.proc.driver_patter(self.number,\n int(duty_on),\n int(duty_off),\n 0, True)\n\n def off(self):\n \"\"\"Turn off a GI.\"\"\"\n self.proc.driver_disable(self.number)\n\n\nclass PROCMatrixLight(MatrixLightPlatformInterface):\n\n \"\"\"A P-ROC matrix light device.\"\"\"\n\n def __init__(self, number, proc_driver):\n \"\"\"Initialise matrix light device.\"\"\"\n self.log = logging.getLogger('PROCMatrixLight')\n self.number = number\n self.proc = proc_driver\n\n def off(self):\n \"\"\"Disable (turns off) this driver.\"\"\"\n self.proc.driver_disable(self.number)\n\n def on(self, brightness=255):\n \"\"\"Enable (turns on) this driver.\"\"\"\n if brightness >= 255:\n self.proc.driver_schedule(number=self.number, schedule=0xffffffff,\n cycle_seconds=0, now=True)\n elif brightness == 0:\n self.off()\n else:\n pass\n # patter rates of 10/1 through 2/9\n\n \"\"\"\n Koen's fade code he posted to pinballcontrollers:\n def mode_tick(self):\n if self.fade_counter % 10 == 0:\n for lamp in self.game.lamps:\n if lamp.name.find(\"gi0\") == -1:\n var = 4.0*math.sin(0.02*float(self.fade_counter)) + 5.0\n on_time = 11-round(var)\n off_time = round(var)\n lamp.patter(on_time, off_time)\n self.fade_counter += 1\n \"\"\" # pylint: disable=W0105\n","sub_path":"mpf/platforms/p_roc_devices.py","file_name":"p_roc_devices.py","file_ext":"py","file_size_in_byte":7340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"122959745","text":"from talon.voice import Word, Context, press, Key\nfrom talon import clip\n\nfrom ..utils import (\n insert,\n normalise_keys,\n parse_word,\n surround,\n text,\n sentence_text,\n word,\n parse_words,\n spoken_text,\n)\n\n\ndef title_case_capitalize_word(index, word, _):\n words_to_keep_lowercase = 
\"a,an,the,at,by,for,in,of,on,to,up,and,as,but,or,nor\".split(\n \",\"\n )\n if index == 0 or word not in words_to_keep_lowercase:\n return word.capitalize()\n else:\n return word\n\n\nformatters = normalise_keys(\n {\n \"thrack\": (True, lambda i, word, _: word[0:3] if i == 0 else \"\"),\n \"quattro\": (True, lambda i, word, _: word[0:4] if i == 0 else \"\"),\n \"(cram | camel)\": (\n True,\n lambda i, word, _: word if i == 0 else word.capitalize(),\n ),\n \"pathway\": (True, lambda i, word, _: word if i == 0 else \"/\" + word),\n \"dotsway\": (True, lambda i, word, _: word if i == 0 else \".\" + word),\n \"yellsmash\": (True, lambda i, word, _: word.upper()),\n \"(allcaps | yeller)\": (False, lambda i, word, _: word.upper()),\n \"yellsnik\": (\n True,\n lambda i, word, _: word.upper() if i == 0 else \"_\" + word.upper(),\n ),\n \"dollcram\": (\n True,\n lambda i, word, _: \"$\" + word if i == 0 else word.capitalize(),\n ),\n # \"champ\": (True, lambda i, word, _: word.capitalize() if i == 0 else \" \" + word),\n \"lowcram\": (\n True,\n lambda i, word, _: \"@\" + word if i == 0 else word.capitalize(),\n ),\n \"(criff | criffed)\": (True, lambda i, word, _: word.capitalize()),\n \"dotcriffed\": (True, lambda i, word, _: \".\" + word.capitalize() if i == 0 else word.capitalize()),\n \"tridal\": (False, lambda i, word, _: word.capitalize()),\n \"snake\": (True, lambda i, word, _: word if i == 0 else \"_\" + word),\n \"dotsnik\": (True, lambda i, word, _: \".\" + word if i == 0 else \"_\" + word),\n \"dot\": (True, lambda i, word, _: \".\" + word if i == 0 else \"_\" + word),\n \"smash\": (True, lambda i, word, _: word),\n \"(spine | kebab)\": (True, lambda i, word, _: word if i == 0 else \"-\" + word),\n \"title\": (False, title_case_capitalize_word),\n }\n)\n\nsurrounders = normalise_keys(\n {\n \"(surround dubstring | surround coif)\": (False, surround('\"')),\n \"(surround string | surround posh)\": (False, surround(\"'\")),\n \"(surround tics | surround glitch)\": (False, surround(\"`\")),\n \"surround prank\": (False, surround(\" \")),\n \"surround dunder\": (False, surround(\"__\")),\n \"surround angler\": (False, surround(\"<\", \">\")),\n \"surround brisk\": (False, surround(\"[\", \"]\")),\n \"surround kirk\": (False, surround(\"{\", \"}\")),\n \"surround precoif\": (False, surround('(\"', '\")')),\n \"surround prex\": (False, surround(\"(\", \")\")),\n }\n)\n\nformatters.update(surrounders)\n\n\ndef FormatText(m):\n fmt = []\n\n for w in m._words:\n if isinstance(w, Word) and w != \"over\":\n fmt.append(w.word)\n words = parse_words(m)\n if not words:\n try:\n with clip.capture() as s:\n press(\"cmd-c\")\n words = s.get().split(\" \")\n except clip.NoChange:\n words = [\"\"]\n\n tmp = []\n\n smash = False\n for i, w in enumerate(words):\n word = parse_word(w, True)\n for name in reversed(fmt):\n smash, func = formatters[name]\n word = func(i, word, i == len(words) - 1)\n tmp.append(word)\n\n sep = \"\" if smash else \" \"\n insert(sep.join(tmp))\n # if no words, move cursor inside surrounders\n if not words[0]:\n for i in range(len(tmp[0]) // 2):\n press(\"left\")\n\n\n# from ..noise import pop_control as pc\n\nctx = Context(\"formatters\")\n# ctx = Context(\"formatters\", func=lambda app, window: pc.PopControl.mode != pc.PopControl.DICTATION)\nctx.keymap(\n {\n \"phrase [over]\": spoken_text,\n \"phrase [tree]\": [spoken_text, \" tree\"],\n \"phrase [subtree]\": [spoken_text, \" subtree\"],\n\n \"squash [over]\": text,\n \"derek [] [over]\": [\" \", spoken_text],\n \"darren [] 
[over]\": [Key(\"cmd-right\"), \" \", spoken_text],\n \"(sentence | champ) [over]\": sentence_text,\n \"(comma | ,) [over]\": [\", \", spoken_text],\n \"period [over]\": [\". \", sentence_text],\n \"word \": word,\n \"(%s)+ [] [over]\" % (\" | \".join(formatters)): FormatText,\n # to match surrounder command + another command (i.e. not dgndictation)\n \"(%s)+\" % (\" | \".join(surrounders)): FormatText,\n }\n)\n\n\n","sub_path":"text/formatters.py","file_name":"formatters.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"542919220","text":"import json\nimport requests\nimport sys, os\nfrom uuid import getnode as get_mac\n\nETC_HOSTS=\"/etc/hosts\"\nETC_HOSTNAME=\"/etc/hostname\"\nDOMAIN_NAME = \"conf\" # must be written in lower-case\nconfHostname = \"\"\n\ncwd = os.path.dirname(sys.argv[0])\n\nmac = get_mac()\nmacString = ':'.join((\"%012X\" % mac)[i:i+2] for i in range(0, 12, 2))\n\nwith open(cwd + '/' + DOMAIN_NAME + '.json', 'r') as f:\n data = json.load(f)\n f.close()\n \ntry:\n if os.uname()[1] != data[DOMAIN_NAME][macString]:\n #we need to set a new hostname\n confHostname = data[DOMAIN_NAME][macString]\nexcept:\n print(\"Keine passende MAC-Adresse gefunden\")\n\n#url = 'https://your.domain.here/yourfile.json'\n\n#r = requests.get(url)\n#dataUrl = json.loads(r.content.decode())\n#print (dataUrl[\"DOMAIN_NAME\"][macString] + \" > \" + ETC_HOSTNAME )\n\nif confHostname:\n with open(ETC_HOSTNAME, 'w') as f:\n f.write(confHostname + '\\n')\n f.close()\n\n with open(ETC_HOSTS, 'r') as fp:\n lines = fp.read().split(\"\\n\")\n fp.close()\n\n with open(ETC_HOSTS, 'w') as fp: \n for i in lines:\n if '127.0.1.1' in i and DOMAIN_NAME in i:\n #print ('127.0.1.1\\t' + confHostname)\n fp.write ('127.0.1.1\\t' + confHostname+'\\n')\n else:\n if i:\n #print (i) \n fp.write (i+'\\n') \n fp.close()\n #os.system('sudo shutdown -r now')\n os.system('hostnamectl set-hostname ' + confHostname)\n","sub_path":"hostnamer.py","file_name":"hostnamer.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"214861736","text":"#!/usr/bin/python3\nimport argparse\nimport os\nimport tqdm\n\nimport torch\nimport torch.optim as optim\nimport numpy as np\n\nfrom networks.faceid.sphereface import sphere20a\n\nfrom transforms.noising import GaussianNoise\nimport torchvision\nfrom torchvision import transforms\n\nfrom utils import printoneline, dt, KFold\n\nstop_flag = False\ndef handler(signum, frame):\n print(\"Shutting down at \" + dt() + \" ...\")\n global stop_flag\n stop_flag = True\n\nimport signal\nsignal.signal(signal.SIGTERM, handler)\nsignal.signal(signal.SIGINT, handler)\n\nfrom loss import AngleLoss\nimport itertools\n\ndef test_w_denoiser(model, denoiser, test_dataloader, l2dist):\n model.eval()\n denoiser.eval()\n labels_arr = []\n distances_arr = []\n for batch_idx, data in tqdm.tqdm(enumerate(test_dataloader)):\n data_x, data_y, data_label = data\n data_x, data_y, data_label = data_x.cuda(), data_y.cuda(), data_label.cuda()\n \n# data_x = 255*data_x\n# sigma = pydlutil.wmad_estimator(data_x).cuda()\n# denoised_x = denoiser(data_x, sigma)\n# denoised_x = (denoised_x-127.5)/127.5\n \n# data_y = 255*data_y\n# sigma = pydlutil.wmad_estimator(data_y).cuda()\n# denoised_y = denoiser(data_y, sigma)\n# denoised_y = (denoised_y-127.5)/127.5\n\n data_x = 2*data_x - 1\n data_y = 2*data_y - 1\n out_x = model(data_x)\n out_y = 
model(data_y)\n \n out_dists = l2dist(out_x, out_y)\n \n distances_arr += out_dists.data.cpu().numpy().tolist()\n labels_arr += data_label.data.cpu().numpy().tolist()\n \n return np.asarray(distances_arr), np.asarray(labels_arr)\n\ndef test(model, dataloader, l2dist):\n model.eval()\n labels_arr = []\n distances_arr = []\n for batch_idx, data in enumerate(dataloader):\n data_x, data_y, data_label = data\n data_x, data_y, data_label = data_x.cuda(), data_y.cuda(), data_label.cuda()\n \n data_x = (data_x*255 - 127.5)/128\n data_y = (data_y*255 - 127.5)/128\n\n imglist = [data_x.data.cpu().numpy(), data_x.data.cpu().numpy()[:,:,:,::-1], data_y.data.cpu().numpy(), data_y.data.cpu().numpy()[:,:,:,::-1]]\n\n img = np.vstack(imglist)\n img = torch.from_numpy(img).float().cuda()\n output = model(img)\n f = output.data\n \n cur_batch_size = data_x.size(0)\n out_x, out_y = f[:cur_batch_size], f[2*cur_batch_size:3*cur_batch_size]\n out_dists = l2dist(out_x, out_y)\n \n distances_arr += out_dists.data.cpu().numpy().tolist()\n labels_arr += data_label.data.cpu().numpy().tolist()\n \n return np.asarray(distances_arr), np.asarray(labels_arr)\n\ndef k_fold_eval(dists, labels):\n thresholds = np.arange(-1.0, 1.0, 0.001)\n acc_arr = []\n for pairs in KFold(n=6000, n_folds=10):\n train_pairs, test_pairs = pairs\n t, _ = find_best(thresholds, dists[train_pairs], labels[train_pairs])\n acc_arr.append(eval_acc(t, dists[test_pairs], labels[test_pairs]))\n return np.mean(acc_arr), np.std(acc_arr)\n\ndef eval_acc(threshold, dists, labels):\n accuracy = ((dists > threshold) == labels).mean()\n return accuracy\n\ndef find_best(thresholds, dists, labels):\n best_threshold = best_acc = 0\n for threshold in thresholds:\n accuracy = eval_acc(threshold, dists, labels)\n if accuracy >= best_acc:\n best_acc = accuracy\n best_threshold = threshold\n return best_threshold, best_acc\n \nfrom datasets.lfw import LFWDataset\n\nhigh_noise_std_arr = (np.arange(30, 55, 4)/255).tolist()\nlow_noise_std_arr = (np.arange(5, 29, 4)/255).tolist()\n\ntrain_data_dir = \"/tmp/CASIA-WebFace-sphereface/\"\n\nimport torch.nn as nn\nimport math\nimport torch\nfrom torch.nn.parameter import Parameter\nimport torch.nn.functional as F\nModule = nn.Module\nimport collections\nfrom itertools import repeat\n\ndef _ntuple(n):\n def parse(x):\n if isinstance(x, collections.Iterable):\n return x\n return tuple(repeat(x, n))\n return parse\n\n_single = _ntuple(1)\n_pair = _ntuple(2)\n_triple = _ntuple(3)\n_quadruple = _ntuple(4)\n\nunfold = F.unfold\n\ndef conv2d_local(input, weight, bias=None, padding=0, stride=1, dilation=1):\n if input.dim() != 4:\n raise NotImplementedError(\"Input Error: Only 4D input Tensors supported (got {}D)\".format(input.dim()))\n if weight.dim() != 6:\n # outH x outW x outC x inC x kH x kW\n raise NotImplementedError(\"Input Error: Only 6D weight Tensors supported (got {}D)\".format(weight.dim()))\n \n outH, outW, outC, inC, kH, kW = weight.size()\n kernel_size = (kH, kW)\n \n # N x [inC * kH * kW] x [outH * outW]\n cols = unfold(input, kernel_size, dilation=dilation, padding=padding, stride=stride)\n cols = cols.view(cols.size(0), cols.size(1), cols.size(2), 1).permute(0, 2, 3, 1)\n \n out = torch.matmul(cols, weight.view(outH * outW, outC, inC * kH * kW).permute(0, 2, 1))\n out = out.view(cols.size(0), outH, outW, outC).permute(0, 3, 1, 2)\n \n if bias is not None:\n out = out + bias.expand_as(out)\n return out\n\n\nclass Conv2dLocal(Module):\n \n def __init__(self, in_height, in_width, in_channels, out_channels,\n 
kernel_size, stride=1, padding=0, bias=True, dilation=1):\n super(Conv2dLocal, self).__init__()\n \n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = _pair(kernel_size)\n self.stride = _pair(stride)\n self.padding = _pair(padding)\n self.dilation = _pair(dilation)\n \n self.in_height = in_height\n self.in_width = in_width\n self.out_height = int(math.floor(\n (in_height + 2 * self.padding[0] - self.dilation[0] * (self.kernel_size[0] - 1) - 1) / self.stride[0] + 1))\n self.out_width = int(math.floor(\n (in_width + 2 * self.padding[1] - self.dilation[1] * (self.kernel_size[1] - 1) - 1) / self.stride[1] + 1))\n self.weight = Parameter(torch.Tensor(\n self.out_height, self.out_width,\n out_channels, in_channels, *self.kernel_size))\n if bias:\n self.bias = Parameter(torch.Tensor(\n out_channels, self.out_height, self.out_width))\n else:\n self.register_parameter('bias', None)\n \n self.reset_parameters()\n \n def reset_parameters(self):\n n = self.in_channels\n for k in self.kernel_size:\n n *= k\n stdv = 1. / math.sqrt(n)\n self.weight.data.uniform_(-stdv, stdv)\n if self.bias is not None:\n self.bias.data.uniform_(-stdv, stdv)\n \n def __repr__(self):\n s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}'\n ', stride={stride}')\n if self.padding != (0,) * len(self.padding):\n s += ', padding={padding}'\n if self.dilation != (1,) * len(self.dilation):\n s += ', dilation={dilation}'\n if self.bias is None:\n s += ', bias=False'\n s += ')'\n return s.format(name=self.__class__.__name__, **self.__dict__)\n \n def forward(self, input):\n return conv2d_local(\n input, self.weight, self.bias, stride=self.stride,\n padding=self.padding, dilation=self.dilation)\n\nclass FeatureDenoiser(nn.Module):\n def __init__(self, n_features=512, n_channels=10):\n super(FeatureDenoiser, self).__init__()\n self.conv1 = Conv2dLocal(n_features, 1, 1, n_channels, 1, 1, 0)\n self.prelu_1 = nn.PReLU(n_channels)\n self.conv2 = Conv2dLocal(n_features, 1, n_channels, n_channels, 1, 1, 0)\n self.prelu_2 = nn.PReLU(n_channels)\n self.conv3 = Conv2dLocal(n_features, 1, n_channels, 1, 1, 1, 0)\n self.n_channels = n_channels\n \n def forward(self, x):\n x = x.view(x.size()[0], 1, x.size()[1], 1)\n x = x + self.conv3(self.prelu_2(self.conv2(self.prelu_1(self.conv1(x)))))\n x = x.view(x.size()[0],-1)\n return x\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Validation script')\n parser.add_argument('-d', '--device', type=str, required=True,\n help='indices of GPUs to enable (default: all)')\n parser.add_argument('-b', '--batch_size', type=int, default=32,\n help='batch_size (default: 32)')\n args = parser.parse_args()\n \n os.environ[\"CUDA_DEVICE_ORDER\"]=\"PCI_BUS_ID\" \n os.environ[\"CUDA_VISIBLE_DEVICES\"]=args.device\n \n fdn = FeatureDenoiser(n_channels=3)\n faceid = sphere20a(feature=True, dn_block=None)\n# faceid_ckpt_path = \"/home/safin/FaceReID/ckpt/sphereface_01.01_high_noise/01.01_sphere20a_40.pth\" #trained for high noise\n# faceid_ckpt_path = \"/home/safin/FaceReID/ckpt/faceid_joint_3stages_20.01_8\"\n# faceid_ckpt_path = \"/home/safin/sphereface_pytorch/sphere20a_19.pth\"\n# faceid_ckpt_path = \"/home/safin/FaceReID/ckpt/faceid_joint_19.01_11\" #trained for high noise\n\n# faceid_ckpt_path = \"ckpt/1stage_udnet_fixed_sphereface_27.01/faceid/faceid_1stage_udnet_fixed_sphereface_27.01_30\"\n# faceid_ckpt_path = \"ckpt/1stage_udnet_fixed_sphereface_28.01_finetune/faceid/faceid_1stage_udnet_fixed_sphereface_28.01_finetune_5\"\n# 
faceid_ckpt_path = \"ckpt/joint_1stage_udnet_27.01_fixed/faceid/faceid_joint_1stage_udnet_27.01_fixed_60\"\n# faceid_ckpt_path = \"ckpt/joint_02.02/faceid/joint_02.02_32\"\n# faceid_ckpt_path = \"ckpt/joint_02.02_fixed/faceid/jo int_02.02_fixed_\"+str(n_ckpt)\n# faceid_ckpt_path = \"ckpt/joint_07.02_fixed/faceid/weights_\"+str(n_ckpt)\n n_ckpt = 90\n# faceid_ckpt_path = \"ckpt/sphereface_10.02/faceid/weights_\"+str(n_ckpt)\n# faceid_ckpt_path = \"/home/safin/FaceReID/ckpt/sphereface_14.02/faceid/weights_19\" 0.984\n faceid_ckpt_path = \"/home/safin/ckpt/sphereface_clean/sphere20a_19.pth\"\n# faceid_ckpt_path = \"/home/safin/ms-thesis/ckpt/1st_udnet7pa_sphereface_dn_24.02/faceid/weights_1\"\n# faceid_ckpt_path = \"/home/safin/sphereface_pytorch/sphere20a_19.pth\"\n faceid.load_state_dict(torch.load(faceid_ckpt_path))\n faceid = faceid.cuda()\n\n basic_transform = transforms.Compose([\n transforms.ToTensor()\n ])\n noise_transform = transforms.Compose([\n transforms.ToTensor(),\n GaussianNoise(high_noise_std_arr, clamp=[0,1])\n ])\n \n transform = basic_transform #noise_transform\n lfw_data_dir = \"/home/safin/datasets/lfw/lfw-sphereface/\"\n lfw_dataset = LFWDataset(lfw_data_dir, \"/home/safin/datasets/lfw/pairs.txt\", transform, \"png\")\n dataloader_test = torch.utils.data.dataloader.DataLoader(lfw_dataset, batch_size=args.batch_size, shuffle=True, pin_memory=True, num_workers=12)\n \n print(\"The number of parameters:\", sum(p.numel() for p in faceid.parameters()))\n\n l2dist = torch.nn.CosineSimilarity().cuda()\n dists, labels = test(faceid, dataloader_test, l2dist)\n \n print(\"SphereFace, tested on high noised with denoiser:\", k_fold_eval(dists, labels))\n\n \n \n# exp = ExpRunner()\n# exp.init_model(args.device, last_ckpt=args.resume)\n# exp.run_experiments(args.name, args.epochs, batch_size=args.batch_size)\n \n# denoiser = UDNet(kernel_size = (5, 5),\n# input_channels = 3,\n# output_features = 74,\n# rbf_mixtures = 51,\n# rbf_precision = 4,\n# stages = 1)\n# denoiser_ckpt_path = \"/home/safin/FaceReID/ckpt/denoiser_joint_19.01_11\"\n# denoiser_ckpt_path = \"/home/safin/FaceReID/ckpt/model_udnet_3stages_17.01_4\" #trained for high noise\n# denoiser_ckpt_path = \"/home/safin/FaceReID/ckpt/denoiser_joint_3stages_20.01_8\" \n# denoiser_ckpt_path = \"/home/safin/FaceReID/ckpt/model_udnet_3stages_17.01_4\"\n# denoiser_ckpt_path = \"/home/safin/FaceReID/ckpt/udnet_1stage_20.01/model_udnet_1stage_20.01_5\"\n# denoiser_ckpt_path = \"ckpt/joint_02.02/denoiser/joint_02.02_35\"\n# n_ckpt = 73\n# denoiser_ckpt_path = \"ckpt/joint_02.02_fixed/denoiser/joint_02.02_fixed_\"+str(n_ckpt)\n# denoiser_ckpt_path = \"ckpt/joint_07.02_fixed/denoiser/weights_\"+str(n_ckpt)\n# denoiser.load_state_dict(torch.load(denoiser_ckpt_path))\n# freeze_model(denoiser)\n# denoiser = denoiser.cuda()\n# print(\"The number of denoiser parameters:\", sum(p.numel() for p in denoiser.parameters()))\n \n# denoiser_ckpt_path = \"/home/safin/pydl/networks/UDNet/ckpt/model_udnet_14.01_3\"\n# denoiser.load_state_dict(torch.load(denoiser_ckpt_path))\n# denoiser = denoiser.cuda()\n ","sub_path":"val_sphereface.py","file_name":"val_sphereface.py","file_ext":"py","file_size_in_byte":12611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"87"} +{"seq_id":"438658863","text":"import streamlit as st\r\nimport tempfile\r\nimport warnings\r\nimport os\r\nfrom PIL import Image\r\nfrom video import *\r\n\r\nwarnings.filterwarnings(\"ignore\", message=r\"Passing\", category=FutureWarning)\r\n\r\n\r\n# 
hide hamburger menu\r\n# hide_streamlit_style = \"\"\"\r\n# \r\n# \"\"\"\r\n# st.markdown(hide_streamlit_style, unsafe_allow_html=True)\r\n\r\n\r\n# hide footer\r\nhide_footer_style = \"\"\"\r\n