diff --git "a/4552.jsonl" "b/4552.jsonl" new file mode 100644--- /dev/null +++ "b/4552.jsonl" @@ -0,0 +1,985 @@ +{"seq_id":"29538288599","text":"from songs import Songs\r\nfrom spotifyapp import Spotify\r\n\r\n#Scrap the songs website to get the titles\r\nsongs = Songs()\r\ntitle_details = songs.scrap_songs()\r\n\r\n#Create spotify object and authenticate to generate the token and then create playlist\r\nsp = Spotify()\r\nsp.authenticate_spotify()\r\nsp.spotify_create_play_list(title_details['title_text_list'], title_details['year'])\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"sagar160589/python-spotify-private-playlist","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"13907605133","text":"from random import randint\n\ndef dnk(len_mol, time, mass_one):\n count = 0\n nukl_num = 0\n while count<=time:\n nukl_num += randint(50, 100)\n count += 1\n total_mass = (len_mol*2+nukl_num)*mass_one\n return total_mass\n\nprint(dnk(121024, 10800, 345))\n","repo_name":"askhat-aubakirov/bioinfNIS","sub_path":"exercises/aidar_aubakirov/problem1/AidarBioInfTask.py","file_name":"AidarBioInfTask.py","file_ext":"py","file_size_in_byte":273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7516366966","text":"from pysmt.shortcuts import Symbol, ForAll, Exists, And, Iff, GE, LT, Real, Int\nfrom pysmt.shortcuts import Minus, Equals, Plus, ToReal, Implies, LE, TRUE, Not\nfrom pysmt.shortcuts import Times, QuantifierEliminator\nfrom pysmt.shortcuts import is_sat, is_valid\nfrom pysmt.typing import REAL, BOOL, INT\nfrom pysmt.test import TestCase, main\nfrom pysmt.test import (skipIfNoSolverForLogic, skipIfNoQEForLogic,\n skipIfQENotAvailable)\nfrom pysmt.test.examples import get_example_formulae\nfrom pysmt.exceptions import (SolverReturnedUnknownResultError, PysmtValueError,\n NoSolverAvailableError, ConvertExpressionError)\nfrom pysmt.logics import LRA, LIA, UFLIRA\n\n\nclass TestQE(TestCase):\n\n @skipIfNoSolverForLogic(LRA)\n @skipIfNoQEForLogic(LRA)\n def test_qe_eq(self):\n qe = QuantifierEliminator(logic=LRA)\n\n varA = Symbol(\"A\", BOOL)\n varB = Symbol(\"B\", BOOL)\n\n varAt = Symbol(\"At\", REAL)\n varBt = Symbol(\"Bt\", REAL)\n\n f = And(Iff(varA, GE(Minus(varAt, varBt), Real(0))),\n Iff(varB, LT(Minus(varAt, varBt), Real(1))))\n\n qf = Exists([varBt, varA], f)\n r1 = qe.eliminate_quantifiers(qf)\n\n try:\n self.assertValid(Iff(r1, qf), logic=LRA,\n msg=\"The two formulas should be equivalent.\")\n except SolverReturnedUnknownResultError:\n pass\n\n def test_selection(self):\n with self.assertRaises(NoSolverAvailableError):\n QuantifierEliminator(logic=UFLIRA)\n\n with self.assertRaises(NoSolverAvailableError):\n QuantifierEliminator(name=\"nonexistent\")\n\n\n @skipIfNoQEForLogic(LRA)\n def test_selection_lra(self):\n QuantifierEliminator(logic=LRA)\n\n @skipIfQENotAvailable('z3')\n def test_qe_z3(self):\n qe = QuantifierEliminator(name='z3')\n self._bool_example(qe)\n self._real_example(qe)\n self._int_example(qe)\n self._alternation_bool_example(qe)\n self._alternation_real_example(qe)\n self._alternation_int_example(qe)\n self._std_examples(qe, LRA)\n self._std_examples(qe, LIA)\n self._modular_congruence(qe)\n\n # Additional test for raising error on back conversion of\n # quantified formulae\n p, q = Symbol(\"p\", INT), Symbol(\"q\", INT)\n\n f = ForAll([p], Exists([q], Equals(ToReal(p),\n 
Plus(ToReal(q), ToReal(Int(1))))))\n with self.assertRaises(PysmtValueError):\n qe.eliminate_quantifiers(f).simplify()\n\n\n @skipIfQENotAvailable('msat_fm')\n def test_qe_msat_fm(self):\n qe = QuantifierEliminator(name='msat_fm')\n self._bool_example(qe)\n self._real_example(qe)\n self._alternation_bool_example(qe)\n self._alternation_real_example(qe)\n self._std_examples(qe, LRA)\n\n with self.assertRaises(PysmtValueError):\n self._int_example(qe)\n\n with self.assertRaises(PysmtValueError):\n self._alternation_int_example(qe)\n\n # Additional test for raising error on back conversion of\n # quantified formulae\n p, q = Symbol(\"p\", INT), Symbol(\"q\", INT)\n\n f = ForAll([p], Exists([q], Equals(ToReal(p),\n Plus(ToReal(q), ToReal(Int(1))))))\n with self.assertRaises(PysmtValueError):\n qe.eliminate_quantifiers(f).simplify()\n\n\n @skipIfQENotAvailable('msat_lw')\n def test_qe_msat_lw(self):\n qe = QuantifierEliminator(name='msat_lw')\n self._bool_example(qe)\n self._real_example(qe)\n self._alternation_bool_example(qe)\n self._alternation_real_example(qe)\n self._int_example(qe)\n self._alternation_int_example(qe)\n self._std_examples(qe, LIA)\n\n self._modular_congruence(qe)\n\n # Additional test for raising error on back conversion of\n # quantified formulae\n p, q = Symbol(\"p\", INT), Symbol(\"q\", INT)\n\n f = ForAll([p], Exists([q], Equals(ToReal(p),\n Plus(ToReal(q), ToReal(Int(1))))))\n with self.assertRaises(PysmtValueError):\n qe.eliminate_quantifiers(f).simplify()\n\n def _modular_congruence(self, qe):\n p, q = (Symbol(n, INT) for n in \"pq\")\n f = Exists([q], Equals(Times(q, Int(2)), p))\n with self.assertRaises(ConvertExpressionError):\n qe.eliminate_quantifiers(f)\n\n\n def _bool_example(self, qe):\n # Bool Example\n x, y = Symbol(\"x\"), Symbol(\"y\")\n\n f = ForAll([x], Implies(x,y))\n qf = qe.eliminate_quantifiers(f).simplify()\n\n self.assertEqual(qf, y)\n\n\n def _real_example(self, qe):\n # Real Example\n r, s = Symbol(\"r\", REAL), Symbol(\"s\", REAL)\n\n f = ForAll([r], Implies(LT(Real(0), r), LT(s, r)))\n qf = qe.eliminate_quantifiers(f).simplify()\n\n self.assertEqual(qf, LE(s, Real(0)))\n\n\n def _int_example(self, qe):\n # Int Example\n p, q = Symbol(\"p\", INT), Symbol(\"q\", INT)\n\n f = ForAll([p], Implies(LT(Int(0), p), LT(q, p)))\n qf = qe.eliminate_quantifiers(f).simplify()\n\n self.assertValid(Iff(qf, LE(q, Int(0))))\n\n def _alternation_bool_example(self, qe):\n # Alternation of quantifiers\n x, y = Symbol(\"x\"), Symbol(\"y\")\n\n f = ForAll([x], Exists([y], Iff(x, Not(y))))\n qf = qe.eliminate_quantifiers(f).simplify()\n\n self.assertEqual(qf, TRUE())\n\n\n def _alternation_real_example(self, qe):\n # Alternation of quantifiers\n r, s = Symbol(\"r\", REAL), Symbol(\"s\", REAL)\n\n f = ForAll([r], Exists([s], Equals(r, Plus(s, Real(1)))))\n qf = qe.eliminate_quantifiers(f).simplify()\n\n self.assertEqual(qf, TRUE())\n\n def _alternation_int_example(self, qe):\n # Alternation of quantifiers\n p, q = Symbol(\"p\", INT), Symbol(\"q\", INT)\n\n f = ForAll([p], Exists([q], Equals(p, Plus(q, Int(1)))))\n qf = qe.eliminate_quantifiers(f).simplify()\n\n self.assertEqual(qf, TRUE())\n\n def _std_examples(self, qe, target_logic):\n for (f, validity, satisfiability, logic) in get_example_formulae():\n if logic != target_logic: continue\n qf = qe.eliminate_quantifiers(f)\n s = is_sat(qf)\n v = is_valid(qf)\n\n self.assertEqual(validity, v, f)\n self.assertEqual(satisfiability, s, f)\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"pysmt/pysmt","sub_path":"pysmt/test/test_qe.py","file_name":"test_qe.py","file_ext":"py","file_size_in_byte":6486,"program_lang":"python","lang":"en","doc_type":"code","stars":528,"dataset":"github-code","pt":"48"} +{"seq_id":"44487334082","text":"# Use the file ‘compras.csv’ as the basis for solving the following\n# exercise.\n# 6) Using the age ranges, say how many people there are in each range.\n# Use the following age ranges in the exercises where they are needed.\n# ● Young people, 18 to 25 years old\n# ● Adults, 26 to 59 years old\n# ● Seniors, 60 years old or older\n\nfrom dados import abr_dados, dicionario\n\ndados = abr_dados('compras.csv')\ninfo = dicionario(dados)\n\njovens = 0\nadultos = 0\nidosos = 0\n\nfor pessoa in info:\n    if pessoa['idade'] >= 18 and pessoa['idade'] <= 25:\n        jovens += 1\n    elif pessoa['idade'] >= 26 and pessoa['idade'] <= 59:\n        adultos += 1\n    else:\n        idosos += 1\n\nprint(f'Jovens: {jovens} pessoas\\nAdultos: {adultos} pessoas\\nIdosos: {idosos} pessoas')\n","repo_name":"JLVanin/Data-Science-Growdev","sub_path":"Introdução à Programação/Exercícios Aula 08/ex006.py","file_name":"ex006.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30735031947","text":"#Machine Learning utilities\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport operator\r\n\r\nfrom sklearn import preprocessing\r\nfrom sklearn import model_selection\r\n\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.ensemble import VotingClassifier\r\n\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.metrics import log_loss\r\nfrom sklearn.metrics import roc_curve\r\n\r\nfrom sklearn import ensemble\r\nimport xgboost as xgb\r\n\r\ndef standardize(df):\r\n    '''Returns standardized DataFrame'''\r\n\t\r\n    return (df-df.mean())/df.std()\r\n\r\ndef normalize(df):\r\n    '''Returns normalized DataFrame'''\r\n    return (df-df.min())/(df.max()-df.min())\r\n\t\r\ndef encode(df, col):\r\n    '''Returns encoded Series'''\t\r\n    \r\n    le = preprocessing.LabelEncoder()\r\n    return le.fit_transform(df[col])\t\r\n\t\r\ndef run_ml_flow(df):\r\n    '''Runs Machine Learning flow, returns evaluation DataFrame'''\r\n    \r\n    df = df.dropna()\r\n    targets = ['1D', '1W', '1M', '3M']\r\n    evaluation = pd.DataFrame(columns=targets, index=pd.MultiIndex.from_product([['AUC', 'f1', 'log loss'], ['LR', 'RF']]))\r\n\r\n    for target in targets:\r\n\r\n        #split\r\n        X_train, X_test, y_train, y_test = model_selection.train_test_split(df.values[:,:-4], df[target].map(lambda x: 1 if x > 0 else 0).values, test_size=0.2, shuffle=False)\r\n\r\n        #classifiers\r\n        clfs = {\r\n            'RF' : RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1),\r\n            'LR' : LogisticRegression(random_state=1),\r\n            #'Vote' : VotingClassifier(estimators=[('lr', LogisticRegression(random_state=1)), ('rf', RandomForestClassifier(n_estimators=50, max_depth=5, random_state=1))], voting='soft')\r\n        }\r\n\r\n        #fit\r\n        for k, clf in clfs.items():\r\n            clf.fit(X_train, y_train)\r\n\r\n        #evaluate\r\n        if target == '3M':\r\n            plt.figure(figsize=(15, 5))\r\n        \r\n        for k, clf in clfs.items():\r\n            predictions = clf.predict(X_test)\r\n            probas = clf.predict_proba(X_test)\r\n\r\n            evaluation.loc['AUC', k] = roc_auc_score(y_test, predictions)\r\n            evaluation.loc['f1', k] = f1_score(y_test, predictions)\r\n            
evaluation.loc['log loss', k] = log_loss(y_test, probas)\r\n\t\t\t\r\n if target == '3M':\r\n plot_roc(y_test, probas[:,1], k)\r\n plot_log_loss(y_test, probas[:,1], k)\r\n \r\n if target == '3M':\r\n plt.show()\t\t\r\n \r\n return evaluation\r\n\r\ndef rank_features_xgb(X, Y, columns):\r\n # fit \r\n model = xgb.XGBClassifier(n_estimators=100, max_depth=10, learning_rate=0.01, seed=0)\r\n model.fit(X, Y) \r\n\r\n #list feature importance\r\n #model.get_booster().feature_names = columns\r\n imp_dict = model.get_booster().get_fscore()\r\n imp_dict = sorted(imp_dict.items(), key=operator.itemgetter(1), reverse=True)\r\n\r\n imp_arr = np.asarray(imp_dict)\r\n indices = np.empty([imp_arr.shape[0], 0])\r\n\r\n for x in imp_arr[:,0]:\r\n indices = np.append(indices, x.replace('f', ''))\r\n\r\n indices = indices.astype(int)\r\n importances = imp_arr[:,1].astype(int) #[:25] #limit to 25\r\n #indices = indices[:25] #limit to 25\r\n\r\n # Print the feature ranking\r\n #print(\"Feature ranking:\")\r\n\r\n #for f in range(indices.shape[0] - 1):\r\n # print(f, 'index', indices[f], X.columns[indices[f]], importances[f] )\r\n\r\n # Plot the feature importances of the forest\r\n plt.figure(figsize=(15,5))\r\n plt.title(\"XGB Feature importances\")\r\n plt.bar(range(indices.shape[0]), importances[:indices.shape[0]], color=\"r\", align=\"center\")\r\n plt.xticks(range(X.shape[1] + 1), np.array(columns)[indices], rotation='vertical')\r\n plt.xlim([-1, X.shape[1] + 1])\r\n plt.show() \r\n\r\n # plot via xgb\r\n #fig, ax = plt.subplots(1,1,figsize=(15,10))\r\n #xgb.plot_importance(model, ax=ax) \r\n \r\n return indices\r\n\r\ndef rank_features_etc(X, Y, columns):\r\n # supervised ranking\r\n model = ensemble.ExtraTreesClassifier(n_estimators=100, max_depth=10, random_state=0)\r\n model.fit(X, Y)\r\n \r\n #list feature importance\r\n importances = model.feature_importances_\r\n std = np.std([tree.feature_importances_ for tree in model.estimators_], axis=0)\r\n indices = np.argsort(importances)[::-1]\r\n #indices = indices[:25] #limit to 25\r\n\t\r\n # Print the feature ranking\r\n #print(\"Feature ranking:\")\r\n\r\n #for f in range(X.shape[1] - 1):\r\n # print(f, 'index', indices[f], X.columns[indices[f]], importances[indices[f]])\r\n \r\n # Plot the feature importances of the forest\r\n plt.figure(figsize=(15,5))\r\n plt.title(\"ETC Feature Importances\")\r\n plt.bar(range(X.shape[1]), importances[indices], color=\"r\", yerr=std[indices], align=\"center\")\r\n plt.xticks(range(X.shape[1]), np.array(columns)[indices], rotation='vertical')\r\n plt.xlim([-1, X.shape[1] + 1])\r\n plt.show()\r\n \r\n return indices\t\r\n\t\r\ndef show_feature_importance(df, target_col):\r\n '''Shows feature importances by ETC and XGC'''\r\n df = df.dropna()\r\n X, Y = df.values[:,:-4], df[target_col].map(lambda x: 1 if x > 0 else 0).values\r\n indices_etc = rank_features_etc(X, Y, df.columns[:-4])\r\n indices_xgb = rank_features_xgb(X, Y, df.columns[:-4])\t\r\n \r\n # combine two methods for the feature selection\r\n topk = 5\r\n indices = np.concatenate((indices_etc[:topk], indices_xgb[:topk]), axis=0)\r\n indices = np.unique(indices)\r\n return indices\t\r\n\t\r\ndef plot_roc(y_test, probas, k):\r\n '''\r\n\tPlots ROC curve\r\n\t\r\n\ty_test -- predictions\r\n\tprobas -- probabilities\r\n\tk -- classifier\r\n\t'''\r\n \r\n plt.subplot(1, 2, 1)\r\n plt.figure(1)\r\n plt.plot([0, 1], [0, 1], 'k--')\r\n fpr, tpr, _ = roc_curve(y_test, probas)\r\n plt.plot(fpr, tpr, label=k)\r\n plt.xlabel('False Positive Rate')\r\n plt.ylabel('True 
Positive Rate')\r\n plt.title('ROC')\r\n plt.legend(loc='best')\r\n\t\r\ndef plot_log_loss(y_test, probas, k):\r\n '''\r\n\tPlots log loss for instances\r\n\t\r\n\ty_test -- predictions\r\n\tprobas -- probabilities\r\n\tk -- classifier\t\r\n\t'''\r\n\t\r\n #collect log loss for instances\r\n plt.subplot(1, 2, 2)\r\n v = []\r\n\r\n for x, y in zip(y_test,probas):\r\n v.append(ll(x, y))\r\n\r\n v = np.sort(v) \r\n\r\n #plot log loss for instances\r\n plt.plot(v, label=k)\r\n plt.axhline(y=log_loss([1, 0], [0.5, 0.5]), color='black', linestyle='--')\r\n plt.xlabel('Instances')\r\n plt.ylabel('Log Loss')\r\n plt.title('Log Loss')\r\n plt.legend(loc='best')\r\n\r\ndef ll(yt, yp):\r\n '''\r\n returns logarithmic loss\r\n \r\n yt -- prediction\r\n yp -- probability\r\n '''\r\n \r\n return -(yt * np.log(yp) + (1 - yt) * np.log(1 - yp))\t\r\n\r\ndef select_features(features, indices, df):\r\n '''\r\n Selects top features via indices, returns lean DataFrame\r\n '''\r\n\t\r\n # create new feature list\r\n cols = []\r\n\r\n for f in range(features):\r\n cols.append(df.columns[indices[f]]) \r\n print(cols)\r\n \r\n # reduce df for selected cols\r\n cols.extend(['1D', '1W', '1M', '3M'])\r\n df = pd.DataFrame(df, columns=cols)\r\n \r\n return df\t","repo_name":"algonell/ipo-miner","sub_path":"Notebooks/ml.py","file_name":"ml.py","file_ext":"py","file_size_in_byte":7196,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"48"} +{"seq_id":"13734207812","text":"# #swapping\n# a=2\n# b=4\n# a,b=b,a\n# print (a)\n# print(b)\n# #new line :\n# print(\"This is line 1\\nThis is line 2\\nThis is line 3\")\n\n#accept marks and calculate the percentage\nimport math\nname=input(\"Enter ur name here: \")\nprint(\"hello \",name,\"Enter marks of following subjects below\")\nPhy=int(input(\"Physics: \"))\nChe=int(input(\"Chem: \"))\nMat=int(input(\"Maths: \"))\nEng=int(input(\"English: \"))\nComp=int(input(\"Computer: \"))\naverage=(Phy+Che+Mat+Eng+Comp)/5\nprint(\"U have got \",average,\" percentage\")\n","repo_name":"ajayp1717/Python-Projects","sub_path":"Python Practice/PPS Lab.py","file_name":"PPS Lab.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26006897032","text":"class Solution:\n def rob(self, nums: List[int]) -> int:\n def helper(nums):\n n = len(nums)\n dp = [0] * n\n dp[0] = nums[0]\n dp[1] = max(nums[0],nums[1])\n for i in range(2, n):\n dp[i] = max(dp[i-1], dp[i-2] + nums[i])\n return dp[-1]\n\n if len(nums) ==0: return 0\n if len(nums) == 1: return nums[0]\n if len(nums) == 2: return max(nums)\n # either use first house and can't use last or last and not first:\n return max(helper(nums[:-1]), helper(nums[1:]))\n","repo_name":"ajo01/leetcode-repository","sub_path":"dynamic programming/medium/213. House Robber II.py","file_name":"213. 
House Robber II.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32304256971","text":"from typing import Any, Dict, List, Optional\nfrom xml.etree import ElementTree as ET\nimport datetime\n\nfrom moto.core import BaseBackend, BackendDict, BaseModel\n\nfrom .resources import VOICE_DATA\nfrom .utils import make_arn_for_lexicon\n\n\nclass Lexicon(BaseModel):\n def __init__(self, name: str, content: str, account_id: str, region_name: str):\n self.name = name\n self.content = content\n self.size = 0\n self.alphabet = None\n self.last_modified = None\n self.language_code = None\n self.lexemes_count = 0\n self.arn = make_arn_for_lexicon(account_id, name, region_name)\n\n self.update()\n\n def update(self, content: Optional[str] = None) -> None:\n if content is not None:\n self.content = content\n\n # Probably a very naive approach, but it'll do for now.\n try:\n root = ET.fromstring(self.content)\n self.size = len(self.content)\n self.last_modified = int( # type: ignore\n (\n datetime.datetime.now() - datetime.datetime(1970, 1, 1)\n ).total_seconds()\n )\n self.lexemes_count = len(root.findall(\".\"))\n\n for key, value in root.attrib.items():\n if key.endswith(\"alphabet\"):\n self.alphabet = value # type: ignore\n elif key.endswith(\"lang\"):\n self.language_code = value # type: ignore\n\n except Exception as err:\n raise ValueError(f\"Failure parsing XML: {err}\")\n\n def to_dict(self) -> Dict[str, Any]:\n return {\n \"Attributes\": {\n \"Alphabet\": self.alphabet,\n \"LanguageCode\": self.language_code,\n \"LastModified\": self.last_modified,\n \"LexemesCount\": self.lexemes_count,\n \"LexiconArn\": self.arn,\n \"Size\": self.size,\n }\n }\n\n def __repr__(self) -> str:\n return f\"\"\n\n\nclass PollyBackend(BaseBackend):\n def __init__(self, region_name: str, account_id: str):\n super().__init__(region_name, account_id)\n self._lexicons: Dict[str, Lexicon] = {}\n\n def describe_voices(self, language_code: str) -> List[Dict[str, Any]]:\n \"\"\"\n Pagination is not yet implemented\n \"\"\"\n if language_code is None:\n return VOICE_DATA\n\n return [item for item in VOICE_DATA if item[\"LanguageCode\"] == language_code]\n\n def delete_lexicon(self, name: str) -> None:\n # implement here\n del self._lexicons[name]\n\n def get_lexicon(self, name: str) -> Lexicon:\n # Raises KeyError\n return self._lexicons[name]\n\n def list_lexicons(self) -> List[Dict[str, Any]]:\n \"\"\"\n Pagination is not yet implemented\n \"\"\"\n\n result = []\n\n for name, lexicon in self._lexicons.items():\n lexicon_dict = lexicon.to_dict()\n lexicon_dict[\"Name\"] = name\n\n result.append(lexicon_dict)\n\n return result\n\n def put_lexicon(self, name: str, content: str) -> None:\n # If lexicon content is bad, it will raise ValueError\n if name in self._lexicons:\n # Regenerated all the stats from the XML\n # but keeps the ARN\n self._lexicons[name].update(content)\n else:\n lexicon = Lexicon(\n name, content, self.account_id, region_name=self.region_name\n )\n self._lexicons[name] = lexicon\n\n\npolly_backends = BackendDict(PollyBackend, \"polly\")\n","repo_name":"getmoto/moto","sub_path":"moto/polly/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3581,"program_lang":"python","lang":"en","doc_type":"code","stars":7174,"dataset":"github-code","pt":"48"} +{"seq_id":"74476854226","text":"import pymysql.cursors \nimport unidecode\nimport mysql.connector\nimport json\nfrom emotion_recognition 
import detect_emotion\n\n# This file contains the functions called by the API. In our case, the functions here are used to add rows to the DB\n# In some functions we apply a bit of logic (emotion detection) to add some extra info before inserting into the DB\n# PyMySQL lets us connect to a MySQL database and send SQL queries.\n\n# We have to call getConnection() before every query, and not once for the whole file, because \n# otherwise there are lock problems and the connection drops between queries\n\n# PyMySQL config for connecting to the database\ndef getConnection():\n    connection = pymysql.connect(host='localhost',\n                                user='root',\n                                password='root', \n                                db='LavalExperienceDB',\n                                charset='utf8mb4',\n                                cursorclass=pymysql.cursors.DictCursor)\n    print (\"connect successful\")\n    return connection\n\n\n    \n# ----------------------Users---------------------------\ndef addUser( username,email, gender, age, userStatus):\n    connection = getConnection()\n    returnValue = username, email, gender, age, userStatus\n    \n    with connection.cursor() as cursor:\n        try: \n            cursor.execute(\"INSERT INTO users (username, email, gender, age, userStatus) VALUES (%s,%s,%s,%s,%s);\", (username,email,gender,age,userStatus) )\n            connection.commit() \n            \n        except mysql.connector.Error as err:\n            print(\"Something went wrong: {}\".format(err))\n        finally:\n            cursor.close()\n    connection.close()\n    return \"200\"\n    # This is a workaround. The normal line would be the one below. Unfortunately, it raises an error that doesn't make much sense : \n    # --------- \n    # The view function did not return a valid response. The return type must be a string, dict, tuple, Response instance, or WSGI callable, but it was a tuple.\n    # ---------\n    # Flask is asking for a tuple and is upset that it got a tuple .. ? couldn't find the bug, went for the workaround because this data isn't used anyway \n    # (at least for the moment). \n    #return returnValue\n\n# ----------------------Kayahara---------------------------\ndef addKahayaraResult( username, videoname,videotype, inputs, dateExperience):\n    connection = getConnection()\n    returnValue = username, videoname, inputs, dateExperience\n    with connection.cursor() as cursor:\n        try: \n            \n            cursor.execute(\"INSERT INTO KayaharaResults (username, videoname,videotype, input, dateExperience) VALUES (%s,%s,%s,%s,%s);\", (username, videoname, videotype, json.dumps(inputs), dateExperience)) \n            connection.commit() \n            \n        except mysql.connector.Error as err:\n            print(\"Something went wrong: {}\".format(err))\n        finally:\n            cursor.close()\n    connection.close()\n    return returnValue\n\n#-------------------Emotions and performance (EP)---------------\n\ndef addEPFeelingsScreenshot( username, feeling,source, dateExperience):\n    connection = getConnection()\n    returnValue = username, feeling, source, dateExperience\n    emotion_detected = detect_emotion(source, feeling) #Emotion detection\n\n    with connection.cursor() as cursor:\n        try: \n            cursor.execute(\"INSERT INTO ei_feelings_screenshots (username, feeling, source, emotion_detected, date) VALUES (%s,%s,%s,%s,%s);\", (username, feeling, source,json.dumps(emotion_detected), dateExperience)) \n            connection.commit() \n            \n        except mysql.connector.Error as err:\n            print(\"Something went wrong: {}\".format(err))\n        finally:\n            cursor.close()\n    connection.close()\n    return returnValue\n\ndef addEPReactionsScreenshot( username, timer,source, dateExperience):\n    connection = getConnection()\n    returnValue = username, timer, source, dateExperience\n    emotion_detected = detect_emotion(source) #Emotion detection\n    with connection.cursor() as cursor:\n        try: \n            cursor.execute(\"INSERT INTO ei_reactions_screenshots (username, seconds_after_reveal, source, emotion_detected, date) VALUES (%s,%s,%s,%s,%s);\", (username, timer, source, json.dumps(emotion_detected), dateExperience)) \n            connection.commit() \n            \n        except mysql.connector.Error as err:\n            print(\"Something went wrong: {}\".format(err))\n        finally:\n            cursor.close()\n    connection.close()\n    return returnValue\n\ndef addEPResults(username, taskQuestions, taskResult,taskCheat,timeToAnswer, secondTrial, sanctionGiven, dateExperience):\n    connection = getConnection()\n    returnValue = username, taskQuestions, taskResult,taskCheat,timeToAnswer, secondTrial, sanctionGiven, dateExperience\n\n    #Small refactoring block to strip accents before DB insertion, to avoid /u009 instead of é\n\n    taskQuestions[\"canadaCulture\"] = [unidecode.unidecode(x) for x in taskQuestions[\"canadaCulture\"]]\n    with connection.cursor() as cursor:\n        try: \n\n            \n            affected_count = cursor.execute(\"INSERT INTO ei_results (username, task_questions, task_answers, task_cheats,time_to_answer, second_trial, sanction_given, date) VALUES (%s,%s,%s,%s,%s,%s,%s,%s);\", \n            (username, json.dumps(taskQuestions),json.dumps(taskResult),json.dumps(taskCheat),json.dumps(timeToAnswer), secondTrial, sanctionGiven, dateExperience)) \n\n            connection.commit()\n\n        except mysql.connector.Error as err:\n            print(\"Something went wrong: {}\".format(err))\n        finally:\n            cursor.close()\n    connection.close()\n    return returnValue\n\ndef addEPFeedback(username, pretaskForm):\n    connection = getConnection()\n    returnValue = username, pretaskForm\n    with connection.cursor() as cursor:\n        try: \n            cursor.execute(\"INSERT INTO ei_feedback (username, pretask_form) VALUES (%s,%s);\", \n            (username,json.dumps(pretaskForm))) \n            connection.commit() \n            \n        except mysql.connector.Error as err:\n            print(\"Something went wrong: {}\".format(err))\n        finally:\n            cursor.close()\n    connection.close()\n    return returnValue\n\ndef updateEPFeedback(username, sanctionGiven, posttaskForm):\n    connection = getConnection()\n    returnValue = username, sanctionGiven,posttaskForm\n    with connection.cursor() as cursor:\n        try: \n            cursor.execute(\"UPDATE ei_feedback SET sanction_given =%s, posttask_forms = %s WHERE username = %s;\", \n            (sanctionGiven,json.dumps(posttaskForm), username)) \n            connection.commit() \n            \n        except mysql.connector.Error as err:\n            print(\"Something went wrong: {}\".format(err))\n        finally:\n            cursor.close()\n    connection.close()\n    return returnValue\n\n\n# -------------------------Data Retrieve for Email ------------------------\ndef getUserResultAndWriteFile(username, filename):\n    #Fetch this user's data from each table, then write everything to a file.\n    connection = getConnection()\n    with open(filename, \"w\") as file:\n        with connection.cursor() as cursor:\n            try: \n                cursor.execute(\"SELECT * from ei_feelings_screenshots where username = %s\", \n                (username)) \n                result = cursor.fetchall()\n                file.write(json.dumps(result,indent=4, sort_keys=True, default=str))\n\n                cursor.execute(\"SELECT * from ei_reactions_screenshots where username = %s\", \n                (username)) \n                result = cursor.fetchall()\n                file.write(json.dumps(result,indent=4, sort_keys=True, default=str))\n\n                cursor.execute(\"SELECT * from ei_results where username = %s\", \n                (username)) \n                result = cursor.fetchall()\n                file.write(json.dumps(result,indent=4, sort_keys=True, default=str))\n\n                cursor.execute(\"SELECT * from ei_feedback where username = %s\", \n                (username)) \n                result = cursor.fetchall()\n                file.write(json.dumps(result,indent=4, sort_keys=True, default=str))\n            except mysql.connector.Error as err:\n                print(\"Something went wrong: {}\".format(err))\n            finally:\n                cursor.close()\n    connection.close()\n\n","repo_name":"Civel-1/ULWebsite","sub_path":"Serveur/database_utils.py","file_name":"database_utils.py","file_ext":"py","file_size_in_byte":8782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39910694570","text":"from collections import deque\n\nclass AhoCorasick(object):\n    def __init__(self, keywords):\n        self.adj_list = []\n        self.adj_list.append({\n            \"value\" : \"\",\n            \"next_states\" : [],\n            \"fail_state\" : 0,\n            \"output\" : []\n        })\n        self.add_keywords(keywords)\n        self.set_fail_transitions()\n\n    def add_keywords(self, keywords):\n        for keyword in keywords:\n            self.add_keyword(keyword)\n\n    def find_next_state(self, current_state, value):\n        for node in self.adj_list[current_state][\"next_states\"]:\n            if self.adj_list[node][\"value\"] == value:\n                return node\n        return None\n\n    def add_keyword(self, keyword):\n        current_state = 0\n        j = 0\n        keyword = keyword.lower()\n        child = self.find_next_state(current_state, keyword[j])\n        while child != None:\n            current_state = child\n            j += 1\n            if j < len(keyword):\n                child = self.find_next_state(current_state, keyword[j])\n            else:\n                break\n        for i in xrange(j, len(keyword)):\n            node = {\n                \"value\" : keyword[i],\n                \"next_states\" : [],\n                \"fail_state\" : 0,\n                \"output\" : []\n            }\n            self.adj_list.append(node)\n            self.adj_list[current_state][\"next_states\"].append(len(self.adj_list) - 1)\n            current_state = len(self.adj_list) - 1\n        self.adj_list[current_state][\"output\"].append(keyword)\n\n    def set_fail_transitions(self):\n        q = deque()\n        child = 0\n        for node in self.adj_list[0][\"next_states\"]:\n            q.append(node)\n            
self.adj_list[node][\"fail_state\"] = 0\n        while q:\n            r = q.popleft()\n            for child in self.adj_list[r][\"next_states\"]:\n                q.append(child)\n                state = self.adj_list[r][\"fail_state\"]\n                while self.find_next_state(state, self.adj_list[child][\"value\"]) == None and state != 0:\n                    state = self.adj_list[state][\"fail_state\"]\n                self.adj_list[child][\"fail_state\"] = self.find_next_state(state, self.adj_list[child][\"value\"])\n                if self.adj_list[child][\"fail_state\"] is None:\n                    self.adj_list[child][\"fail_state\"] = 0\n                self.adj_list[child][\"output\"] = self.adj_list[child][\"output\"] + self.adj_list[self.adj_list[child][\"fail_state\"]][\"output\"]\n\n    def get_matches(self, line):\n        line = line.lower()\n        current_state = 0\n        found = []\n\n        for i in xrange(len(line)):\n            while self.find_next_state(current_state, line[i]) is None and current_state != 0:\n                current_state = self.adj_list[current_state][\"fail_state\"]\n            current_state = self.find_next_state(current_state, line[i])\n            if current_state is None:\n                current_state = 0\n            else:\n                for j in self.adj_list[current_state][\"output\"]:\n                    found.append({\"index\": i - len(j) + 1, \"word\": j})\n        return found\n","repo_name":"shams-sam/logic-lab","sub_path":"AhoCorasick/aho_corasick.py","file_name":"aho_corasick.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1101620807","text":"\"\"\"\nmis_tests\\tasks\\phptopy.py\nTranslates PHP code to Python as far as possible\n\"\"\"\nimport io\nimport sys\nfrom pprint import pprint\nimport re\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\n\nclass Phptopy:\n\n    def __init__(self):\n        currpath = os.path.dirname(os.path.abspath(__file__))\n        self.currpath = os.path.dirname(os.path.abspath(currpath+\"/../../../\"))\n        self.pathfrom = self.currpath+\"\\\\prj_mysqlhive\\\\backend\\\\vendor\\\\theframework\\\\components\\\\db\"\n        self.pathto = self.currpath+\"\\\\prj_python37\\\\theframework\\\\translated\"\n        print(self.pathfrom,os.path.isdir(self.pathfrom))\n\n    def __get_files(self):\n        arFiles = [\n            filename for filename in listdir(self.pathfrom) if isfile(join(self.pathfrom, filename))\n        ]\n        return arFiles\n\n    def __get_content(self,filename):\n        with open(filename) as f:\n            return f.read()\n\n    def __get_intopy(self,content):\n        arCharsRm = [\"{\",\"}\",\"$\",\"private \",\"public \",\";\",\"<?php\",\"?>\"]\n        arCharsRep = [(\"$this->\",\"self.\"), (\"self::\",\"self.\"), (\"//\",\"# \"),\n            (\"TRUE\",\"True\"), (\"true\",\"True\"), (\"FALSE\",\"False\"),(\"false\",\"False\"),(\"!===\",\"!=\"),\n            (\"!==\",\"!=\"),(\")) \",\"): \"),(\".$\",\"+$\"),(\".=\",\"+=\"),(\".\\\"\",\"+\\\"\"),(\"\\\".\",\"\\\"+\"),(\"NULL\",\"None\")\n            # ,(\"!\",\"not\")\n        ]\n\n        for dic in arCharsRep:\n            content = content.replace(dic[0],dic[1])\n\n        for c in arCharsRm:\n            content = content.replace(c,\"\")\n\n        return content\n\n    def __get_in_lines(self,content):\n        return content.split(\"\\n\")\n\n    def __get_not_emptylines(self,content):\n        arLines = self.__get_in_lines(content)\n        arFiltered = filter(lambda sLine: not re.match(r'^\\s*$', sLine), arLines)\n        return \"\\n\".join(arFiltered)\n\n    def __write_file(self,filename,content):\n        f = open(filename, \"w\")\n        f.write(content)\n        f.close()\n\n\n    def __translate(self,arFiles):\n        for filename in arFiles:\n            if \".php\" not in filename:\n                continue\n            pprint(filename)\n            sFile = self.pathfrom +\"\\\\\"+ filename\n            sContent = self.__get_content(sFile)\n            #sContent = sContent.strip()\n            # pprint(sContent)\n            sContent = 
self.__get_intopy(sContent)\n            # sContent = self.__get_not_emptylines(sContent)\n            sFileNew = self.pathto + \"\\\\\"+filename+\".py\"\n            print(\"sFileNew: \",sFileNew)\n            self.__write_file(sFileNew,sContent)\n\n    def run(self):\n        arFiles = self.__get_files()\n        self.__translate(arFiles)\n\n\nif __name__ == \"__main__\":\n    o = Phptopy()\n    o.run()\n","repo_name":"eacevedof/prj_python37","sub_path":"mis_tests/tasks/phptopy.py","file_name":"phptopy.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"7191500898","text":"from django.conf import settings\nfrom django.contrib.auth.mixins import AccessMixin\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import models\n\n\nclass TimeStampedMixin(models.Model):\n    # Stores the creation and update timestamps\n    created = models.DateTimeField(auto_now_add=True)\n    updated = models.DateTimeField(auto_now=True)\n\n    class Meta:\n        abstract = True\n\n\nclass Postable(TimeStampedMixin):\n    # Creation and update timestamp fields are added automatically\n    # Stores the author and the content\n    author = models.ForeignKey(\n        settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name='작성자')\n    content = models.TextField(max_length=100, verbose_name='내용')\n\n    class Meta:\n        abstract = True\n\n\nclass ValidAuthorRequiredMixin(AccessMixin):\n    \"\"\"Returns 403 unless the requesting user is the object's author or a staff member\"\"\"\n\n    def dispatch(self, request, *args, **kwargs):\n        if not request.user.is_authenticated:\n            # Deny if the user is not logged in in the first place.\n            return self.handle_no_permission()\n        elif self.get_object().author != request.user and not request.user.is_staff:\n            # Deny if the current user is neither the object's author nor a staff member.\n            raise PermissionDenied\n        else:\n            return super(ValidAuthorRequiredMixin, self).dispatch(request, *args, **kwargs)\n","repo_name":"YooInKeun/CAU_CSE_Capstone_3","sub_path":"BeautyForMe/beautyforme/beautyforme/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":1480,"program_lang":"python","lang":"ko","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"25034058385","text":"# krs_csvReaderEx03.py\n# Read the csv file and train on it, using only the bottom 5 rows for testing.\n# Write this in the form of a function.\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nimport numpy as np\n\nfrom mykeras.mykeras.myfunction import getDataSet\n\nfilename = './score2.csv'\n# loadtxt : returns the data in tuple form.\ndata = np.loadtxt(filename, dtype=np.float32, delimiter=',')\n\nx_train, x_test, y_train, y_test = getDataSet(data = data, testing_row = 5)\n\nmodel = Sequential()\n\n\nx = len(x_test[1])\nprint('input_dim:', x)\n\n\nmodel = Sequential()\nmodel.add(Dense(input_dim=x, units=1))\n\nmodel.compile(optimizer='adam', loss='mse')\n\nmodel.fit(x_train, y_train, epochs=10000)\n\nfor item in x_test :\n    result = model.predict( np.array([item]))\n    print( result )\n    \n    \n    \n    ","repo_name":"woojungjang/Keras","sub_path":"mykeras/krs_csvReaderEx03.py","file_name":"krs_csvReaderEx03.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12979364078","text":"# -*- coding: utf-8 -*-\r\n\r\n# Class ISMN-6650-001\r\n# Irida Medina  #\r\n\r\n\r\n# Assignment 3 - Question 1 -------------------------------------\r\n# Printing title\r\nprint('----------Unique Words----------')\r\n\r\n# Ask the user for the input file name\r\nv_file = input('Enter the name of the input file: ')\r\n\r\n#Opening file\r\ninputFile = open(v_file, 'r')\r\ntext1 = 
inputFile.read()\r\ninputFile.close()\r\n\r\n#Checking the unique words\r\nword_list = text1.split()\r\nunique_words = set(word_list)\r\n\r\n#Print results\r\nprint('---------------------------------------')\r\nprint('These are the unique words in the text:')\r\nfor word in unique_words:\r\n print(word)\r\n ","repo_name":"lunaim174/python_class","sub_path":"Irida Medina - A3 - Question 1.py","file_name":"Irida Medina - A3 - Question 1.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38978520734","text":"# Reading genome files\ndef rGenome(filename):\n g = \"\"\n with open(filename, \"r\") as f:\n for l in f:\n if not l[0] == \">\":\n g += l.rstrip()\n return g\n\n\n# Parses the read and quality strings from FASTQ file containing sequencing reads\ndef rFastq(filename):\n sequences = []\n qualities = []\n with open(filename) as fx:\n while True:\n fx.readline()\n seq = fx.readline().rstrip()\n fx.readline()\n qual = fx.readline().rstrip()\n if len(seq) == 0:\n break\n sequences.append(seq)\n qualities.append(qual)\n return sequences, qualities\n# sequencess, qualities = rFastq(\"/Users/. . ./Downloads/lambda_virus.fa\")\n\n\n# 1. How many times does AGGT or its reverse complement (ACCT) occur in the lambda virus genome?\n# E.g. if AGGT occurs 10 times and ACCT occurs 12 times, you should report 22.\n\n# Naive Exact Matching algorithm\ndef naive(p, t):\n o = []\n y = len(t)\n x = len(p)\n for i in range(y - x + 1):\n match = True\n for j in range(x):\n if t[i + j] != p[j]:\n match = False\n break\n if match:\n o.append(i)\n return o\n\n# Finding reverse complement of a DNA strand\ndef reverse_comp(seq):\n comp = {\"A\": \"T\", \"T\": \"A\", \"C\": \"G\", \"G\": \"C\"}\n DNA_strand = \" \"\n for nuc_base in seq[::-1]:\n DNA_strand = DNA_strand + comp[nuc_base]\n return DNA_strand\n\n# Naive exact matching algorithm that is strand aware\n# Instead of looking only for occurrences of P in T, look for occurrences of the reverse complement of P in T\n\ndef naive_with_rc(p, t):\n o_naive = naive(p, t)\n o_naive_rc = naive(reverse_comp(p), t)\n rc = reverse_comp(p)\n if rc == p:\n return o_naive\n elif rc != p:\n return o_naive + o_naive_rc\n\nlambda_virus_genome = rGenome(\"/Users/. . ./Downloads/lambda_virus.fa\")\np = \"ACCT\"\no = naive_with_rc(p, lambda_virus_genome)\nprint(\"Q1(a). ACCT in lambda_virus_genome:\")\nprint(\"# Occurrences: %d\" % len(o))\n\nlambda_virus_genome = rGenome(\"/Users/. . ./Downloads/lambda_virus.fa\")\np = \"AGGT\"\no = naive_with_rc(p, lambda_virus_genome)\nprint(\"Q1(b). AGGT in lambda_virus_genome:\")\nprint(\"# Occurrences: %d\" % len(o))\n\n\n# 2. How many times does TTAA or its reverse complement occur in the lambda virus genome?\n# Hint: TTAA and its reverse complement are equal, do not double count\n# if P and its reverse complement are identical; given match offset should be reported only once\n\ndef naive_2mm(p, t):\n o = []\n y = len(t)\n x = len(p)\n for i in range(y-x + 1):\n match = True\n mismatch_c = 0\n for j in range(x):\n if t[i+j] != p[j]:\n mismatch_c+=1\n if mismatch_c > 2:\n match = False\n break\n if match:\n o.append(i)\n return o\n\nlambda_virus_genome = rGenome(\"/Users/. . ./Downloads/lambda_virus.fa\")\np = \"TTAA\"\no = naive_with_rc(p, lambda_virus_genome)\nprint(\"Q2. TTAA in lambda_virus_genome:\")\nprint(\"# Occurrences: %d\" % len(o))\n\n\n# 3. 
What is the offset of the leftmost occurrence of ACTAAGT or its reverse complement in the Lambda virus genome?\n# E.g. if the leftmost occurrence of ACTAAGT is at offset 40 (0-based) and the leftmost occurrence of its reverse complement\n# ACTTAGT is at offset 29, then report 29.\n\nlambda_virus_genome = rGenome(\"/Users/. . ./Downloads/lambda_virus.fa\")\np = \"ACTAAGT\"\no = naive_with_rc(p, lambda_virus_genome)\nprint(\"Q3(a). ACTAAGT in lambda_virus_genome:\")\nprint(\"# Occurrences: %d\" % min(o))\n\nlambda_virus_genome = rGenome(\"/Users/. . ./Downloads/lambda_virus.fa\")\np = \"ACTTAGT\"\no = naive_with_rc(p, lambda_virus_genome)\nprint(\"Q3(b). ACTTAGT in lambda_virus_genome:\")\nprint(\"# Occurrences: %d\" % min(o))\n\n\n# 4. What is the offset of the leftmost ocurrence of AGTCGA or its reverse complement in the Lambda virus genome?\n\nlambda_virus_genome = rGenome(\"/Users/. . ./Downloads/lambda_virus.fa\")\np = \"AGTCGA\"\no = naive_with_rc(p, lambda_virus_genome)\nprint(\"Q4(a). AGTCGA in lambda_virus_genome:\")\nprint(\"# Occurrences: %d\" % min(o))\n\nlambda_virus_genome = rGenome(\"/Users/. . ./Downloads/lambda_virus.fa\")\np = \"AGTGGA\"\no = naive_with_rc(p, lambda_virus_genome)\nprint(\"Q4(b). AGTGGA in lambda_virus_genome:\")\nprint(\"# Occurrences: %d\" % min(o))\n\n\n# 5. As we will discuss, sometimes we would like to find approximate matches for P in T. That is, we want to find\n# occurrences with one or more differences. For Questions 5 and 6, make a new version of the naive function called\n# naive_2mm that allows up to 2 mismatches per occurrence. Unlike for the previous questions, do not consider the\n# reverse complement here. We're looking for approximate matches for P itself, not its reverse complement.\n# For example, ACTTTA occurs twice in ACTTACTTGATAAAGT, once at offset 0 with 2 mismatches, and once at offset 4\n# with 1 mismatch. So naive_2mm('ACTTTA', 'ACTTACTTGATAAAGT') should return the list [0, 4].\n# Hint: See this notebook for a few examples you can use to test your naive_2mm function.\n# How many times does TTCAAGCC occur in the Lambda virus genome when allowing up to 2 mismatches\n\nlambda_virus_genome = rGenome(\"/Users/. . ./Downloads/lambda_virus.fa\")\np = \"TTCAAGCC\"\no = naive_2mm(p, lambda_virus_genome)\nprint(\"Q5. TTCAAGCC (up to 2 matches) in lambda_virus_genome:\")\nprint(\"# Occurrences: %d\" % len(o))\n\n\n# 6. What is the offset of the leftmost occurrence of the AGGAGGTT in the Lambda virus genome when allowing\n# up to 2 matches?\n\nlambda_virus_genome = rGenome(\"/Users/. . ./Downloads/lambda_virus.fa\")\np = \"AGGAGGTT\"\no = naive_2mm(p, lambda_virus_genome)\nprint(\"Q6. AGGAGGTT (up to 2 matches) in lambda_virus_genome:\")\nprint(\"# Occurrences: %d\" % min(o))\n\n\n# 7. Finally, download and parse the provided FASTQ file containing real DNA sequencing reads derived from a human:\n# https://d28rh4a8wq0iu5.cloudfront.net/ads1/data/ERR037900_1.first1000.fastq\n# Note that the file has many reads in it and you should examine all of them together when answering this question.\n# The reads are taken from this study:\n# Ajay, S. S., Parker, S. C., Abaan, H. O., Fajardo, K. V. F., & Margulies, E. H. (2011).\n# Accurate and comprehensive sequencing of personal genomes. Genome research, 21(9), 1498-1505.\n# This dataset has something wrong with it; one of the sequencing cycles is poor quality.\n# Report which sequencing cycle has the problem. Remember that a sequencing cycle corresponds to a particular offset\n# in all the reads. 
For example, if the leftmost read position seems to have a problem consistently across reads, report 0.\n# If the fourth position from the left has the problem, report 3. Do whatever analysis you think is needed to identify\n# the bad cycle. It might help to review the \"Analyzing reads by position\" video.\n\n# def createHist(qualities):\n # sequences, qualities = rFastq(\"/Users/. . ./Downloads/ERR037900.first1000.fastq\")\n # phredscore = []\n # qualities = qualities[-5]\n # for phred in quals:\n # q = ord(phred)-33\n # phredscore.append(q)\n # return phredscore\n# h = createHist(qualities)\n# print(h)\n# import matplotlib.pyplot as plt\n# plt.bar(range(len(h)), h)\n# plt.show()\n# min_h = min(h)\n# index_min_h = h.index(min_h)\n# print(index_min_h)\n","repo_name":"bakuncwa/algo4dnaseq_jhu","sub_path":"algo4dnaseq_w1/algo4dnaseq_hw1.py","file_name":"algo4dnaseq_hw1.py","file_ext":"py","file_size_in_byte":7316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16490540444","text":"import torch\nimport numpy as np\nimport pytorch_lightning as pl\n\nfrom utils import create_memmap\n\n# Create a DataLoader\nclass DataLoader(torch.utils.data.Dataset):\n def __init__(self, data_path, type=\"val\"):\n self.data = np.memmap(\n data_path,\n dtype='float32',\n mode='r+',\n shape=(100, 8, 512, 512)\n )\n \n if type == \"train\":\n self.data = self.data[:80, ...]\n elif type == \"val\":\n self.data = self.data[80:, ...]\n else:\n raise ValueError(\"type must be either train or val\")\n \n def __len__(self):\n return self.data.shape[0]\n\n def __getitem__(self, idx):\n batch = self.data[idx, ...]\n X = torch.from_numpy(batch[:4, ...]).float()\n y = torch.from_numpy(batch[4:, ...]).float()\n return X, y\n\n# 1. Create a DataLoader\nclass AC_DataLoader(pl.LightningDataModule):\n def __init__(self, data_path, batch_size):\n super().__init__()\n self.data_path = data_path\n self.batch_size = batch_size\n \n def train_dataloader(self):\n train_data = DataLoader(self.data_path, type=\"train\")\n return torch.utils.data.DataLoader(\n train_data,\n batch_size=self.batch_size,\n shuffle=True\n )\n def val_dataloader(self):\n val_data = DataLoader(self.data_path, type=\"val\")\n return torch.utils.data.DataLoader(\n val_data,\n batch_size=self.batch_size,\n shuffle=False\n )\n\n# Set a model\nclass SimpleCNN(torch.nn.Module):\n def __init__(self,inch=4, outch=4):\n super(SimpleCNN,self).__init__()\n self.cnn_layers=torch.nn.Sequential(\n torch.nn.Conv2d(\n in_channels=inch,\n out_channels=outch,\n kernel_size=1,\n stride=1)\n )\n def forward(self, x):\n x = self.cnn_layers(x)\n return x\n\nclass LitModel(pl.LightningModule):\n def __init__(self, model):\n super().__init__()\n self.model = model\n self.loss = torch.nn.MSELoss()\n \n def forward(self, x):\n return self.model(x)\n \n def training_step(self, batch, batch_idx):\n X, y = batch\n y_hat = self.model(X)\n loss = self.loss(y_hat, y)\n self.log('train_loss', loss)\n return loss\n \n def validation_step(self, batch, batch_idx):\n X, y = batch\n y_hat = self.model(X)\n loss = self.loss(y_hat, y)\n self.log('val_loss', loss)\n return loss\n\n def configure_optimizers(self):\n return torch.optim.Adam(self.parameters(), lr=0.001)\n\n\nif __name__ == \"__main__\":\n ## 1. Create a memmap file\n file = \"data.bin\"\n shape = (100, 8, 512, 512)\n #create_memmap(file, shape)\n\n ## 2. Create a memmap file\n dataset = AC_DataLoader(file, 4)\n \n ## 3. 
Create a model\n model = SimpleCNN()\n litmodel = LitModel(model)\n \n ## 4. Define a callback\n checkpoint_callback = pl.callbacks.ModelCheckpoint(\n monitor='val_loss',\n dirpath='checkpoints',\n filename='model-{epoch:02d}-{val_loss:.2f}',\n save_top_k=1,\n mode='min',\n )\n \n early_stop_callback = pl.callbacks.EarlyStopping(\n monitor='val_loss',\n min_delta=0.00,\n patience=25,\n verbose=False,\n mode='min'\n )\n \n ## 5. Define logger WANDB\n #logger = pl.loggers.WandbLogger(\"test\")\n logger = None\n \n\n ## 6. Define trainer\n trainer = pl.Trainer(\n gpus=1,\n max_epochs=100,\n logger=logger,\n callbacks=[checkpoint_callback, early_stop_callback]\n )\n \n ## 7. Train\n trainer.fit(litmodel, dataset)\n \n ## 8. Create a torchscript model\n # It is a way to convert your PyTorch models (written in Python)\n # into a format that can be efficiently executed in different\n # environments, such as servers, edge devices, or embedded systems. \n model = trainer.model.model\n model.eval()\n example = torch.rand(1, 4, 512, 512)\n traced_script_module = torch.jit.trace(model, example) # good for models with static computation graph.\n traced_script_module.save(\"model.pt\")\n \n ## 9. Load a torchscript model\n #import torch\n #model = torch.jit.load(\"model.pt\")","repo_name":"csaybar/RPythonsync","sub_path":"lit_model.py","file_name":"lit_model.py","file_ext":"py","file_size_in_byte":4341,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"72779145427","text":"# mydate.py\nimport random\n \ndef is_valid_month_num(n):\n if(n >= 1 and n < 13):\n return True\n return False\n\ndef month_num_to_string(month_num):\n months = ['January', 'February', 'March', 'April', 'May', 'June',\n 'July', 'August', 'September', 'October', 'November', 'December']\n if (is_valid_month_num(month_num)):\n return months[month_num-1]\n return None\n\ndef date_to_string(date_list):\n name = month_num_to_string(date_list[1])\n string = \"{} {}, {}\".format(name, date_list[2], date_list[0])\n return string\n\ndef dates_to_strings(date_list):\n new_list = []\n for l in date_list:\n new_list.append(date_to_string(l))\n return new_list\n\ndef remove_years(date_list):\n new_list = []\n for l in date_list:\n new_list.append(l[1:])\n return new_list\n\ndef is_leap_year(year):\n if (year%4 == 0):\n if (year%100 == 0):\n if (year%400 == 0):\n return True\n else: return True\n return False\n\ndef get_num_days_in_month(month_num, year):\n months_to_days = [(1, 31), (2, 28), (3, 31), (4, 30), (5, 31), (6, 30), (7, 31), (8, 31), (9, 30), (10, 31), (11, 30), (12, 31)]\n if ( is_valid_month_num(month_num) ):\n if ( is_leap_year(year) ):\n return months_to_days[month_num-1][1]+1\n return months_to_days[month_num-1][1]\n return None\n\ndef generate_date(start_year, end_year):\n date = []\n date.append(random.randint(start_year, end_year))\n \n month = random.randint(1, 12)\n max_days = get_num_days_in_month(month, date[0])\n\n date.append(month)\n date.append(random.randint(1, max_days))\n \n return date\n","repo_name":"NYUeServ/stit.github.io","sub_path":"Desktop/Fall 2018/DMA/assignments/1/candacej97-homework01-master/mydate.py","file_name":"mydate.py","file_ext":"py","file_size_in_byte":1669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11000449437","text":"import torch\nfrom PIL import Image\nfrom torchvision import datasets\n\nEXT_LIST = (\".jpg\", \".jpeg\", \".png\", \".ppm\", \".bmp\",\n \".pgm\", 
\".tif\", \".tiff\", \".webp\")\n\ndef loader(path):\n return Image.open(path)\n\ndef dataset_loader(folder_path, transform, BATCH_SIZE=64):\n # Read all images from a directory \n folder_dataset = datasets.DatasetFolder(root=folder_path,\n loader=loader,\n extensions=EXT_LIST,\n transform=transform)\n # Get Class labels and corresponding mapping to index\n class_labels, lables_to_index = folder_dataset.find_classes(folder_path)\n # Make batches of images\n batch_loader = torch.utils.data.DataLoader(folder_dataset,\n batch_size=BATCH_SIZE,\n shuffle=False,\n num_workers=4,\n pin_memory=True)\n \n return lables_to_index, batch_loader\n\n\n \n \n","repo_name":"MeharRamzan/CLIP","sub_path":"datasets/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7395306057","text":"from chapter7.Data import CoraData\r\nimport numpy as np\r\nimport torch as th\r\n\r\nDEVICE = \"cuda\" if th.cuda.is_available() else \"cpu\"\r\ndata = CoraData().data\r\ndef sampling(src_nodes, sample_num, neighbor_table, con):\r\n\r\n results = []\r\n for sid in src_nodes:\r\n res = np.random.choice(neighbor_table[sid], size=(sample_num,))\r\n neighbor_node = np.asarray(res).flatten()\r\n con_us = []\r\n for i in neighbor_node:\r\n con_us.append(con[i])\r\n label_Y = data.y[neighbor_node]\r\n label_yr = np.unique(label_Y)\r\n # ur\r\n con_ur = np.unique(con_us)\r\n hopk_result1 = cb_backdoor(neighbor_node, label_Y, con_us, label_yr, con_ur, neighbor_node.size)\r\n results.append(hopk_result1)\r\n return np.asarray(results).flatten()\r\n\r\n\r\n\r\n\r\n\r\ndef multihop_sampling(src_nodes, sample_nums, neighbor_table,con_us):\r\n\r\n sampling_result = [src_nodes]\r\n for k, hopk_num in enumerate(sample_nums):\r\n hopk_result = sampling(sampling_result[k], hopk_num, neighbor_table, con_us)\r\n sampling_result.append(hopk_result)\r\n return sampling_result\r\n\r\n\r\ndef rand_cat_fast(p, N):\r\n K = len(p)\r\n u = np.random.rand(N, 1)\r\n P = np.cumsum(p, axis=0)\r\n U = np.tile(P.T, (N, 1))\r\n c = np.tile(u, (1, K)) >= U\r\n c = c + 0\r\n x = np.sum(c, 1) + 1\r\n return x\r\n\r\n\r\ndef histcnd(Y, U, yr, ur):\r\n Nyu = np.zeros((7, 2), dtype=int)\r\n for index, i in enumerate(Y):\r\n Nyu[i - 1][U[index] - 1] += 1\r\n\r\n return Nyu\r\n\r\n\r\ndef cb_backdoor(X, Y, U, yr, ur, M):\r\n # 1.Estimate f(u,y), f(y) and f(u|y)\r\n N = len(X)\r\n K = yr.shape[0]\r\n Nyu = histcnd(Y, U, yr, ur)\r\n # print(Nyu)\r\n pyu = Nyu / N\r\n pu = np.sum(pyu, axis=0, keepdims=True).T\r\n py = np.sum(pyu, axis=1, keepdims=True)\r\n py_u = pyu / pu.T\r\n Ns = np.sum(Nyu, axis=1)\r\n\r\n\r\n\r\n # 2. For each y in range of values of Y variable\r\n\r\n\r\n v = np.zeros((M, 1), dtype=float)\r\n\r\n index_i = []\r\n\r\n for i in yr:\r\n # 3. 
Resample indices\r\n for index, j in enumerate(Y):\r\n\r\n if j == i:\r\n v[index][0] = j / (py_u[i - 1][U[index] - 1]).T / N\r\n\r\n a = rand_cat_fast(v, Ns[i - 1])\r\n for i in a:\r\n index_i.append(i)\r\n\r\n x1=[]\r\n\r\n for i in index_i:\r\n if i == len(index_i) +1:\r\n x1.append(X[len(index_i)-1])\r\n else:\r\n x1.append(X[i - 1])\r\n Xw = np.array(x1)\r\n\r\n return Xw\r\n\r\n\r\n","repo_name":"ysutaoteam/C-GraphSAGE","sub_path":"Causal-GraphSAGE-Master/chapter7/causal_sampling.py","file_name":"causal_sampling.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"74876541586","text":"\nfrom __future__ import annotations\n\nimport operator\nimport collections\n# import collections.abc\nimport functools\nimport itertools\n\nimport typing\nimport datetime\n\nimport numpy\nimport pandas\n\nimport jax\nimport jax.numpy\nimport jax.numpy.linalg\n\nimport jaxopt\nimport optax\n\nimport xtuples as xt\n\nfrom ... import xjd\n\n# ---------------------------------------------------------------\n\n# NOTE: works differently to the below?\n# as we just multiply through by the mask?\n@xt.nTuple.decorate(init=xjd.init_null)\nclass Zero(typing.NamedTuple):\n\n data: xjd.Loc\n v: numpy.ndarray\n\n def init(\n self, site: xjd.Site, model: xjd.Model, data = None\n ) -> tuple[Zero, tuple, xjd.SiteValue]: ...\n\n def apply(\n self,\n site: xjd.Site,\n state: xjd.Model,\n data = None,\n ) -> typing.Union[tuple, jax.numpy.ndarray]:\n return jax.numpy.multiply(\n self.data.access(state),\n self.v\n )\n\n# ---------------------------------------------------------------\n\ndef where(mask, if_yes, if_no):\n not_mask = 1 + (mask * -1.)\n return jax.numpy.multiply(\n mask, if_yes\n ) + jax.numpy.multiply(\n not_mask, if_no\n )\n\n# ---------------------------------------------------------------\n\n\n@xt.nTuple.decorate(init=xjd.init_null)\nclass Positive(typing.NamedTuple):\n\n data: xjd.Loc\n condition: typing.Union[xjd.Loc, numpy.ndarray]\n\n def init(\n self, site: xjd.Site, model: xjd.Model, data = None\n ) -> tuple[Positive, tuple, xjd.SiteValue]: ...\n\n def apply(\n self,\n site: xjd.Site,\n state: xjd.Model,\n data = None,\n ) -> typing.Union[tuple, jax.numpy.ndarray]:\n data = self.data.access(state)\n data_pos = jax.numpy.abs(data)\n if isinstance(self.condition, xjd.Loc):\n mask = self.condition.access(state)\n else:\n mask = self.condition\n return where(mask, data_pos, data)\n \n@xt.nTuple.decorate(init=xjd.init_null)\nclass Negative(typing.NamedTuple):\n\n data: xjd.Loc\n condition: typing.Union[xjd.Loc, numpy.ndarray]\n\n def init(\n self, site: xjd.Site, model: xjd.Model, data = None\n ) -> tuple[Negative, tuple, xjd.SiteValue]: ...\n\n def apply(\n self,\n site: xjd.Site,\n state: xjd.Model,\n data = None,\n ) -> typing.Union[tuple, jax.numpy.ndarray]:\n data = self.data.access(state)\n data_neg = -1 * jax.numpy.abs(data)\n if isinstance(self.condition, xjd.Loc):\n mask = self.condition.access(state)\n else:\n mask = self.condition\n return where(mask, data_neg, data)\n\n# ---------------------------------------------------------------\n\n@xt.nTuple.decorate(init=xjd.init_null)\nclass Where(typing.NamedTuple):\n\n condition: typing.Union[xjd.Loc, numpy.ndarray]\n x: typing.Union[xjd.Loc, numpy.ndarray]\n y: typing.Union[xjd.Loc, numpy.ndarray]\n\n def init(\n self, site: xjd.Site, model: xjd.Model, data = None\n ) -> tuple[Where, tuple, xjd.SiteValue]: ...\n\n def apply(\n self,\n site: 
xjd.Site,\n state: xjd.Model,\n data = None,\n ) -> typing.Union[tuple, jax.numpy.ndarray]:\n if isinstance(self.x, xjd.Loc):\n x = self.x.access(state)\n else:\n x = self.x\n if isinstance(self.y, xjd.Loc):\n y = self.y.access(state)\n else:\n y = self.y\n if isinstance(self.condition, xjd.Loc):\n mask = self.condition.access(state)\n else:\n mask = self.condition\n return jax.numpy.where(mask, x, y)\n \n# ---------------------------------------------------------------\n","repo_name":"tomjrwilliams/xjd","sub_path":"src/xjd/nodes/transforms/masks.py","file_name":"masks.py","file_ext":"py","file_size_in_byte":3696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37242257611","text":"from flask import Flask, render_template, jsonify\nfrom threading import Thread\nfrom time import sleep\nfrom os import chdir\nfrom expect import Interactor\nfrom turbo_flask import Turbo\n\nclass ServerControl:\n def __init__(self):\n\n self.server_logs = \"\"\n # self.index_path = \"/home/kepper104/hosting/unturned-wrapper/index.html\"\n chdir(\"/home/kepper104/hosting/unturned-wrapper/\")\n self.index_path = \"index.html\"\n # self.index_path = ''\n # with open(\"index.html\", 'r') as f:\n # self.index_path = f.read()\n\n\n\n\n def run(self):\n self.app = Flask(__name__)\n self.turbo = Turbo(self.app)\n # self.app.config['SECRET_KEY'] = 'secret!'\n\n # print(\"STARTED \" * 1000)\n # self.start_server()\n\n self.server = Interactor(self)\n self.t = Thread(target=self.server.start, args=())\n self.t.start()\n\n @self.app.route(\"/\")\n def index():\n return render_template(self.index_path)\n\n @self.app.route(\"/start_server\")\n def start_server():\n print(\"Server Start\")\n self.server_logs += \"Starting server...\\n\"\n return \"nothing\"\n\n @self.app.route(\"/stop_server\")\n def stop_server():\n print(\"Server Stop\")\n self.server_logs += \"Stopping server...\\n\"\n self.server.kill()\n # self.server.command = \"players\"\n return \"nothing\"\n\n @self.app.context_processor\n def inject_load():\n logs = self.server_logs.split(\"\\n\")\n return {'logs': \"\\n\".join(logs[-50:])}\n\n @self.app.before_first_request\n def before_first_request():\n # print(\"START THREAD\\n\" * 100)\n Thread(target=self.update_load).start()\n\n self.app.run(debug=False, host='0.0.0.0')\n\n def update_load(self):\n with self.app.app_context():\n while True:\n sleep(0.2)\n # print(\"updating.....\")\n self.turbo.push(self.turbo.replace(render_template('logs.html'), 'load'))\n\n","repo_name":"kepper104/unturned-wrapper","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41725691377","text":"import torch\nimport torch.nn as nn\n\n\ndef off_diagonal(x: torch.Tensor) -> torch.Tensor:\n \"\"\"Return a flattened view of the off-diagonal elements of a square matrix.\n\n Code taken from: https://github.com/facebookresearch/barlowtwins/blob/8e8d284ca0bc02f88b92328e53f9b901e86b4a3c/main.py#L180\n\n Args:\n x (torch.Tensor): A square matrix\n\n Returns:\n torch.Tensor: A flattened view of the off-diagonal elements of the input\n \"\"\"\n\n n, m = x.shape\n assert n == m\n\n return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()\n\n\nclass EmpiricalCrossCorrelation(nn.Module):\n\n def __init__(self, lambd: float = 0.0051):\n \"\"\"Empirical Cross Correlation loss function.\n\n Code taken from: 
https://github.com/facebookresearch/barlowtwins/blob/8e8d284ca0bc02f88b92328e53f9b901e86b4a3c/main.py#L212\n\n        Args:\n            lambd (float, optional): Weight on off-diagonal entries. Defaults to 0.0051.\n        \"\"\"\n        super(EmpiricalCrossCorrelation, self).__init__()\n\n        self.lambd = lambd\n\n    def forward(self, z1: torch.Tensor, z2: torch.Tensor):\n        # Empirical cross-correlation matrix\n        c = z1.T @ z2\n\n        batch_size = z1.shape[0]\n\n        c.div_(batch_size)\n\n        on_diag = torch.diagonal(c).add_(-1).pow_(2).sum()\n        off_diag = off_diagonal(c).pow_(2).sum()\n\n        loss = on_diag + self.lambd * off_diag\n\n        return loss\n","repo_name":"V0XNIHILI/torch-mate","sub_path":"src/torch_mate/loss/EmpiricalCrossCorrelation.py","file_name":"EmpiricalCrossCorrelation.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"35989072624","text":"import json\n\nfrom utils.config import get_config, get_file\nfrom utils.selenium_browser import get_browser\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import WebDriverException\n\nif __name__ == '__main__':\n    \"\"\"\n    Used to obtain the mobile-site cookie\n    \"\"\"\n    config = get_config()\n    config['headless'] = False\n    browser = get_browser(config)\n    browser.get(\"https://plogin.m.jd.com/login/login\")\n    try:\n        wait = WebDriverWait(browser, 135)\n        print(\"Please log in on the web page with your phone number\")\n        wait.until(EC.presence_of_element_located((By.ID, 'msShortcutMenu')))\n        browser.get(\"https://home.m.jd.com/myJd/newhome.action\")\n        username = wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'my_header_name'))).text\n        cookie = \"\"\n        for _ in browser.get_cookies():\n            if _[\"name\"] == \"pt_key\" or _[\"name\"] == \"pt_pin\":\n                cookie += _[\"name\"] + \"=\" + _[\"value\"] + \";\"\n        config['mobile_cookie'] = cookie[0:-1]\n        print(\"The obtained cookie is: \" + cookie)\n        with open(get_file(\"./config.json\"), mode='w', encoding=\"utf-8\") as f:\n            json.dump(config, f, indent=4, ensure_ascii=False)\n        print(\"Successfully added\", username)\n    except WebDriverException:\n        print(\"Failed to add\")\n    finally:\n        browser.close()\n","repo_name":"wssh13/JDMemberCloseAccount","sub_path":"add_cookie.py","file_name":"add_cookie.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"}
+{"seq_id":"21302794425","text":"'''\n1. Sort in ascending order\n2. 
While adding to the current group, once it is sufficient, move on to the next group and increase ret by 1\n'''\n\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\n\narr = list(map(int, input().split()))\narr = sorted(arr)\n\nret = 0\ncurrent = 0\n\nfor x in arr:\n    current += 1\n    if current >= x:\n        ret += 1\n        current = 0\n\nprint(ret)","repo_name":"goo314/problem-solving","sub_path":"Book-Coding-Test/ch11_past_greedy/p01.py","file_name":"p01.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"27064330509","text":"## Melon Music crawling \r\n\r\nimport math\r\nimport time\r\nimport datetime\r\nstart = time.time()\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver import Chrome\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By\r\nimport time \r\n## for processing\r\nfrom konlpy.tag import Hannanum\r\nimport re\r\nimport pandas as pd\r\nimport numpy as np\r\nimport csv\r\nimport sys\r\n## visualization\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nfrom collections import Counter\r\nfrom wordcloud import WordCloud\r\nfrom PIL import Image\r\n\r\n## interactiveshell\r\nfrom IPython.core.interactiveshell import InteractiveShell\r\nInteractiveShell.ast_node_interactivity = \"all\"\r\n#ignore warnings\r\nimport warnings\r\nwarnings.filterwarnings(action=\"ignore\")\r\n\r\nurl = \"https://www.melon.com/\"\r\nprint(\"Enter the artist name\")\r\nsinger = str(input())\r\n\r\n\r\n#List of information to collect \r\ntitles =[]\r\ninfos =[]\r\nlyricses=[]\r\n\r\ndef melon_crawling(url, singer):\r\n    \r\n    ##Launch the Chrome driver\r\n    driver = webdriver.Chrome()\r\n    driver.get(url) \r\n    \r\n    ##Navigate pages\r\n    #Search box\r\n    driver.find_element(By.XPATH,'//*[@id=\"top_search\"]').click()\r\n    #Type the artist name\r\n    driver.find_element(By.XPATH,'//*[@id=\"top_search\"]').send_keys(singer)\r\n    #Click the search button\r\n    driver.find_element(By.XPATH,'//*[@id=\"gnb\"]/fieldset/button[2]').click() \r\n    #Select songs\r\n    driver.find_element(By.XPATH,'//*[@id=\"divCollection\"]/ul/li[3]/a').click() \r\n    #From the artist name\r\n    driver.find_element(By.XPATH,'//*[@id=\"conts\"]/div[3]/div[1]/a[2]').click()\r\n    driver.execute_script(\"window.scrollTo(0, 500)\") \r\n    time.sleep(3) \r\n    \r\n    # Crawl song information\r\n    try:\r\n        for page in range(1,6):\r\n            for i in range(1, 51): #50 items per page\r\n                #Move to the song info cell\r\n                sing_css= f'#frm_defaultList > div > table > tbody > tr:nth-child({i}) > td:nth-child(3) > div > div > a.btn.btn_icon_detail'\r\n                driver.find_element(By.CSS_SELECTOR,sing_css).click() \r\n                driver.execute_script(\"window.scrollTo(0, 500)\") \r\n                time.sleep(3) \r\n                ##Parse the page and collect information\r\n                html_source = driver.page_source \r\n                soup = BeautifulSoup(html_source, 'lxml')\r\n                try: \r\n                    title =driver.find_element(By.CSS_SELECTOR, \"#downloadfrm > div > div > div.entry > div.info > div.song_name\").text \r\n                    titles.append(title)\r\n                    #Detailed information \r\n                    info = driver.find_element(By.CSS_SELECTOR, \"#downloadfrm > div > div > div.entry > div.meta\").text\r\n                    infos.append(info)\r\n                    \r\n                    #Lyrics\r\n                    lyrics =driver.find_element(By.CSS_SELECTOR, \"#d_video_summary\").text\r\n                    lyricses.append(lyrics)\r\n                    print(f'Finished crawling song {i}, title:', title)\r\n                except:\r\n                    print(\"There is no lyrics information.\")\r\n                    lyricses.append(\"unknown\")\r\n                    print(f'Finished crawling song {i}, title:', title, \"* note: no lyrics\")\r\n                driver.back()\r\n            print(f\"Finished crawling page {page}\")\r\n            print(\"====================\")\r\n            n= page+1\r\n            print(f'Starting to crawl page {n} ')\r\n            last_page_height = driver.execute_script(\"return document.documentElement.scrollHeight\") \r\n            driver.execute_script(\"window.scrollTo(0, document.documentElement.scrollHeight);\") \r\n            driver.find_element(By.XPATH, f'//*[@id=\"pageObjNavgation\"]/div/span/a[{page}]').click()\r\n            
time.sleep(3)\r\n    except:\r\n        print(\"There is no more information to collect. \")\r\n        pass\r\n    \r\n    driver.close()\r\n    return titles, lyricses, infos\r\n\r\n\r\ntitles, lyricses, infos = melon_crawling(url, singer)\r\n\r\nimport pickle\r\nprint(len(titles), len(lyricses), len(infos))\r\nwith open(\"titles.pkl\", 'wb') as f:\r\n    pickle.dump(titles, f)\r\n    \r\nwith open(\"lyricses.pkl\", 'wb') as f:\r\n    pickle.dump(lyricses, f)\r\n\r\nwith open(\"infos.pkl\", 'wb') as f:\r\n    pickle.dump(infos, f)\r\n\r\nprint(\"Save complete\")\r\n\r\n#Preprocessing\r\ndf= pd.DataFrame({\"title\": titles, \r\n                 \"lyrics\":lyricses, \r\n                  \"information\":infos})\r\nalbum = []\r\ndate= []\r\ngenre =[]\r\n\r\nfor i in range(len(df)):\r\n    album.append(df.information[i].split('\\n')[1])\r\n    date.append(df.information[i].split('\\n')[3])\r\n    genre.append(df.information[i].split('\\n')[5])\r\ndf['album'] = album\r\ndf['date'] = date\r\ndf['genre'] = genre\r\n\r\ndf.drop([\"information\"], axis =1, inplace=True )\r\n\r\ndf.to_csv(\"Melon_Crawling.csv\", index=False)\r\n\r\nend = time.time()\r\nsec = (end - start)\r\nresult = datetime.timedelta(seconds=sec)\r\nresult_list = str(datetime.timedelta(seconds=sec)).split(\".\")\r\nprint(\"Total run time :\", result_list[0])\r\n\r\n\r\n","repo_name":"HwangJae-won/Music_Data","sub_path":"Melon_Music_crawling.py","file_name":"Melon_Music_crawling.py","file_ext":"py","file_size_in_byte":4912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"16522196362","text":"def logging_dict(debug=False):\n    logging_config = {\n        \"version\": 1,\n        \"disable_existing_loggers\": False,\n        \"root\": {\n            \"level\": \"DEBUG\",\n            \"handlers\": [\n                \"consoleHandler\",\n                \"logFileHandler\"\n            ]\n        },\n        \"handlers\": {\n            \"consoleHandler\": {\n                \"class\": \"logging.StreamHandler\",\n                \"level\": \"INFO\",\n                \"formatter\": \"consoleFormatter\",\n                \"stream\": \"ext://sys.stdout\"\n            },\n            \"logFileHandler\": {\n                \"class\": \"logging.FileHandler\",\n                \"level\": \"INFO\",\n                \"formatter\": \"logFileFormatter\",\n                \"filename\": \"./log/tune.log\",\n                \"mode\": \"a+\",\n                \"encoding\": \"utf-8\"\n            }\n        },\n        \"formatters\": {\n            \"consoleFormatter\": {\n                \"format\": \"[%(asctime)s] [%(levelname)s] %(message)s\"\n            },\n            \"logFileFormatter\": {\n                \"format\": \"[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)s] %(message)s\"\n            }\n        }\n    }\n    if debug:\n        logging_config[\"handlers\"][\"consoleHandler\"][\"level\"] = \"DEBUG\"\n        logging_config[\"handlers\"][\"logFileHandler\"][\"level\"] = \"DEBUG\"\n    return logging_config\n","repo_name":"ssl-oyamata/postgres_opttune","sub_path":"pgopttune/utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1317,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"}
+{"seq_id":"29415847955","text":"# -*- coding: utf-8 -*-\nDESC = u\"Special judge: grading using the judge module attached to the problem (see help)\"\n\nimport imp\n\ndef judge(data_dir, input_path, output_path, expected_path):\n    try:\n        judge_module_info = imp.find_module('judge', [data_dir])\n    except ImportError:\n        raise Exception(\"Can't find judge module from attachment.\")\n    judge_module = imp.load_module('judge', *judge_module_info)\n    assert hasattr(judge_module, 'judge'), 'judge.judge() not present'\n    return judge_module.judge(input_path, output_path, expected_path)\n","repo_name":"hhjeong/LCS","sub_path":"judge/differs/special_judge.py","file_name":"special_judge.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"26808425493","text":"from flask_sqlalchemy import 
SQLAlchemy;\n\ndb = SQLAlchemy();\n\ndef connectDB(app):\n '''Connect to a specified database.'''\n db.app = app;\n db.init_app(app);\n\nclass Pet(db.Model):\n '''Pet relation model.'''\n\n __tablename__ = 'pets';\n\n id = db.Column(db.Integer, autoincrement = True, primary_key = True);\n name = db.Column(db.String(32), nullable = False);\n species = db.Column(db.String(16), nullable = False);\n age = db.Column(db.Integer, nullable = True);\n photo_url = db.Column(db.String(), nullable = True);\n notes = db.Column(db.Text, nullable = True);\n available = db.Column(db.Boolean, default=bool('True'));\n\n def __repr__(self):\n '''Self representation of a Pet Instance is: '''\n return f'';\n\n def returnInstanceAttributes(self):\n '''Return a dict (kwarg list) of this object's attributes.'''\n # [property for property in dir(selectedPet) if not property.startswith('_')]\n # Hint by Meitham: https://stackoverflow.com/questions/11637293/iterate-over-object-attributes-in-python\n\n # [for property in dir(self) if not (property.startwith('_') or callable(getattr(self, property)))]; # eliminates all (d)under properties and functions\n # yields 3 'overhead' attributes: 'metadata', 'query', 'registry' if I do the above:\n # self.query yields a `flask_sqlalchemy.BaseQuery` object, i.e. \n # self.registry yields a `sqlalchemy.orm.decl_api.registry` object, i.e. \n # self.metadata yields a `MetaData()` function\n # get instance attributes fast with `vars()`: https://datagy.io/python-print-objects-attributes/\n # sqlAlchemy has an overhead attribute of '_sa_instance_state', i.e. vars(self) may yield: \n # {'_sa_instance_state': , 'species': 'Doggo', 'id': 3, 'photo_url': '', 'available': True, 'age': 2, 'name': 'TestChecked', 'notes': ''}\n instanceAttributes = vars(self);\n # instanceAttributes.pop('_sa_instance_state'); # apparently it skips over it, for now\n return instanceAttributes;\n\n def updatePet(self, data):\n '''Update the selected Pet instance.''' \n # self.update(**data); # there is no 'update()' method for the instance\n # https://stackoverflow.com/questions/270879/efficiently-updating-database-using-sqlalchemy-orm\n db.session.query(Pet).filter(Pet.id == self.id).update(data);\n db.session.commit();\n return;\n\n # def deletePet(self):\n # db.session.delete(self.id);\n # db.session.commit();\n # return;\n\n @classmethod\n def createPet(cls, data):\n '''Create a new Pet instance.'''\n db.session.add(Pet(**data)); # convert a dict to kwargs: https://stackoverflow.com/questions/5710391/converting-python-dict-to-kwargs\n db.session.commit();\n return;\n\n @classmethod\n def returnPetByID(cls, petID):\n '''Return a Pet by petID.'''\n return cls.query.get_or_404(petID);\n # `None`python if it DNE\n \n @classmethod\n def returnPetByAvailability(cls):\n '''Return Pet(s) categorized by availability.'''\n return {\n 'available': cls.query.filter(cls.available == True),\n 'adopted': cls.query.filter(cls.available == False)\n };\n\n @classmethod\n def returnListOfPets(cls, returnLimit = None, pageNumber = 0):\n '''Return Pet(s), generalized with pagination ( added return limit and page number). Need to integrate filters. 
(WHERE)'''\n        query = cls.query;\n        if returnLimit:\n            query = query.limit(returnLimit)\n        if pageNumber:\n            query = query.offset(returnLimit * pageNumber);\n        return query.all();\n\n    # @classmethod\n    # def sortPetsByDateCreated(cls, sortMethod = \"ASC\", returnLimit = None):\n    #     # returnListOfPets(returnLimit)\n    #     return returnListOfPets;\n    \n    # @classmethod\n    # def sortPetsByDistance(cls):\n    #     #\n    #     return cls.;\n\n    @classmethod\n    def cleanRequestData(cls, requestData, createPetDefaults = False):\n        '''This is a general helper method to clean request data relating to the Pet object.'''\n        mutableRequestData = dict(requestData);\n\n        if mutableRequestData['csrf_token']:\n            # remove 'csrf_token' from messing up Pet obj when passing information as **kwarg\n            mutableRequestData.pop('csrf_token');\n        \n        if 'age' in mutableRequestData and not isinstance(mutableRequestData['age'],(int)):\n            # added `'age' in requestFormData` for generality. parses `''` as `None` because `''` conflicts with the SQL (NULLABLE) INTEGER constraint\n            mutableRequestData['age'] = None;\n        \n        if not createPetDefaults:\n            # if not from a createRoute\n\n            mutableRequestData['available'] = 'available' in mutableRequestData;\n            # POST only returns \"successful\" controls (https://stackoverflow.com/questions/30681482/why-we-cannot-post-unchecked-checkbox), i.e. a checked checkbox is successful, otherwise not\n            # DO THIS FOR ALL BOOLEANFIELDS (also radio buttons)\n        \n        return mutableRequestData;\n    \n\n\n","repo_name":"YiJohnZhang/sb_u02_assignments","sub_path":"02_24.01.15_wtforms_exercise/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"4679436392","text":"# Michael Baart v00199818\n# Amy Hanvoravongchai v00822271\n\nimport os\nimport re\nimport string\nimport math\n\ndef get_data(data_file, label_file):\n\n    with open(data_file) as f:\n        data = f.readlines()\n\n    with open(label_file) as g:\n        labels = g.readlines()\n\n    data = [x.strip() for x in data]\n    labels = [x.strip() for x in labels]\n\n    return data, labels\n\ndef train_model(data, labels):\n\n    wise_count = 0\n    prediction_count = 0\n\n    data = [x.split() for x in data]\n    for i, sentence in enumerate(data):\n        print(sentence)\n        for word in sentence:\n            if word not in model:\n                model[word] = [0, 0]\n\n            model[word][int(labels[i])] += 1\n\n        if int(labels[i]) == 0:\n            wise_count += 1\n        else:\n            prediction_count += 1\n\n    return model, wise_count, prediction_count\n\ndef calc_cond_probabilities(model, wise_count, pred_count):\n    cond_probabilities = {}\n\n    for word in model:\n        if word not in cond_probabilities:\n            cond_probabilities[word] = [0,0]\n        #calculate conditional probabilities P(word|wise) & P(word|prediction)\n        cond_probabilities[word][0] = (model[word][0] + 1)/(wise_count + len(model))\n        cond_probabilities[word][1] = (model[word][1] + 1)/(pred_count + len(model))\n\n    return cond_probabilities\n\ndef classify_test_data(test_data_saying, prob_wise, prob_pred, cond_probs):\n    #accumulate log-probabilities as sums of logs; multiplying log values is not a valid Naive Bayes score\n    temp_wise = 0\n    temp_pred = 0\n    for word in test_data_saying:\n        if word in model:\n            temp_wise += math.log(cond_probs[word][0], 2)\n            temp_pred += math.log(cond_probs[word][1], 2)\n\n    temp_wise += math.log(prob_wise, 2)\n    temp_pred += math.log(prob_pred, 2)\n\n    if temp_wise > temp_pred:\n        return 0\n    return 1\n\nif __name__ == '__main__':\n    model = {}\n\n    train_data, train_labels = get_data(\"traindata.txt\", \"trainlabels.txt\")\n    train_model, 
train_wise_count, train_prediction_count = train_model(train_data, train_labels)\n\n #P(c) & P(-c)\n probablility_wise = train_wise_count/(train_wise_count + train_prediction_count)\n probablility_prediction = train_prediction_count/(train_wise_count + train_prediction_count)\n\n #P(Chinese|c),... etc\n conditional_probabilities = {}\n conditional_probabilities = calc_cond_probabilities(train_model, train_wise_count, train_prediction_count)\n\n # Setup test data\n # test_data, test_labels = get_data(\"testdata.txt\", \"testlabels.txt\")\n test_data, test_labels = get_data(\"traindata.txt\", \"trainlabels.txt\")\n\n\n result = []\n for i, saying in enumerate(test_data):\n words = saying.split(' ')\n result.append(classify_test_data(words, probablility_wise, probablility_prediction, conditional_probabilities))\n\n print(result)\n","repo_name":"amyhanv/textclassifier","sub_path":"test-classifier.py","file_name":"test-classifier.py","file_ext":"py","file_size_in_byte":2806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12435317101","text":"import numpy as np\nN = 100\nh = 2/N\nx = np.linspace(-1,1,N)\nprint(x)\ny = np.sqrt(1 - x**2)\nprint(y)\nyk = sum(y)\nI = h*yk\nprint('The value of integral is :', I)","repo_name":"Jainam2848/Coding","sub_path":"Computational Physics/Chapter4/Exercise4.4.py","file_name":"Exercise4.4.py","file_ext":"py","file_size_in_byte":159,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"18017112691","text":"from datetime import date\nfrom decimal import Decimal\nfrom typing import NamedTuple\nfrom urllib.parse import urlencode\n\nfrom calculadora_do_cidadao.adapters import Adapter\nfrom calculadora_do_cidadao.fields import DateField\nfrom calculadora_do_cidadao.typing import MaybeIndexesGenerator\n\n\nURL = \"https://www3.bcb.gov.br/novoselic/rest/fatoresAcumulados/pub/search\"\nURL_PARAMS = {\n \"parametrosOrdenacao\": '[{\"nome\":\"periodo\",\"decrescente\":false}]',\n \"page\": 1,\n \"pageSize\": 48,\n}\n\n\nclass Selic(Adapter):\n \"\"\"Adapter for Brazilian Central Bank SELIC series.\"\"\"\n\n url = f\"{URL}?{urlencode(URL_PARAMS)}\"\n file_type = \"json\"\n\n HEADERS = {\n \"Accept\": \"application/json, text/plain, */*\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n }\n POST_DATA = (\n {\n \"campoPeriodo\": \"mensal\",\n \"dataInicial\": \"\",\n \"dataFinal\": \"\",\n \"ano\": year,\n \"exibirMeses\": True,\n }\n for year in range(date.today().year, 1996, -1)\n )\n IMPORT_KWARGS = {\"json_path\": [\"registros\"]}\n SHOULD_AGGREGATE = True\n\n def serialize(self, row: NamedTuple) -> MaybeIndexesGenerator:\n reference = DateField.deserialize(row.periodo.replace(\" \", \"\")) # type: ignore\n value = Decimal(row.fator) # type: ignore\n yield reference, value\n\n def aggregate(self):\n accumulated = 1\n for key in sorted(self.data.keys()):\n self.data[key] = accumulated * self.data[key]\n accumulated = self.data[key]\n","repo_name":"cuducos/calculadora-do-cidadao","sub_path":"calculadora_do_cidadao/adapters/selic.py","file_name":"selic.py","file_ext":"py","file_size_in_byte":1529,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"48"} +{"seq_id":"16877254924","text":"from posts.models import Comment, Follow, Group, Post, User\nfrom rest_framework import serializers\nfrom rest_framework.relations import SlugRelatedField\nfrom rest_framework.validators import UniqueTogetherValidator\n\n\nclass 
GroupSerializer(serializers.ModelSerializer):\n\n    class Meta:\n        model = Group\n        fields = '__all__'\n\n\nclass PostSerializer(serializers.ModelSerializer):\n    author = SlugRelatedField(slug_field='username', read_only=True)\n    image = serializers.ImageField(required=False, read_only=True)\n\n    class Meta:\n        fields = '__all__'\n        model = Post\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n    author = serializers.SlugRelatedField(\n        read_only=True, slug_field='username'\n    )\n\n    class Meta:\n        fields = '__all__'\n        model = Comment\n\n\nclass FollowSerializer(serializers.ModelSerializer):\n    user = serializers.StringRelatedField(\n        default=serializers.CurrentUserDefault()\n    )\n    following = serializers.SlugRelatedField(\n        slug_field='username',\n        queryset=User.objects.all(),\n    )\n\n    def validate(self, data):\n        if self.context['request'].user == data['following']:\n            raise serializers.ValidationError(\n                'You cannot subscribe to yourself'\n            )\n        return data\n\n    class Meta:\n        model = Follow\n        fields = ('user', 'following')\n        validators = [\n            UniqueTogetherValidator(\n                queryset=Follow.objects.all(),\n                fields=('user', 'following')\n            ),\n        ]\n","repo_name":"malabr1sta/api_final_yatube","sub_path":"yatube_api/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"5113153162","text":"import logging\nimport json\nimport numpy as np\nimport random\nfrom sklearn.datasets import load_iris\nfrom sklearn_instrumentation import SklearnInstrumentor\nfrom sklearn_instrumentation.instruments.logging import TimeElapsedLogger\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.decomposition import PCA, TruncatedSVD\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import FeatureUnion, Pipeline\nfrom sklearn.preprocessing import Normalizer, StandardScaler\nfrom kafka import KafkaProducer\nfrom pykafka import KafkaClient\nfrom datetime import datetime\n\nclient = KafkaClient(hosts='localhost:9092')\ntopic = client.topics['ml1']\nproducer = topic.get_sync_producer()\nlogging.basicConfig(level=logging.INFO)\n\nX, y = load_iris(return_X_y=True)\nrf = RandomForestClassifier()\n\ndef pipeline():\n    \"\"\"A dummy model that has a bunch of components that we can test.\"\"\"\n    r=random.randrange(1,5)\n    if r==4:\n        nest=100\n    else:\n        nest=10\n    model = Pipeline(\n        [\n            (\"scaler\", StandardScaler()),\n            (\"normal\", Normalizer()),\n            (\n                \"union\",\n                FeatureUnion(\n                    [\n                        (\"pca\", PCA(n_components=1)),\n                        (\"svd\", TruncatedSVD(n_components=2)),\n                    ],\n                    n_jobs=1, # parallelized components won't generate spans\n                ),\n            ),\n            (\"class\", RandomForestClassifier(n_estimators=nest)),\n        ]\n    )\n    X_train,y_train=load_iris(return_X_y=True)\n    model.fit(X_train, y_train)\n    return model\ndef random_input():\n    \"\"\"A random record from the feature set.\"\"\"\n    rows = X.shape[0]\n    random_row = np.random.choice(rows, size=1)\n    return X[random_row, :]\n\n\nrf.fit(X, y)\nrf.predict(X)\n\nmodel = pipeline()\n#instrumentor3 = SklearnInstrumentor(instrument=OpenTelemetrySpanner())\n#instrumentor3.instrument_estimator(model)\nj=1\nwhile j:\n    r=random.randrange(0,100)\n    x_test = random_input()+1\n    #x_test[0,1]=x_test[0,1]+r\n    x1=x_test.tolist()\n    z=model.predict(x_test)\n    z1=z.tolist()\n    m={}\n    t=datetime.now()\n    y=t.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n    
m={\"ts\":y,\"col1\":x1[0][0],\"col2\":x1[0][1],\"col3\":x1[0][2],\"col4\":x1[0][3],\"predict\":z1[0]}\n n=json.dumps(m)\n producer.produce(n.encode('ascii'))\n #print(n)\n# No more logging\n#rf.predict(X)\n","repo_name":"vnarayaj/druid-anomaly","sub_path":"ml_mon_predict.py","file_name":"ml_mon_predict.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8732424359","text":"# -*- coding: utf-8 -*-\n\"\"\"\ndownloads NOAA tide gauge files\nkristine larson\n\"\"\"\nimport argparse\nimport datetime\nimport matplotlib.pyplot as plt\nimport os\nimport requests\nimport sys\nimport gnssrefl.gps as g\n\nfrom gnssrefl.utils import validate_input_datatypes, str2bool\n\n\ndef quickp(station,t,sealevel):\n \"\"\"\n \"\"\"\n fs = 10\n fig,ax=plt.subplots()\n ax.plot(t, sealevel, '-')\n plt.title('Tides at ' + station)\n plt.xticks(rotation =45,fontsize=fs);\n plt.ylabel('meters')\n plt.grid()\n fig.autofmt_xdate()\n\n plt.show()\n return\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"station\", help=\"station name\", type=str)\n parser.add_argument(\"date1\", help=\"start-date, 20150101\", type=str)\n parser.add_argument(\"date2\", help=\"end-date, 20150110\", type=str)\n parser.add_argument(\"-output\", default=None, help=\"Optional output filename\", type=str)\n parser.add_argument(\"-plt\", default=None, help=\"quick plot to screen\", type=str)\n args = parser.parse_args().__dict__\n\n # convert all expected boolean inputs from strings to booleans\n boolean_args = ['plt']\n args = str2bool(args, boolean_args)\n\n\n # only return a dictionary of arguments that were added from the user - all other defaults will be set in code below\n return {key: value for key, value in args.items() if value is not None}\n\n\ndef download_tides(station: str, date1: str, date2: str, output: str = None, plt: bool = False):\n \"\"\"\n Downloads NOAA tide gauge files\n Downloads a json and converts it to plain txt with columns!\n\n Parameters:\n ___________\n station : string\n 7 character ID of the station.\n\n date1 : string\n start date.\n Example value: 20150101\n\n date2 : string\n end date.\n Example value: 20150110\n\n output : string, optional\n Optional output filename\n default is None\n\n plt: boolean, optional\n plot comes to the screen\n default is None\n\n\n \"\"\"\n g.check_environ_variables()\n\n xdir = os.environ['REFL_CODE']\n outdir = xdir + '/Files/'\n if not os.path.exists(outdir) :\n subprocess.call(['mkdir', outdir])\n\n\n # metadata records {'id': '8764227', 'name': 'LAWMA, Amerada Pass', 'lat': '29.4496', 'lon': '-91.3381'}\n\n #station = '8764227' this was a test station\n if len(station) != 7:\n print('station must have 7 characters ', station); sys.exit()\n if len(date1) != 8:\n print('date1 must have 8 characters', date1); sys.exit()\n if len(date2) != 8:\n print('date2 must have 8 characters', date2); sys.exit()\n\n urlL = \"https://api.tidesandcurrents.noaa.gov/api/prod/datagetter?\"\n endL = \"&product=water_level&datum=mllw&units=metric&time_zone=gmt&application=web_services&format=json\"\n url = urlL + \"begin_date=\" + date1 + \"&end_date=\" + date2 + \"&station=\" + station + endL\n data = requests.get(url).json()\n if 'error' in data.keys():\n print(data['error'])\n sys.exit()\n else:\n print(data['metadata'])\n # number of records\n NV = len(data['data']) \n if output is None:\n # use the default\n outfile = outdir + station + '_' + 
'noaa.txt'\n else:\n outfile = outdir + output\n\n tt = []; slevel = []; obstimes = []\n fout = open(outfile, 'w+')\n fout.write(\"%YYYY MM DD HH MM Water(m) DOY MJD\\n\")\n for i in range(0, NV):\n t = data['data'][i]['t']\n slr = data['data'][i]['v']\n slf = data['data'][i]['f']\n #print(i,t, slr,slf)\n if slr == '':\n aa=1 \n #print('no data')\n else:\n sl = float(data['data'][i]['v'])\n year = int(t[0:4]); mm = int(t[5:7]); dd = int(t[8:10])\n hh = int(t[11:13]); minutes = int(t[14:16])\n today = datetime.datetime(year, mm, dd)\n doy = (today - datetime.datetime(today.year, 1, 1)).days + 1\n m, f = g.mjd(year, mm, dd, hh, minutes, 0)\n mjd = m + f;\n tt.append(mjd)\n bigT = datetime.datetime(year=year, month=mm, day=dd, hour=hh, minute=minutes, second=0)\n obstimes.append(bigT)\n\n slevel.append(sl)\n fout.write(\" {0:4.0f} {1:2.0f} {2:2.0f} {3:2.0f} {4:2.0f} {5:7.3f} {6:3.0f} {7:15.6f} \\n\".format(year, mm, dd, hh, minutes, sl, doy, mjd))\n fout.close()\n print('NOAA tide gauge data written out to: ', outfile)\n\n if plt:\n quickp(station,obstimes,slevel)\n\n\ndef main():\n args = parse_arguments()\n download_tides(**args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"hejeff749/gnssrefl","sub_path":"gnssrefl/download_tides.py","file_name":"download_tides.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"9011934255","text":"import paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nfrom ppdet.modeling.initializer import constant_\nfrom paddle.nn.initializer import KaimingNormal\n\n\nclass ConvModule(nn.Layer):\n def __init__(self,\n in_channels,\n out_channels,\n kernel_size=1,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n bias=False,\n norm_type='bn',\n wtih_act=True):\n super(ConvModule, self).__init__()\n assert norm_type in ['bn', 'sync_bn', 'gn', None]\n self.with_norm = norm_type is not None\n self.wtih_act = wtih_act\n self.conv = nn.Conv2D(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias_attr=bias,\n weight_attr=KaimingNormal())\n if self.with_norm:\n if norm_type == 'bn':\n self.bn = nn.BatchNorm2D(out_channels)\n elif norm_type == 'gn':\n self.bn = nn.GroupNorm(out_channels, out_channels)\n\n if self.wtih_act:\n self.act = nn.ReLU()\n\n def forward(self, inputs):\n x = self.conv(inputs)\n if self.with_norm:\n x = self.bn(x)\n if self.wtih_act:\n x = self.act(x)\n return x\n\n\ndef LinearModule(hidden_dim):\n return nn.LayerList(\n [nn.Linear(\n hidden_dim, hidden_dim, bias_attr=True), nn.ReLU()])\n\n\nclass FeatureResize(nn.Layer):\n def __init__(self, size=(10, 25)):\n super(FeatureResize, self).__init__()\n self.size = size\n\n def forward(self, x):\n x = F.interpolate(x, self.size)\n return x.flatten(2)\n\n\nclass ROIGather(nn.Layer):\n '''\n ROIGather module for gather global information\n Args: \n in_channels: prior feature channels\n num_priors: prior numbers we predefined\n sample_points: the number of sampled points when we extract feature from line\n fc_hidden_dim: the fc output channel\n refine_layers: the total number of layers to build refine\n '''\n\n def __init__(self,\n in_channels,\n num_priors,\n sample_points,\n fc_hidden_dim,\n refine_layers,\n mid_channels=48):\n super(ROIGather, self).__init__()\n self.in_channels = in_channels\n self.num_priors = num_priors\n self.f_key = ConvModule(\n 
in_channels=self.in_channels,\n out_channels=self.in_channels,\n kernel_size=1,\n stride=1,\n padding=0,\n norm_type='bn')\n\n self.f_query = nn.Sequential(\n nn.Conv1D(\n in_channels=num_priors,\n out_channels=num_priors,\n kernel_size=1,\n stride=1,\n padding=0,\n groups=num_priors),\n nn.ReLU(), )\n self.f_value = nn.Conv2D(\n in_channels=self.in_channels,\n out_channels=self.in_channels,\n kernel_size=1,\n stride=1,\n padding=0)\n self.W = nn.Conv1D(\n in_channels=num_priors,\n out_channels=num_priors,\n kernel_size=1,\n stride=1,\n padding=0,\n groups=num_priors)\n\n self.resize = FeatureResize()\n constant_(self.W.weight, 0)\n constant_(self.W.bias, 0)\n\n self.convs = nn.LayerList()\n self.catconv = nn.LayerList()\n for i in range(refine_layers):\n self.convs.append(\n ConvModule(\n in_channels,\n mid_channels, (9, 1),\n padding=(4, 0),\n bias=False,\n norm_type='bn'))\n\n self.catconv.append(\n ConvModule(\n mid_channels * (i + 1),\n in_channels, (9, 1),\n padding=(4, 0),\n bias=False,\n norm_type='bn'))\n\n self.fc = nn.Linear(\n sample_points * fc_hidden_dim, fc_hidden_dim, bias_attr=True)\n\n self.fc_norm = nn.LayerNorm(fc_hidden_dim)\n\n def roi_fea(self, x, layer_index):\n feats = []\n for i, feature in enumerate(x):\n feat_trans = self.convs[i](feature)\n feats.append(feat_trans)\n cat_feat = paddle.concat(feats, axis=1)\n cat_feat = self.catconv[layer_index](cat_feat)\n return cat_feat\n\n def forward(self, roi_features, x, layer_index):\n '''\n Args:\n roi_features: prior feature, shape: (Batch * num_priors, prior_feat_channel, sample_point, 1)\n x: feature map\n layer_index: currently on which layer to refine\n Return: \n roi: prior features with gathered global information, shape: (Batch, num_priors, fc_hidden_dim)\n '''\n\n roi = self.roi_fea(roi_features, layer_index)\n # return roi\n # print(roi.shape)\n # return roi\n bs = x.shape[0]\n # print(bs)\n #roi = roi.contiguous().view(bs * self.num_priors, -1)\n roi = roi.reshape([bs * self.num_priors, -1])\n # roi = paddle.randn([192,2304])\n # return roi\n # print(roi)\n # print(self.fc)\n # print(self.fc.weight)\n roi = self.fc(roi)\n roi = F.relu(self.fc_norm(roi))\n # return roi\n #roi = roi.view(bs, self.num_priors, -1)\n roi = roi.reshape([bs, self.num_priors, -1])\n query = roi\n\n value = self.resize(self.f_value(x)) # (B, C, N) global feature\n query = self.f_query(\n query) # (B, N, 1) sample context feature from prior roi\n key = self.f_key(x)\n value = value.transpose(perm=[0, 2, 1])\n key = self.resize(key) # (B, C, N) global feature\n sim_map = paddle.matmul(query, key)\n sim_map = (self.in_channels**-.5) * sim_map\n sim_map = F.softmax(sim_map, axis=-1)\n\n context = paddle.matmul(sim_map, value)\n context = self.W(context)\n\n roi = roi + F.dropout(context, p=0.1, training=self.training)\n\n return roi\n\n\nclass SegDecoder(nn.Layer):\n '''\n Optionaly seg decoder\n '''\n\n def __init__(self,\n image_height,\n image_width,\n num_class,\n prior_feat_channels=64,\n refine_layers=3):\n super().__init__()\n self.dropout = nn.Dropout2D(0.1)\n self.conv = nn.Conv2D(prior_feat_channels * refine_layers, num_class, 1)\n self.image_height = image_height\n self.image_width = image_width\n\n def forward(self, x):\n x = self.dropout(x)\n x = self.conv(x)\n x = F.interpolate(\n x,\n size=[self.image_height, self.image_width],\n mode='bilinear',\n align_corners=False)\n return x\n\n\nimport paddle.nn as nn\n\n\ndef accuracy(pred, target, topk=1, thresh=None):\n \"\"\"Calculate accuracy according to the prediction and 
target.\n\n Args:\n pred (torch.Tensor): The model prediction, shape (N, num_class)\n target (torch.Tensor): The target of each prediction, shape (N, )\n topk (int | tuple[int], optional): If the predictions in ``topk``\n matches the target, the predictions will be regarded as\n correct ones. Defaults to 1.\n thresh (float, optional): If not None, predictions with scores under\n this threshold are considered incorrect. Default to None.\n\n Returns:\n float | tuple[float]: If the input ``topk`` is a single integer,\n the function will return a single float as accuracy. If\n ``topk`` is a tuple containing multiple integers, the\n function will return a tuple containing accuracies of\n each ``topk`` number.\n \"\"\"\n assert isinstance(topk, (int, tuple))\n if isinstance(topk, int):\n topk = (topk, )\n return_single = True\n else:\n return_single = False\n\n maxk = max(topk)\n if pred.shape[0] == 0:\n accu = [pred.new_tensor(0.) for i in range(len(topk))]\n return accu[0] if return_single else accu\n assert pred.ndim == 2 and target.ndim == 1\n assert pred.shape[0] == target.shape[0]\n assert maxk <= pred.shape[1], \\\n f'maxk {maxk} exceeds pred dimension {pred.shape[1]}'\n pred_value, pred_label = pred.topk(maxk, axis=1)\n pred_label = pred_label.t() # transpose to shape (maxk, N)\n correct = pred_label.equal(target.reshape([1, -1]).expand_as(pred_label))\n if thresh is not None:\n # Only prediction values larger than thresh are counted as correct\n correct = correct & (pred_value > thresh).t()\n res = []\n for k in topk:\n correct_k = correct[:k].reshape([-1]).cast(\"float32\").sum(0,\n keepdim=True)\n correct_k = correct_k * (100.0 / pred.shape[0])\n res.append(correct_k)\n return res[0] if return_single else res\n\n\nclass Accuracy(nn.Layer):\n def __init__(self, topk=(1, ), thresh=None):\n \"\"\"Module to calculate the accuracy.\n\n Args:\n topk (tuple, optional): The criterion used to calculate the\n accuracy. Defaults to (1,).\n thresh (float, optional): If not None, predictions with scores\n under this threshold are considered incorrect. 
Default to None.\n \"\"\"\n super().__init__()\n self.topk = topk\n self.thresh = thresh\n\n def forward(self, pred, target):\n \"\"\"Forward function to calculate accuracy.\n\n Args:\n pred (torch.Tensor): Prediction of models.\n target (torch.Tensor): Target for each prediction.\n\n Returns:\n tuple[float]: The accuracies under different topk criterions.\n \"\"\"\n return accuracy(pred, target, self.topk, self.thresh)\n","repo_name":"swx3027925806/PaddleDetection-OOD-Det","sub_path":"ppdet/modeling/clrnet_utils.py","file_name":"clrnet_utils.py","file_ext":"py","file_size_in_byte":10125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"23144209796","text":"from dataclasses import dataclass, field\n\n\n@dataclass(order=True, frozen=True)\nclass Person:\n sort_index: int = field(init=False, repr=False)\n name: str\n job: str\n age: int\n strength: int = 100\n\n def __post_init__(self):\n object.__setattr__(self, 'sort_index', self.strength)\n # setattr(self, 'sort_index', self.strength) # ERROR\n\n def __str__(self):\n return f'{self.name}, {self.job} ({self.age})'\n\n\nif __name__ == '__main__':\n person1 = Person(\"Geralt\", \"Witcher\", 30, 99)\n person2 = Person(\"Yennefer\", \"Sorceress\", 25)\n person3 = Person(\"Yennefer\", \"Sorceress\", 25)\n\n # setattr(person1, 'name', 'Some name') # error, \"dataclasses.FrozenInstanceError: cannot assign to field 'name'\"\n print(person1)\n print(id(person2))\n print(id(person3))\n print(person3 == person2)\n print(person1 > person2)\n","repo_name":"aviz92/PythonCourses","sub_path":"PythonCourses/BetterPython/N11_data_classes/dataclasses-after.py","file_name":"dataclasses-after.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20322558421","text":"import gi\ngi.require_version('ZBar', '1.0')\ngi.require_version(\"Gtk\",\"3.0\")\nfrom gi.repository import Gtk, GdkPixbuf, ZBar\n \nclass MainWindow(Gtk.Window):\n def __init__(self):\n Gtk.Window.__init__(self)\n \n main_vbox = Gtk.VBox()\n self.add(main_vbox)\n \n label = Gtk.Label()\n main_vbox.pack_start(label,False,False,0)\n \n button = Gtk.Button()\n button.props.label = \"Read Barcode\"\n main_vbox.pack_start(button,False,False,0)\n \n \n self.video_device = \"/dev/video0\"\n zbarw = ZBar.Gtk()\n main_vbox.pack_start(zbarw,False,False,0)\n \n zbarw.request_video_size(600,400)\n\n zbarw.connect(\"decoded\",self.on_decodedd,button,label)\n button.connect(\"clicked\",self.on_button_clicked,zbarw,label)\n \n def on_button_clicked(self,button,zbarw,label):\n label.props.label = \"\"\n if zbarw.get_video_device() == self.video_device:\n zbarw.set_video_device(\"\")\n zbarw.set_video_enabled(False)\n button.props.label = \"Start Read Barcode\"\n else:\n zbarw.set_video_device(self.video_device)\n zbarw.set_video_enabled(True)\n button.props.label = \"Stop Read Barcode\"\n\n \n def on_decodedd(self,zbarw,type_,data,button,label):\n print(type_)\n print(data)\n zbarw.set_video_device(\"\")\n zbarw.set_video_enabled(False)\n button.props.label = \"Start Read Barcode\"\n label.props.label = str(type_) + \" : \" + str(data)\n\n \n \n \nmw = 
MainWindow()\nmw.connect(\"delete-event\",Gtk.main_quit)\nmw.show_all()\nGtk.main()\n","repo_name":"yucefsourani/MyScripts","sub_path":"zbargtk3_0.py","file_name":"zbargtk3_0.py","file_ext":"py","file_size_in_byte":1691,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"25872482971","text":"N,M = map(int,input().split())\n\narr = [list(map(int,list(input()))) for _ in range(N)]\nresult = 0\nmax_size = min(N,M)\n\nfor x in range(N):\n for y in range(M):\n if result == max_size:\n break\n if arr[x][y] == 1:\n for sizes in range(result,max_size+1):\n temp = 0\n for dx in range(sizes):\n for dy in range(sizes):\n if 0<=x+dx str:\n z = f'В диапазоне от {a} до {b} получили {randint(a, b)}'\n return z\n\n\nresult = func(1, 6)","repo_name":"ValeryBurlakov/python_based","sub_path":"lectures/lection_6/super_module.py","file_name":"super_module.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42523160640","text":"# Having a function for getting input makes it easier to get more input.\n# But at a certain point, it becomes a pain...\n# Is there anything else that could be made into a function?\n\n\ndef get_valid_input():\n valid_input = False\n while not valid_input:\n num1 = input(\"Spell out a number with letters: \")\n if num1 in number_dict:\n return num1\n else:\n print(f\"{num1} is not supported. Try entering a different number: \")\n\n\nnumber_dict = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}\n\nnum1 = get_valid_input()\nnum2 = get_valid_input()\nnum3 = get_valid_input()\nnum4 = get_valid_input()\n\nnum1_int = number_dict[num1]\nnum2_int = number_dict[num2]\nnum3_int = number_dict[num3]\nnum4_int = number_dict[num4]\n\nnumber_sum = num1_int + num2_int + num3_int + num4_int\n\nprint(f\"The sum is {number_sum}\")","repo_name":"PdxCodeGuild/class_mudpuppy","sub_path":"1 Python/class_demos/function-lecture/add-number-words-extended.py","file_name":"add-number-words-extended.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"28741949076","text":"import asyncio\nimport contextlib\nimport itertools\nimport logging\nimport pathlib\nimport shutil\nimport subprocess\nimport typing\nimport unittest\nimport warnings\nfrom collections.abc import AsyncGenerator, Iterator, Sequence\n\nimport numpy as np\nimport pytest\nfrom lsst.ts import salobj, utils\n\n# Long enough to perform any reasonable operation\n# including starting a CSC or loading a script (seconds)\nSTD_TIMEOUT = 60\n# Timeout for when we expect no new data (seconds).\nNODATA_TIMEOUT = 0.5\n\nnp.random.seed(47)\n\nindex_gen = utils.index_generator()\nTEST_DATA_DIR = pathlib.Path(__file__).resolve().parent / \"data\"\nTEST_CONFIG_DIR = TEST_DATA_DIR / \"configs\" / \"good_no_site_file\"\n\n\ndef all_permutations(items: Sequence[typing.Any]) -> Iterator[typing.Any]:\n \"\"\"Return all permutations of a list of items and of all sublists,\n including [].\n \"\"\"\n for i in range(len(items) + 1):\n for val in itertools.permutations(items, r=i):\n yield val\n\n\nclass FailInReportFaultCsc(salobj.TestCsc):\n \"\"\"A Test CSC that fails in report_summary_state when reporting fault.\n\n This CSC always starts up in the ENABLED state.\n\n Used to test for infinite loops and other issues when something\n goes wrong while going to the fault state.\n\n Parameters\n 
----------\n index : `int`\n SAL index.\n doraise : `bool`\n Raise in report_summary_state?\n If false then call fault instead.\n report_first : `bool`\n Call super().report_summary_state first in report_summary_state?\n If false then call it last.\n \"\"\"\n\n def __init__(self, index: int, doraise: bool, report_first: bool) -> None:\n super().__init__(index=index, initial_state=salobj.State.ENABLED)\n self.doraise = doraise\n self.report_first = report_first\n\n async def _report_summary_state(self) -> None:\n if self.report_first:\n await super()._report_summary_state()\n if self.summary_state == salobj.State.FAULT:\n if self.doraise:\n raise RuntimeError(\n \"Intentionally raise an exception when going to the FAULT state\"\n )\n else:\n await self.fault(\n code=10934,\n report=\"a report that will be ignored\",\n traceback=\"a traceback that will be ignored\",\n )\n if not self.report_first:\n await super()._report_summary_state()\n\n\nclass CommunicateTestCase(salobj.BaseCscTestCase, unittest.IsolatedAsyncioTestCase):\n def basic_make_csc(\n self,\n initial_state: salobj.State | int,\n config_dir: str | pathlib.Path | None,\n simulation_mode: int,\n ) -> salobj.BaseCsc:\n return salobj.TestCsc(\n self.next_index(),\n initial_state=initial_state,\n config_dir=config_dir,\n simulation_mode=simulation_mode,\n )\n\n @contextlib.asynccontextmanager\n async def make_remote(self, identity: str) -> AsyncGenerator[salobj.Remote, None]:\n \"\"\"Create a remote to talk to self.csc with a specified identity.\n\n Uses the domain created by make_csc.\n\n Parameters\n ----------\n identity : `str`\n Identity for remote.\n\n Notes\n -----\n Adds a logging.StreamHandler if one is not already present.\n \"\"\"\n domain = self.csc.domain\n original_default_identity = domain.default_identity\n try:\n domain.default_identity = identity\n remote = salobj.Remote(\n domain=domain,\n name=self.csc.salinfo.name,\n index=self.csc.salinfo.index,\n )\n finally:\n domain.default_identity = original_default_identity\n assert remote.salinfo.identity == identity\n try:\n await remote.start_task\n yield remote\n finally:\n await remote.close()\n\n async def test_authorization(self) -> None:\n \"\"\"Test authorization.\n\n For simplicity this test calls setAuthList without a +/- prefix.\n The prefix is tested elsewhere.\n \"\"\"\n # TODO DM-36605 remove use of utils.modify_environ\n # once authlist support is enabled by default\n with utils.modify_environ(LSST_DDS_ENABLE_AUTHLIST=\"1\"):\n async with self.make_csc(initial_state=salobj.State.ENABLED):\n await self.assert_next_sample(\n self.remote.evt_authList,\n authorizedUsers=\"\",\n nonAuthorizedCSCs=\"\",\n )\n\n domain = self.csc.salinfo.domain\n\n # Note that self.csc and self.remote have the same user_host.\n csc_user_host = domain.user_host\n\n # Make a remote that pretends to be from a different CSC\n # and test non-authorized CSCs\n other_name_index = \"Script:5\"\n async with self.make_remote(\n identity=other_name_index\n ) as other_csc_remote:\n all_csc_names = [\"ATDome\", \"Hexapod:1\", other_name_index]\n for csc_names in all_permutations(all_csc_names):\n csc_names_str = \", \".join(csc_names)\n with self.subTest(csc_names_str=csc_names_str):\n await self.remote.cmd_setAuthList.set_start(\n nonAuthorizedCSCs=csc_names_str, timeout=STD_TIMEOUT\n )\n await self.assert_next_sample(\n self.remote.evt_authList,\n authorizedUsers=\"\",\n nonAuthorizedCSCs=\", \".join(sorted(csc_names)),\n )\n if other_name_index in csc_names:\n # A blocked CSC; this should 
fail.\n with salobj.assertRaisesAckError(\n ack=salobj.SalRetCode.CMD_NOPERM\n ):\n await other_csc_remote.cmd_wait.set_start(\n duration=0, timeout=STD_TIMEOUT\n )\n else:\n # Not a blocked CSC; this should work.\n await other_csc_remote.cmd_wait.set_start(\n duration=0, timeout=STD_TIMEOUT\n )\n\n # My user_host should work regardless of\n # non-authorized CSCs.\n await self.remote.cmd_wait.set_start(\n duration=0, timeout=STD_TIMEOUT\n )\n\n # Disabling authorization should always work\n self.csc.cmd_wait.authorize = False\n try:\n await other_csc_remote.cmd_wait.set_start(\n duration=0, timeout=STD_TIMEOUT\n )\n finally:\n self.csc.cmd_wait.authorize = True\n\n # Test authorized users that are not me.\n # Reported auth users should always be in alphabetical order;\n # test this by sending users NOT in alphabetical order.\n all_other_user_hosts = [f\"notme{i}{csc_user_host}\" for i in (3, 2, 1)]\n other_user_host = all_other_user_hosts[1]\n\n async with self.make_remote(\n identity=other_user_host\n ) as other_user_remote:\n for auth_user_hosts in all_permutations(all_other_user_hosts):\n users_str = \", \".join(auth_user_hosts)\n with self.subTest(users_str=users_str):\n await self.remote.cmd_setAuthList.set_start(\n authorizedUsers=users_str,\n nonAuthorizedCSCs=\"\",\n timeout=STD_TIMEOUT,\n )\n await self.assert_next_sample(\n self.remote.evt_authList,\n authorizedUsers=\", \".join(sorted(auth_user_hosts)),\n nonAuthorizedCSCs=\"\",\n )\n if other_user_host in auth_user_hosts:\n # An allowed user; this should work.\n await other_user_remote.cmd_wait.set_start(\n duration=0, timeout=STD_TIMEOUT\n )\n else:\n # Not an allowed user; this should fail.\n with salobj.assertRaisesAckError(\n ack=salobj.SalRetCode.CMD_NOPERM\n ):\n await other_user_remote.cmd_wait.set_start(\n duration=0, timeout=STD_TIMEOUT\n )\n\n # Temporarily disable authorization and try again;\n # this should always work.\n self.csc.cmd_wait.authorize = False\n try:\n await other_user_remote.cmd_wait.set_start(\n duration=0, timeout=STD_TIMEOUT\n )\n finally:\n self.csc.cmd_wait.authorize = True\n\n # My user_host should work regardless of\n # authorized users.\n self.remote.salinfo.domain.identity = csc_user_host\n await self.remote.cmd_wait.set_start(\n duration=0, timeout=STD_TIMEOUT\n )\n\n async def test_duplicate_rejection(self) -> None:\n async with self.make_csc(initial_state=salobj.State.STANDBY):\n assert not self.csc.check_if_duplicate\n\n duplicate_csc = salobj.TestCsc(\n index=self.csc.salinfo.index, check_if_duplicate=True\n )\n try:\n # Change origin so heartbeat private_origin differs.\n duplicate_csc.salinfo.domain.origin += 1\n assert duplicate_csc.check_if_duplicate\n with pytest.raises(\n salobj.ExpectedError, match=\"found another instance\"\n ):\n await asyncio.wait_for(duplicate_csc.done_task, timeout=STD_TIMEOUT)\n finally:\n await duplicate_csc.close()\n\n async def test_set_auth_list_prefix(self) -> None:\n \"\"\"Test the setAuthList command with a +/- prefix\"\"\"\n async with self.make_csc(initial_state=salobj.State.ENABLED):\n await self.assert_next_sample(\n self.remote.evt_authList,\n authorizedUsers=\"\",\n nonAuthorizedCSCs=\"\",\n )\n\n all_csc_names = [\"Test:5\", \"ATDome:0\", \"ATDome\"]\n expected_cscs_set = set()\n # Test adding non-authorized CSCs.\n # Test that a trailing :0 is stripped (so, in this test\n # setAuthList treats ATDome:0 and ATDome as the same CSC).\n # Reported non-auth CSCs should always be in alphabetical order;\n # test this by sending CSC names NOT in 
alphabetical order.\n for i in range(len(all_csc_names)):\n csc_names = all_csc_names[:i]\n # Compute a variant of csc_names with trailing :0 stripped\n # and use that for the expected output from the authList event.\n nonzero_csc_names = [\n csc[:-2] if csc.endswith(\":0\") else csc for csc in csc_names\n ]\n expected_cscs_set |= set(nonzero_csc_names)\n add_some_str = \"+ \" + \", \".join(csc_names)\n await self.remote.cmd_setAuthList.set_start(\n nonAuthorizedCSCs=add_some_str, timeout=STD_TIMEOUT\n )\n await self.assert_next_sample(\n self.remote.evt_authList,\n authorizedUsers=\"\",\n nonAuthorizedCSCs=\", \".join(sorted(expected_cscs_set)),\n )\n\n # Removing CSCs should have the expected effect\n for i in range(len(all_csc_names)):\n csc_names = all_csc_names[:i]\n # Compute a variant of csc_names with trailing :0 stripped\n # and use that for the expected output from the authList event.\n nonzero_csc_names = [\n csc[:-2] if csc.endswith(\":0\") else csc for csc in csc_names\n ]\n expected_cscs_set -= set(nonzero_csc_names)\n remove_some_str = \"- \" + \", \".join(csc_names)\n await self.remote.cmd_setAuthList.set_start(\n nonAuthorizedCSCs=remove_some_str, timeout=STD_TIMEOUT\n )\n await self.assert_next_sample(\n self.remote.evt_authList,\n authorizedUsers=\"\",\n nonAuthorizedCSCs=\", \".join(sorted(expected_cscs_set)),\n )\n\n # Test setting authorized users.\n # Test that user = csc_user_host (the CSC's user_host) is ignored.\n # Reported auth users should always be in alphabetical order;\n # test this by sending users NOT in alphabetical order.\n csc_user_host = salobj.get_user_host()\n all_users_hosts = [csc_user_host] + [\n f\"notme{i}{csc_user_host}\" for i in (3, 2, 1)\n ]\n expected_user_set = set()\n # Test adding authorized users; only users other than \"me\"\n # are actually added.\n for i in range(len(all_users_hosts)):\n user_names = all_users_hosts[:i]\n expected_user_set |= set(user_names)\n expected_user_set -= {csc_user_host}\n add_some_str = \"+ \" + \", \".join(user_names)\n await self.remote.cmd_setAuthList.set_start(\n authorizedUsers=add_some_str, timeout=STD_TIMEOUT\n )\n await self.assert_next_sample(\n self.remote.evt_authList,\n authorizedUsers=\", \".join(sorted(expected_user_set)),\n nonAuthorizedCSCs=\"\",\n )\n\n # Removing authorized users should have the expected effect\n for i in range(len(all_users_hosts)):\n user_names = all_users_hosts[:i]\n expected_user_set -= set(user_names)\n remove_some_str = \"- \" + \", \".join(user_names)\n await self.remote.cmd_setAuthList.set_start(\n authorizedUsers=remove_some_str, timeout=STD_TIMEOUT\n )\n await self.assert_next_sample(\n self.remote.evt_authList,\n authorizedUsers=\", \".join(sorted(expected_user_set)),\n nonAuthorizedCSCs=\"\",\n )\n\n async def test_heartbeat(self) -> None:\n async with self.make_csc(initial_state=salobj.State.STANDBY):\n self.csc.heartbeat_interval = 0.1\n timeout = self.csc.heartbeat_interval * 5\n await self.remote.evt_heartbeat.next(flush=True, timeout=STD_TIMEOUT)\n await self.remote.evt_heartbeat.next(flush=True, timeout=timeout)\n await self.remote.evt_heartbeat.next(flush=True, timeout=timeout)\n await self.remote.evt_heartbeat.next(flush=True, timeout=timeout)\n\n async def test_bin_script_run(self) -> None:\n \"\"\"Test running the Test CSC from the bin script.\n\n Note that the bin script calls class method ``amain``.\n \"\"\"\n for initial_state, override in (\n (None, None),\n (salobj.State.STANDBY, None),\n (salobj.State.DISABLED, \"all_fields.yaml\"),\n 
(salobj.State.ENABLED, \"\"),\n ):\n index = self.next_index()\n with self.subTest(initial_state=initial_state, override=override):\n await self.check_bin_script(\n name=\"Test\",\n index=index,\n exe_name=\"run_test_csc\",\n initial_state=initial_state,\n override=override,\n )\n\n async def test_bin_script_duplicate(self) -> None:\n index = next(index_gen)\n exe_name = \"run_test_csc\"\n exe_path = shutil.which(exe_name)\n if exe_path is None:\n raise AssertionError(\n f\"Could not find bin script {exe_name}; did you setup or install this package?\"\n )\n\n args = [exe_name, str(index), \"--state\", \"standby\"]\n\n async with salobj.Domain() as domain, salobj.Remote(\n domain=domain,\n name=\"Test\",\n index=index,\n include=[\"summaryState\"],\n ) as self.remote:\n process1 = await asyncio.create_subprocess_exec(\n *args,\n stderr=subprocess.PIPE,\n )\n try:\n await self.assert_next_summary_state(\n salobj.State.STANDBY, timeout=STD_TIMEOUT\n )\n # Start a duplicate CSC and wait for it to quit early.\n process2 = await asyncio.create_subprocess_exec(\n *args,\n stderr=subprocess.PIPE,\n )\n try:\n await asyncio.wait_for(process2.wait(), timeout=STD_TIMEOUT)\n assert process2.returncode is not None\n assert process2.returncode > 0\n assert process2.stderr is not None # make mypy happy\n try:\n errbytes = await asyncio.wait_for(\n process2.stderr.read(), timeout=STD_TIMEOUT\n )\n assert b\"found another instance\" in errbytes\n except asyncio.TimeoutError:\n raise AssertionError(\"timed out trying to read process2 stderr\")\n except asyncio.TimeoutError:\n process2.terminate()\n await asyncio.wait_for(process2.wait(), timeout=STD_TIMEOUT)\n raise AssertionError(\"CSC 2 did not die in time.\")\n finally:\n if process1.returncode is None:\n process1.terminate()\n await asyncio.wait_for(process1.wait(), timeout=STD_TIMEOUT)\n else:\n # CSC 1 quit early; try to print stderr, then fail.\n try:\n assert process1.stderr is not None # make mypy happy\n errbytes = await asyncio.wait_for(\n process1.stderr.read(), timeout=STD_TIMEOUT\n )\n print(\"Subprocess stderr: \", errbytes.decode())\n except Exception as e:\n print(f\"Could not read subprocess stderr: {e}\")\n raise AssertionError(\"CSC 1 process terminated early\")\n\n async def test_bin_script_version(self) -> None:\n \"\"\"Test running the Test CSC from the bin script.\n\n Note that the bin script calls class method ``amain``.\n \"\"\"\n index = self.next_index()\n exec_path = pathlib.Path(__file__).parents[1] / \"bin\" / \"run_test_csc\"\n\n process = await asyncio.create_subprocess_exec(\n str(exec_path),\n str(index),\n \"--version\",\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n try:\n stdout, stderr = await asyncio.wait_for(\n process.communicate(), timeout=STD_TIMEOUT\n )\n assert stdout.decode()[:-1] == salobj.__version__\n await asyncio.wait_for(process.wait(), timeout=STD_TIMEOUT)\n assert process.returncode == 0\n finally:\n if process.returncode is None:\n process.terminate()\n warnings.warn(\n \"Killed a process that was not properly terminated\", RuntimeWarning\n )\n\n async def test_log_level(self) -> None:\n \"\"\"Test that specifying a log level to make_csc works.\"\"\"\n # If specified then log level is the value given.\n async with self.make_csc(\n initial_state=salobj.State.STANDBY, log_level=logging.DEBUG\n ):\n assert self.csc.log.getEffectiveLevel() == logging.DEBUG\n\n async with self.make_csc(\n initial_state=salobj.State.STANDBY, log_level=logging.WARNING\n ):\n assert self.csc.log.getEffectiveLevel() 
== logging.WARNING\n\n # At this point log level is WARNING; now check that by default\n # log verbosity is increased (log level decreased) to INFO.\n async with self.make_csc(initial_state=salobj.State.STANDBY):\n assert self.csc.log.getEffectiveLevel() == logging.INFO\n\n async def test_setArrays_command(self) -> None:\n async with self.make_csc(initial_state=salobj.State.ENABLED):\n # until the controller gets its first setArrays\n # it will not send any arrays events or telemetry\n assert not self.csc.evt_arrays.has_data\n assert not self.csc.tel_arrays.has_data\n assert not self.remote.evt_arrays.has_data\n assert not self.remote.tel_arrays.has_data\n assert self.remote.evt_arrays.get() is None\n assert self.remote.tel_arrays.get() is None\n\n # send the setArrays command with random data\n arrays_dict = self.csc.make_random_arrays_dict()\n await self.remote.cmd_setArrays.set_start(\n **arrays_dict, timeout=STD_TIMEOUT\n )\n cmd_data_sent = self.remote.cmd_setArrays.data\n\n # see if new data was broadcast correctly\n evt_data = await self.remote.evt_arrays.next(\n flush=False, timeout=STD_TIMEOUT\n )\n self.csc.assert_arrays_equal(cmd_data_sent, evt_data)\n tel_data = await self.remote.tel_arrays.next(\n flush=False, timeout=STD_TIMEOUT\n )\n self.csc.assert_arrays_equal(cmd_data_sent, tel_data)\n\n assert self.csc.evt_arrays.has_data\n assert self.csc.tel_arrays.has_data\n assert self.remote.evt_arrays.has_data\n assert self.remote.tel_arrays.has_data\n\n # also test get\n self.csc.assert_arrays_equal(cmd_data_sent, self.remote.tel_arrays.get())\n self.csc.assert_arrays_equal(cmd_data_sent, self.remote.evt_arrays.get())\n\n async def test_setScalars_command(self) -> None:\n async with self.make_csc(initial_state=salobj.State.ENABLED):\n # until the controller gets its first setScalars\n # it will not send any scalars events or telemetry\n assert not self.csc.evt_scalars.has_data\n assert not self.csc.tel_scalars.has_data\n assert not self.remote.evt_scalars.has_data\n assert not self.remote.tel_scalars.has_data\n assert self.remote.evt_scalars.get() is None\n assert self.remote.tel_scalars.get() is None\n\n # send the setScalars command with random data\n scalars_dict = self.csc.make_random_scalars_dict()\n await self.remote.cmd_setScalars.set_start(\n **scalars_dict, timeout=STD_TIMEOUT\n )\n cmd_data_sent = self.remote.cmd_setScalars.data\n\n # see if new data is being broadcast correctly\n evt_data = await self.remote.evt_scalars.next(\n flush=False, timeout=STD_TIMEOUT\n )\n self.csc.assert_scalars_equal(cmd_data_sent, evt_data)\n tel_data = await self.remote.tel_scalars.next(\n flush=False, timeout=STD_TIMEOUT\n )\n self.csc.assert_scalars_equal(cmd_data_sent, tel_data)\n\n assert self.csc.evt_scalars.has_data\n assert self.csc.tel_scalars.has_data\n assert self.remote.evt_scalars.has_data\n assert self.remote.tel_scalars.has_data\n\n # also test get\n self.csc.assert_scalars_equal(cmd_data_sent, self.remote.tel_scalars.get())\n self.csc.assert_scalars_equal(cmd_data_sent, self.remote.evt_scalars.get())\n\n async def test_fault_state_transitions(self) -> None:\n \"\"\"Test CSC state transitions into fault and out again.\n\n Going into the fault state is done via the ``fault`` command.\n \"\"\"\n for initial_state in salobj.State:\n if initial_state == salobj.State.OFFLINE:\n # Not a valid initial state\n continue\n if initial_state == salobj.State.FAULT:\n # The ``fault`` command does nothing if TestCsc\n # is already in the FAULT state\n continue\n with 
self.subTest(initial_state=initial_state):\n async with self.make_csc(initial_state=initial_state):\n await self.assert_next_summary_state(initial_state)\n await self.assert_next_sample(\n topic=self.remote.evt_errorCode, errorCode=0, errorReport=\"\"\n )\n\n # Issue the ``fault`` command\n # and check the state and error code.\n await self.remote.cmd_fault.start(timeout=STD_TIMEOUT)\n await self.assert_next_summary_state(salobj.State.FAULT)\n await self.assert_next_sample(\n topic=self.remote.evt_errorCode, errorCode=1\n )\n\n # Issue the ``standby`` command to recover.\n await self.remote.cmd_standby.start(timeout=STD_TIMEOUT)\n await self.assert_next_summary_state(salobj.State.STANDBY)\n await self.assert_next_sample(\n topic=self.remote.evt_errorCode, errorCode=0, errorReport=\"\"\n )\n\n async def test_fault_method(self) -> None:\n \"\"\"Test BaseCsc.fault with and without optional arguments.\"\"\"\n async with self.make_csc(initial_state=salobj.State.STANDBY):\n await self.assert_next_summary_state(salobj.State.STANDBY)\n await self.assert_next_sample(\n topic=self.remote.evt_errorCode, errorCode=0, errorReport=\"\"\n )\n\n code = 52\n report = \"Report for error code\"\n traceback = \"Traceback for error code\"\n\n # if an invalid code is specified then errorCode is not output\n # but the CSC still goes into a FAULT state\n await self.csc.fault(code=\"not a valid code\", report=report)\n await self.assert_next_summary_state(salobj.State.FAULT)\n with pytest.raises(asyncio.TimeoutError):\n await self.remote.evt_errorCode.next(\n flush=False, timeout=NODATA_TIMEOUT\n )\n\n await self.remote.cmd_standby.start(timeout=STD_TIMEOUT)\n await self.assert_next_sample(\n topic=self.remote.evt_errorCode, errorCode=0, errorReport=\"\"\n )\n await self.assert_next_summary_state(salobj.State.STANDBY)\n\n # if code is specified then errorReport is output;\n # first test with report and traceback specified,\n # then without, to make sure those values are not cached\n await self.csc.fault(code=code, report=report, traceback=traceback)\n await self.assert_next_summary_state(salobj.State.FAULT)\n await self.assert_next_sample(\n topic=self.remote.evt_errorCode,\n errorCode=code,\n errorReport=report,\n traceback=traceback,\n )\n\n # Try a disallowed command and check that the error report\n # is part of the traceback.\n with salobj.assertRaisesAckError(result_contains=report):\n await self.remote.cmd_wait.set_start(duration=5, timeout=STD_TIMEOUT)\n\n await self.remote.cmd_standby.start(timeout=STD_TIMEOUT)\n await self.assert_next_sample(\n topic=self.remote.evt_errorCode, errorCode=0, errorReport=\"\"\n )\n await self.assert_next_summary_state(salobj.State.STANDBY)\n\n await self.csc.fault(code=code, report=\"\")\n await self.assert_next_summary_state(salobj.State.FAULT)\n await self.assert_next_sample(\n topic=self.remote.evt_errorCode,\n errorCode=code,\n errorReport=\"\",\n traceback=\"\",\n )\n\n await self.remote.cmd_standby.start(timeout=STD_TIMEOUT)\n await self.assert_next_sample(\n topic=self.remote.evt_errorCode, errorCode=0, errorReport=\"\"\n )\n await self.remote.cmd_exitControl.start(timeout=STD_TIMEOUT)\n\n async def test_fault_problems(self) -> None:\n \"\"\"Test BaseCsc.fault when report_summary_state misbehaves.\"\"\"\n for doraise, report_first in itertools.product((False, True), (False, True)):\n with self.subTest(doraise=doraise, report_first=report_first):\n index = self.next_index()\n async with FailInReportFaultCsc(\n index=index, doraise=doraise, report_first=report_first\n ) 
as csc, salobj.Remote(\n domain=csc.domain, name=\"Test\", index=index\n ) as remote:\n await self.assert_next_summary_state(\n salobj.State.ENABLED, remote=remote\n )\n await self.assert_next_sample(\n topic=remote.evt_errorCode, errorCode=0, errorReport=\"\"\n )\n\n code = 51\n report = \"Report for error code\"\n traceback = \"Traceback for error code\"\n await csc.fault(code=code, report=report, traceback=traceback)\n\n await self.assert_next_summary_state(\n salobj.State.FAULT, remote=remote\n )\n await self.assert_next_sample(\n topic=remote.evt_errorCode,\n errorCode=code,\n errorReport=report,\n traceback=traceback,\n )\n\n # make sure FAULT state and errorCode are only sent once\n with pytest.raises(asyncio.TimeoutError):\n await remote.evt_summaryState.next(\n flush=False, timeout=NODATA_TIMEOUT\n )\n with pytest.raises(asyncio.TimeoutError):\n await remote.evt_errorCode.next(\n flush=False, timeout=NODATA_TIMEOUT\n )\n\n async def test_make_csc_timeout(self) -> None:\n \"\"\"Test that setting the timeout argument to make_csc works.\"\"\"\n with pytest.raises(asyncio.TimeoutError):\n # Use such a short timeout that make_csc times out\n async with self.make_csc(initial_state=salobj.State.STANDBY, timeout=0):\n pass\n\n async def test_standard_state_transitions(self) -> None:\n \"\"\"Test standard CSC state transitions.\n\n The initial state is STANDBY.\n The standard commands and associated state transitions are:\n\n * start: STANDBY to DISABLED\n * enable: DISABLED to ENABLED\n\n * disable: ENABLED to DISABLED\n * standby: DISABLED or FAULT to STANDBY\n * exitControl: STANDBY to OFFLINE (quit)\n \"\"\"\n async with self.make_csc(\n initial_state=salobj.State.STANDBY, config_dir=TEST_CONFIG_DIR\n ):\n await self.check_standard_state_transitions(\n enabled_commands=(\"setArrays\", \"setScalars\", \"wait\"),\n skip_commands=(\"fault\",),\n override=\"all_fields.yaml\",\n )\n","repo_name":"lsst-ts/ts_salobj","sub_path":"tests/test_csc_communication.py","file_name":"test_csc_communication.py","file_ext":"py","file_size_in_byte":32772,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"35103866259","text":"import sys\nsys.path.append('../')\nimport os\nimport csv\nimport pandas as pd\n#from tqdm import tqdm\nfrom collections import defaultdict\nfrom nltk.tokenize import RegexpTokenizer\n\ntokenizer = RegexpTokenizer(r'\\w+')\n\ndef load_description(version='mimic3'):\n\n icd_des_dic = defaultdict(str)\n data_dir = os.path.join(os.pardir,'mimicdata')\n if version == 'mimic2':\n with open('%s/%s/MIMIC_ICD9_mapping'%(data_dir,version),'r') as f:\n r = csv.reader(f)\n for row in r:\n row = row[0].split(';')\n des = [t.lower() for t in tokenizer.tokenize(row[2]) if not t.isnumeric()]\n icd_des_dic[str(row[1])] = des\n else:\n diag_des = os.path.join(data_dir,'D_ICD_DIAGNOSES.csv')\n pro_des = os.path.join(data_dir,'D_ICD_PROCEDURES.csv')\n icd_des = os.path.join(data_dir, 'ICD9_descriptions')\n with open(diag_des,'r') as df:\n reader = csv.reader(df)\n next(reader) #header\n for row in reader:\n code = reformat(row[1],True)\n if code not in icd_des_dic.keys():\n des = [t.lower() for t in tokenizer.tokenize(row[3]) if not t.isnumeric()]\n icd_des_dic[code] = des\n #icd_des_dic[code] = ' '.join(des)\n\n with open(pro_des,'r') as pf:\n reader = csv.reader(pf)\n next(reader)\n for row in reader:\n code = reformat(row[1],False)\n if code not in icd_des_dic.keys():\n #des = row[-1]\n\n des = [t.lower() for t in tokenizer.tokenize(row[3]) 
if not t.isnumeric()]\n icd_des_dic[code] = des\n #icd_des_dic[code] = ' '.join(des)\n\n with open(icd_des,'r') as f:\n f.readline() #header\n all_lines = f.readlines()\n\n for line in all_lines:\n row = line.strip().split()\n code = row[0]\n if code not in icd_des_dic.keys():\n des = ' '.join(row[1:])\n des = [t.lower() for t in tokenizer.tokenize(des) if not t.isnumeric()]\n icd_des_dic[code] = des\n #icd_des_dic[code] = ' '.join(des)\n\n\n return icd_des_dic\n\n\ndef reformat(code, is_diag):\n\n code = ''.join(code.split('.'))\n if is_diag:\n if code.startswith('E'):\n if len(code) > 4:\n code = code[:4] + '.' + code[4:]\n else:\n if len(code) > 3:\n code = code[:3] + '.' + code[3:]\n else:\n code = code[:2] + '.' + code[2:]\n return code\n\n\n\n\n","repo_name":"BingChen-gdcz/Msc_project","sub_path":"dataprocess/load_descriptions.py","file_name":"load_descriptions.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73096532944","text":"# solved 100% \n\nfim = False\nwhile not fim:\n h = list(map(int, input().split()))\n if sum(h) == 0:\n fim = True\n else:\n if h[0] == h[2]: # same hour\n if h[1] == h[3]: # same minute\n tempo = 1440\n elif h[1] > h[3]: # 23 hours have passed and it is just short of a full hour\n tempo = 1440 - ((h[0]*60) + h[1]) + (h[2]*60 + h[3])\n else: \n tempo = h[1] - h[3] # minutes have passed\n \n elif h[0] > h[2]: # went past 00:00\n if h[1] == h[3]: # same minute\n tempo = 1440 - (h[0] - h[2]) * 60\n elif h[1] > h[3]: # just short of a full hour\n tempo = 1440 - ((h[0]*60) + h[1]) + (h[2]*60 + h[3])\n else:\n tempo = ((h[0]*60) + h[1]) - (1440 + (h[2]*60 + h[3])) # minutes have passed \n \n else: # hours have passed\n if h[1] == h[3]: # same minute\n tempo = (h[0] - h[2]) * 60\n elif h[1] > h[3]: # just short of a full hour\n tempo = (h[0]*60 + h[1]) - (h[2]*60 + h[3])\n else:\n tempo = (h[0]*60 + h[1]) - (h[2]*60 + h[3]) # minutes have passed\n \n print(abs(tempo))\n\n\n'''\nTest cases:\n\nInput:\n1 5 3 5\n23 59 0 34\n21 33 21 10\n0 0 0 0\n\nOutput:\n120\n35\n1417\n\nInput:\n23 27 1 59 \n22 30 8 30\n21 00 8 00\n0 0 0 0\n\nOutput:\n152\n600\n660\n\n\n\n'''\n","repo_name":"felipexrn/programacao","sub_path":"python/lacos/1103_alarme.py","file_name":"1103_alarme.py","file_ext":"py","file_size_in_byte":1292,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4187723302","text":"from builtins import object\nfrom decimal import Decimal\nfrom lxml.etree import fromstring\nfrom zope.interface import implementer\nimport requests\n\nfrom ..interfaces import TaxRateException\nfrom ..interfaces import ITaxHandler\n\n\nWA_STATE_TAX_API_URL = 'http://dor.wa.gov/AddressRates.aspx'\n\n\n@implementer(ITaxHandler)\nclass WAStateTaxHandler(object):\n \"\"\"provide dynamically adjusted tax rate information\n\n Requires a shopping cart to construct. 
That cart is assumed to have\n \"ship_to\" and \"data\" parameters (both mappings).\n\n The tax rate will be fetched according to the address information\n present in the ship_to address.\n\n Uses the WA state tax rate service:\n\n http://dor.wa.gov/Content/FindTaxesAndRates/RetailSalesTax/DestinationBased/ClientInterface.aspx\n\n \"\"\"\n\n label = u'WA State Tax Handler'\n\n def get_tax_rates(self, cart):\n ship_to = cart.ship_to\n if ship_to.get('state') not in ('WA', 'Washington'):\n return {}\n\n rate = fetch_rate(\n ship_to.get('street', None),\n ship_to.get('city', None),\n ship_to.get('postal_code', None),\n )\n\n if not rate:\n return {}\n\n return {'WA State Sales Tax': rate}\n\n\ndef fetch_rate(street, city, postal_code):\n params = {\n 'city': city,\n 'zip': postal_code,\n 'addr': street,\n 'output': 'xml'\n }\n\n if not (params['city'] and params['zip'] and params['addr']):\n raise TaxRateException('Missing street, city, or postal_code')\n\n resp = requests.get(WA_STATE_TAX_API_URL, params=params)\n if not resp.ok:\n # report http errors but wrap in our exception to simplify catching\n msg = \"HTTP Error: {}: {}\".format(resp.status_code, resp.reason)\n raise TaxRateException(msg)\n\n result = fromstring(resp.content)\n\n if result.attrib['code'] not in ('0', '1', '2', '3'):\n # result of 4 or 5 means the API call failed for some reason.\n msg = \"Tax API call failed: {}\".format(\n result.attrib.get('debughint', 'No API Information Available')\n )\n raise TaxRateException(msg)\n\n rate = result.attrib['rate'] or '0'\n return Decimal(rate)\n","repo_name":"jazkarta/jazkarta.shop","sub_path":"jazkarta/shop/tax/wa_state.py","file_name":"wa_state.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"26718415484","text":"from helpers import *\nfrom skimage.morphology import flood\ncoords = [tuple(line_to_numbers(line)) for line in read_day(18).split('\\n')]\n\nsample_coords = [tuple(line_to_numbers(line)) for line in '''\n2,2,2\n1,2,2\n3,2,2\n2,1,2\n2,3,2\n2,2,1\n2,2,3\n2,2,4\n2,2,6\n1,2,5\n3,2,5\n2,1,5\n2,3,5\n'''.strip().split('\\n')]\n\ndef count_faces(coords: list[tuple[int,int,int]], count_gaps=True):\n\t# we pad to ensure the outer edges are fine\n\tfield = np.full((25,25,25), -1 if count_gaps else 0, dtype=np.int8)\n\t# indices = np.array(coords, dtype=np.int8).T + 1\n\t# field[*indices] = 1\n\tfor (x,y,z) in coords:\n\t\tfield[x+1,y+1,z+1] = 1\n\n\tif not count_gaps:\n\t\tfield[flood(field, (0,0,0), connectivity=1)] = -1\n\n\tsurface_area = np.logical_and(field[:-1,:,:] == 1, field[ 1:,:,:] == -1).sum()\n\tsurface_area += np.logical_and(field[ 1:,:,:] == 1, field[:-1,:,:] == -1).sum()\n\tsurface_area += np.logical_and(field[:,:-1,:] == 1, field[:, 1:,:] == -1).sum()\n\tsurface_area += np.logical_and(field[:, 1:,:] == 1, field[:,:-1,:] == -1).sum()\n\tsurface_area += np.logical_and(field[:,:,:-1] == 1, field[:,:, 1:] == -1).sum()\n\tsurface_area += np.logical_and(field[:,:, 1:] == 1, field[:,:,:-1] == -1).sum()\n\treturn surface_area\n\nprint(f'Part 1 (sample): {count_faces(sample_coords)}')\nprint(f'Part 1: {count_faces(coords)}')\n\n\nprint(f'Part 2 (sample): {count_faces(sample_coords, False)}')\nt0 = perf_counter_ns()\nprint(f'Part 2: {count_faces(coords, False)}')\nt1 = perf_counter_ns()\n\nfor i in range(1000):\n\tcount_faces(coords, False)\nt2 = perf_counter_ns()\nfor i in range(10000):\n\tcount_faces(coords, False)\nt3 = perf_counter_ns()\n\nprint(f'Part 2 x1: 
{(t1-t0)/1000_000:.3f}ms')\nprint(f'Part 2 x1000: {(t2-t1)/1000_000:.3f}ms')\nprint(f'Part 2 x10000: {(t3-t2)/1000_000:.3f}ms')\n","repo_name":"Birdulon/AdventOfCode","sub_path":"2022/day18.py","file_name":"day18.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42376106472","text":"if __name__ == \"__main__\":\n\n from pathlib import Path\n\n import pandas as pd\n\n from human_flow.data.bike_station import LOCATIONS\n\n DATA_DIR = Path(__file__).parents[1] / \"data-raw\" / \"raw\" / \"bike\"\n START = pd.Timestamp(\"2019-07-15 00:00:00\")\n END = pd.Timestamp(\"2019-09-16 00:00:00\")\n\n df1 = pd.read_csv(\n str(DATA_DIR / \"2019-07.csv\"), parse_dates=[\"Departure\", \"Return\"]\n )\n df2 = pd.read_csv(\n str(DATA_DIR / \"2019-08.csv\"), parse_dates=[\"Departure\", \"Return\"]\n )\n df3 = pd.read_csv(\n str(DATA_DIR / \"2019-09.csv\"), parse_dates=[\"Departure\", \"Return\"]\n )\n\n df = pd.concat([df1, df2, df3])\n df.columns = [\n \"departure_time\",\n \"return_time\",\n \"departure_station_id\",\n \"departure_station_name\",\n \"return_station_id\",\n \"return_station_name\",\n \"distance\",\n \"duration\",\n ]\n df = df.sort_values(by=[\"departure_time\"])\n df = df.dropna()\n df = df.loc[(df.departure_time >= START) & (df.return_time <= END)]\n\n no_location_ids = set(df.departure_station_id.unique()) - set(LOCATIONS)\n\n for id_ in no_location_ids:\n df = df[df.departure_station_id != id_]\n df = df[df.return_station_id != id_]\n\n df[\"start_lat\"] = df.apply(\n lambda row: LOCATIONS[row.departure_station_id][2], axis=1\n )\n df[\"start_lon\"] = df.apply(\n lambda row: LOCATIONS[row.departure_station_id][1], axis=1\n )\n df[\"end_lat\"] = df.apply(lambda row: LOCATIONS[row.return_station_id][2], axis=1)\n df[\"end_lon\"] = df.apply(lambda row: LOCATIONS[row.return_station_id][1], axis=1)\n\n RESULT_CSV = (\n Path(__file__).parents[1] / \"data\" / \"clean\" / \"bike\" / \"2019-07-09.csv\"\n )\n df.to_csv(RESULT_CSV, index=False)\n","repo_name":"jerry-git/city-dna","sub_path":"scripts/bike_data.py","file_name":"bike_data.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"23781548660","text":"import aiohttp_jinja2\nimport json\nfrom aiohttp import web, WSMsgType\n\n\n@aiohttp_jinja2.template('socket_list.html')\nasync def socket_list(request):\n \"\"\"\n Renders the form and the list of questions.\n \"\"\"\n\n return {'title': 'List of questions'}\n\n\nasync def websocket_handler(request):\n \"\"\"\n Handles websocket requests.\n Takes a string containing JSON as input.\n Parses it 
and returns JSON in response, which\n is read on the client side.\n \"\"\"\n clients = {}\n ws = web.WebSocketResponse()\n await ws.prepare(request)\n clients[ws] = 0\n\n async for msg in ws:\n if msg.type == WSMsgType.TEXT:\n json_object = json.loads(msg.data)\n \n if json_object['text'] == 'close':\n await ws.close()\n else:\n json_object['text'] = f'{json_object[\"text\"]}'\n clients[ws] += 1\n json_object['last_number'] = clients[ws]\n await ws.send_json(json_object)\n \n elif msg.type == WSMsgType.ERROR:\n print(f'ws connection closed with error {ws.exception()}')\n\n del clients[ws]\n\n return ws","repo_name":"vanobl/simple-websocket","sub_path":"views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73230114066","text":"from unittest import TestCase\nimport datetime\n\nfrom jskafka.consumer_subscribe import ConsumerSubscribe\nimport logging\n\n\nclass TestConsumer(TestCase):\n #logging.basicConfig(level=logging.DEBUG)\n\n #topic = 'RandomSM2_t2'\n topic = 'Grane10k4'\n group_id = 'Python-Grupe5'\n auto_offset_reset = 'earliest'\n\n def test_get_message(self):\n\n dy = datetime.datetime.now()\n\n consumer = ConsumerSubscribe(topic=self.topic, group_id=self.group_id, auto_offset_reset=self.auto_offset_reset)\n\n i = 1\n run = True\n while run:\n message = consumer.get_message()\n if message != None:\n print(f'{i} partition {message.partition()} offset {message.offset()}')\n\n if (not i % 100):\n print('*****************************************')\n print(f'{i} used {datetime.datetime.now() - dy}')\n print(f'time per message {(datetime.datetime.now() - dy) / i}')\n print(message.value())\n run = False\n\n i = i + 1\n\n consumer.close()\n\n\n\n","repo_name":"lindvarl/docker-compose-kafka","sub_path":"jskafka/test/test_consumer_subscribe.py","file_name":"test_consumer_subscribe.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29266620055","text":"\"\"\"\nIn this module, when accessing the attributes of page objects, only methods and attributes described\nin AbstractBasePage can be used\n\"\"\"\nfrom functools import wraps\nfrom inspect import ismethod\nfrom typing import Tuple, List, Set\n\nfrom combo_e2e.config import config\nfrom combo_e2e.helpers.exceptions import BasePageException\nfrom selenium.common.exceptions import StaleElementReferenceException, WebDriverException, NoSuchElementException\nfrom selenium.webdriver.remote.webelement import WebElement\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\n\nDATA_E2E_ATTRIBUTE_NAME = config.DATA_E2E_ATTRIBUTE\n\n\nclass WebElementProxyException(Exception):\n def __init__(self, msg=None, element=None):\n self.msg = msg\n self.element = element\n\n def __str__(self):\n exception_msg = \"Message: %s\\n\" % self.msg\n if self.element is not None:\n exception_msg += \"Occurred at element: %s\" % self.element\n return exception_msg\n\n\nclass WebElementProxy(WebElement):\n \"\"\"\n This class proxies access to the WebElement instance, implementing additional logic,\n to ensure that the angular application was loaded\n \"\"\"\n page = None\n \"\"\"through this attribute, all methods of the BasePage page can be accessed\"\"\"\n _obj: WebElement = None\n \"\"\"contains instance of the WebElement class, access to which is proxied\"\"\"\n locator: 
Tuple[str, str] = None\n \"\"\"string representation for repeated element search (required in some WebElement methods)\"\"\"\n attr_name: str = None\n \"\"\"the name of the attribute in the page that this object is associated with\n (set only if the object is received via a handle)\"\"\"\n\n # noinspection PyMissingConstructor\n def __init__(self, page, by, value, target_object, attr_name=None):\n if isinstance(target_object, WebElementProxy):\n raise BasePageException('target_object is already an instance of WebElementProxy')\n object.__setattr__(self, 'page', page)\n object.__setattr__(self, '_obj', target_object)\n object.__setattr__(self, 'locator', (by, value))\n object.__setattr__(self, 'attr_name', attr_name)\n\n def __getattribute__(self, name: str):\n if proxy_has_attr(name):\n attr = object.__getattribute__(self, name)\n else:\n attr = getattr(self._obj, name)\n\n if ismethod(attr) and not name.startswith('__'):\n decorator = catch_not_attach_to_session(self)\n return decorator(attr)\n return attr\n\n def __setattr__(self, name, value):\n if proxy_has_attr(name):\n object.__setattr__(self, name, value)\n return\n setattr(self._obj, name, value)\n\n def __delattr__(self, name):\n if proxy_has_attr(name):\n object.__delattr__(self, name)\n return\n return delattr(self._obj, name)\n\n @property\n def value(self):\n return self._obj.get_attribute('value')\n\n def until(self, condition, *args, **kwargs):\n self.page.wait.until(\n condition(self.locator, *args, **kwargs)\n )\n\n def until_not(self, condition, *args, **kwargs):\n self.page.wait.until_not(\n condition(self.locator, *args, **kwargs)\n )\n\n def click(self, focus_on_opened_tab: bool = True):\n \"\"\"\n wait for the element to be available and click on it \n (does not wait for the completion of something after the click)\n :focus_on_opened_tab: Whether it is needed to focus on a new tab if it's going to be opened\n :return:\n \"\"\"\n self.until(EC.element_to_be_clickable)\n self._obj.click()\n if focus_on_opened_tab:\n self.page.focus_on_last_opened_tab()\n\n def click_and_wait(self, focus_on_opened_tab: bool = True):\n \"\"\"\n performs a standard click on the element, but after the click waits for the completion of the running action.\n Completion signal - no page and table loaders\n :focus_on_opened_tab: Whether it is needed to focus on a new tab if it's going to be opened\n :return:\n \"\"\"\n self.click(focus_on_opened_tab=focus_on_opened_tab)\n self.page.wait_loaders_hidden()\n\n @property\n def page_wait(self):\n \"\"\"\n implements access to the wait object of the page, if waiting for something needs to be implemented\n :return:\n \"\"\"\n return self.page.wait\n\n def _reload_target_object(self) -> None:\n \"\"\"\n Reloads the original WebElement. 
This is necessary because elements even on an unreloaded page can be removed\n from the selenium session (this happens because in almost any action angular removes and adds DOM elements)\n :return:\n \"\"\"\n if self.attr_name and self.attr_name in self.page._cached_attrs:\n self.page._cached_attrs.pop(self.attr_name, None)\n obj = self.page._find_element(*self.locator)\n\n object.__setattr__(self, '_obj', obj)\n # adding element back to the page cache, so that it won't be searched again while accessed next time\n # through the descriptor\n if self.attr_name:\n self.page._cached_attrs[self.attr_name] = self\n\n\ndef get_subclass_attributes() -> Set[str]:\n \"\"\"\n Helper that returns attribute names only of the WebElementProxy proxy class\n :return:\n \"\"\"\n if hasattr(get_subclass_attributes, '__cached_attrs'):\n return get_subclass_attributes.__cached_attrs\n\n bases = WebElementProxy.__bases__\n if len(bases) > 1:\n raise NotImplementedError('It works only with a single parent class')\n attrs = set(WebElementProxy.__dict__.keys())\n setattr(get_subclass_attributes, '__cached_attrs', attrs)\n return attrs\n\n\ndef proxy_has_attr(name: str) -> bool:\n \"\"\"\n Same as hasattr for WebElementProxy, implemented outside the class to avoid looping\n in the __getattribute__ method\n :param name:\n :return:\n \"\"\"\n if name in get_subclass_attributes():\n return True\n return False\n\n\ndef catch_not_attach_to_session(current_obj: WebElementProxy):\n \"\"\"\n a decorator that allows reloading the WebElement instance if it disappears from the browser session.\n the WebElement instance is stored in the WebElementProxy object, so we reload\n only the selenium object, while the WebElementProxy instance remains the same, which avoids\n recreating BasePage objects\n :param current_obj:\n :return:\n \"\"\"\n def decorator(function):\n @wraps(function)\n def wrapper(*args, **kwargs):\n try:\n return function(*args, **kwargs)\n except StaleElementReferenceException:\n current_obj._reload_target_object()\n return function(*args, **kwargs)\n except NoSuchElementException:\n raise\n except WebDriverException as ex:\n raise WebElementProxyException(str(ex), current_obj.attr_name or 'Object did not attach to Page')\n\n return wrapper\n\n return decorator\n\n\nclass ElementDescriptor:\n \"\"\"\n Descriptor for WebElementProxy. Allows implementing WebElement lazy loading,\n i.e. 
the selenium object is created only when a class attribute is accessed through this descriptor.\n \"\"\"\n search_by = None\n \"\"\"element search type (by xpath, class, etc.)\"\"\"\n value = None\n \"\"\"the value by which the element is searched\"\"\"\n many = None\n \"\"\"whether several elements will be found on the page for this pattern\"\"\"\n __element_name = None\n \"\"\"the name of the base page attribute that stores the descriptor instance\"\"\"\n\n def __init__(self, search_by=None,\n value=None,\n many=False):\n \"\"\"\n The only available method to pass parameters for searching for a web element on a page\n :param search_by: locator type (default: xpath)\n :param value: locator value\n :param many: flag that several elements will be found by the passed locator\n \"\"\"\n self.__element_name = None\n self.search_by = search_by\n self.value = value\n self.many = many\n if self.value and not self.search_by:\n self.search_by = By.XPATH\n self._validate_params()\n\n def _validate_params(self):\n if not self.search_by or not self.value:\n raise BasePageException('[value, search_by] params must be passed to ElementDescriptor')\n\n def __set_name__(self, owner, name):\n self.__element_name = name\n\n def __get__(self, page, objtype=None):\n if page is None:\n return self\n page.check_opened()\n\n cached_attrs = page._cached_attrs\n if cached_attrs.get(self.__element_name) is None:\n cached_attrs[self.__element_name] = self._search_element(page)\n return cached_attrs[self.__element_name]\n\n def __getattribute__(self, item):\n if hasattr(ElementDescriptor, item):\n return object.__getattribute__(self, item)\n raise AttributeError\n\n def _search_element(self, page):\n if self.many:\n elements = page._find_elements(self.search_by, self.value)\n proxy_elements = []\n for item in elements:\n proxy_elements.append(\n WebElementProxy(\n target_object=item,\n page=page,\n by=self.search_by,\n value=self.value,\n attr_name=self.__element_name,\n )\n )\n return proxy_elements\n\n web_element = page._find_element(self.search_by, self.value)\n return WebElementProxy(\n target_object=web_element,\n page=page,\n by=self.search_by,\n value=self.value,\n attr_name=self.__element_name,\n )\n\n\nclass ListOfElementDescriptor:\n \"\"\"\n A descriptor class similar to ElementDescriptor, but for describing a group of elements\n which differ in indices.\n \n \n This class is needed in order not to multiply the description of such elements in the base page.\n Allows you to describe such elements like this:\n elements = ListOfElementDescriptor(base_name_parts=['row_'])\n and then any element can be accessed like this:\n elements.get(1)\n elements.get(2)\n Also allows describing elements like this:\n \n \n You can use the end_name_part parameter:\n elements = ListOfElementDescriptor(base_name_parts=['row_'], end_name_part='_foo')\n \"\"\"\n base_name_parts: list = None\n \"\"\"list of common parts of the attribute value of the group of elements\"\"\"\n end_name_part: str = None\n \"\"\"the end part of the attribute value of the group of elements\"\"\"\n tag_attr_name: str = None\n \"\"\"attribute name by which elements are grouped\"\"\"\n many: bool = None\n \"\"\"flag that multiple elements will be found by full name\"\"\"\n page = None\n # only xpath search is supported for now\n search_by: str = 'xpath'\n\n def __init__(self, base_name_parts: List[str], end_name_part: str = None, many: bool = False,\n tag_attr_name: str = DATA_E2E_ATTRIBUTE_NAME, context=None):\n \"\"\"\n\n :param base_name_parts: list of common parts of 
the attribute value of the group of elements\n :param end_name_part: the end part of the attribute value of the group of elements\n :param many: flag that multiple elements will be found by full name\n :param tag_attr_name: attribute name by which elements are grouped\n :param context:\n \"\"\"\n if not isinstance(base_name_parts, list):\n raise BasePageException('base_name_parts must be a list of strings')\n self.base_name_parts = [name.strip('_') for name in base_name_parts]\n self.end_name_part = end_name_part.strip('_') if end_name_part else end_name_part\n self.many = many\n self.tag_attr_name = tag_attr_name\n self.page = context\n\n def get_by_index(self, *numbers) -> WebElementProxy:\n \"\"\"\n Get an element by its index on the rendered page. It is an interface to the get method\n that restricts numbers to the int type\n :param numbers:\n :return:\n \"\"\"\n if not all([isinstance(num, int) for num in numbers]):\n raise BasePageException('all parameters must be int')\n return self.get(*numbers)\n\n def get_no_load(self, *numbers) -> ElementDescriptor:\n \"\"\"\n Returns the element descriptor.\n Main usage: waiting for the element to appear on the page\n To do this, the descriptor must be passed to the page's wait_accessibility_of() method\n :return:\n \"\"\"\n attr_name = self._make_attr_name(numbers)\n return self._get_attribute_descriptor(attr_name)\n\n def get(self, *numbers) -> WebElementProxy:\n \"\"\"\n Fills base_name_parts with numbers and returns the matching item, if any\n :param numbers: list of dynamic parameters to be combined with base_name_parts\n :return:\n \"\"\"\n attr_name = self._make_attr_name(numbers)\n descriptor = self._get_attribute_descriptor(attr_name)\n return descriptor.__get__(self.page)\n\n def get_relative(self, *numbers) -> WebElementProxy:\n attr_name = self._make_attr_name(numbers)\n value = self._print_search_value(attr_name)\n return self.page.get_item_by_xpath(value)\n\n def _get_attribute_descriptor(self, attr_name: str) -> ElementDescriptor:\n if attr_name not in self.page.__dict__:\n descriptor = self._construct_attribute_descriptor(attr_name)\n setattr(self.page, attr_name, descriptor)\n return getattr(self.page, attr_name)\n\n def _construct_attribute_descriptor(self, attr_name: str) -> ElementDescriptor:\n value = self._print_search_value(attr_name)\n descriptor = ElementDescriptor(search_by=self.search_by, value=value, many=self.many)\n descriptor.__set_name__(None, attr_name)\n return descriptor\n\n def _print_search_value(self, attr_name: str) -> str:\n return f'//*[@{self.tag_attr_name}=\"{attr_name}\"]'\n\n def _make_attr_name(self, args):\n params = list(map(str, args))\n if len(params) != len(self.base_name_parts):\n raise BasePageException(f'You passed only {len(params)} params to the get method '\n f'but {len(self.base_name_parts)} are required')\n\n indexed_names = []\n for val in zip(self.base_name_parts, params):\n indexed_names.append(('_' if val[0] else '').join(val))\n\n if self.end_name_part:\n indexed_names.append(self.end_name_part)\n\n return '_'.join(indexed_names)\n\n def __get__(self, page, objtype=None):\n self.page = page\n return self\n\n def __getitem__(self, item: int):\n if not isinstance(item, int):\n raise BasePageException('ListOfElementDescriptor supports only numeric access to attributes')\n if getattr(self.page, 'nested_table', None) is True:\n return self.get_relative(item)\n return 
self.get(item)\n","repo_name":"AdCombo/combo-e2e","sub_path":"combo_e2e/pages/base_attributes.py","file_name":"base_attributes.py","file_ext":"py","file_size_in_byte":15278,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"7018846004","text":"import json\nimport logging\nimport os\nimport random\nimport sys\nfrom contextlib import contextmanager\nfrom tempfile import TemporaryDirectory\nfrom typing import Any, Dict, List, Optional\nfrom unittest.mock import patch\n\nimport sockeye.constants as C\nimport sockeye.prepare_data\nimport sockeye.train\nimport sockeye.translate\nimport sockeye.lexicon\nimport sockeye.utils\n\nlogger = logging.getLogger(__name__)\n\n\n_DIGITS = \"0123456789\"\n_MID = 5\n\n\ndef generate_digits_file(source_path: str,\n target_path: str,\n line_count: int = 100,\n line_length: int = 9,\n sort_target: bool = False,\n line_count_empty: int = 0,\n source_text_prefix_token: str = '',\n seed=13):\n source_text_prefix = ''\n if source_text_prefix_token:\n line_length -= 1 # The prefix token takes one.\n source_text_prefix = f\"{source_text_prefix_token}{C.TOKEN_SEPARATOR}\"\n assert line_count_empty <= line_count\n random_gen = random.Random(seed)\n with open(source_path, \"w\") as source_out, open(target_path, \"w\") as target_out:\n all_digits = []\n for _ in range(line_count - line_count_empty):\n digits = [random_gen.choice(_DIGITS) for _ in range(random_gen.randint(1, line_length))]\n all_digits.append(digits)\n for _ in range(line_count_empty):\n all_digits.append([])\n random_gen.shuffle(all_digits)\n for digits in all_digits:\n print(f\"{source_text_prefix}{C.TOKEN_SEPARATOR.join(digits)}\" if digits else '', file=source_out)\n if sort_target:\n digits.sort()\n print(C.TOKEN_SEPARATOR.join(digits), file=target_out)\n\n\ndef generate_json_input_file_with_tgt_prefix(src_path:str, tgt_path: str, json_file_with_tgt_prefix_path: str, \\\n src_factors_path: Optional[List[str]] = None, tgt_factors_path: List[str] = None, seed=13):\n random_gen = random.Random(seed)\n with open(src_path, \"r\") as src_reader, open(tgt_path, \"r\") as tgt_reader:\n with open(json_file_with_tgt_prefix_path, \"w\") as out:\n list_src_factors = None\n list_tgt_factors = None\n\n if src_factors_path is not None:\n list_src_factors = [open(src_factors, \"r\") for src_factors in src_factors_path]\n list_src_factors = [[sf.strip() for sf in src_factors] for src_factors in list_src_factors]\n\n if tgt_factors_path is not None:\n list_tgt_factors = [open(tgt_factors, \"r\") for tgt_factors in tgt_factors_path]\n list_tgt_factors = [[tf.strip().split() for tf in tgt_factors] for tgt_factors in list_tgt_factors]\n\n for i, stdigits in enumerate(zip(src_reader, tgt_reader)):\n src_digits, tgt_digits = stdigits[0].strip(), stdigits[1].strip()\n tgt_prefix = tgt_digits.split()\n if len(tgt_digits) > 0:\n random_pos = random_gen.choice([pos for pos in range(len(tgt_prefix))])\n tgt_prefix = tgt_prefix[:random_pos]\n if tgt_factors_path is not None and len(list_tgt_factors[0][i]) > 0:\n # Another random_pos, which is different to the one used for target prefix\n # With this, target prefix and target factors may have different lengths for testing\n random_pos = random_gen.choice([pos for pos in range(len(list_tgt_factors[0][i]))])\n for k in range(len(list_tgt_factors)):\n list_tgt_factors[k][i] = list_tgt_factors[k][i][:random_pos]\n tgt_prefix = C.TOKEN_SEPARATOR.join(tgt_prefix)\n if src_factors_path is None and tgt_factors_path is None:\n 
jsone_line = {\"text\": src_digits, \"target_prefix\": tgt_prefix}\n elif src_factors_path is not None and tgt_factors_path is None:\n jsone_line = {\"text\": src_digits, \"factors\": [src_factors[i] for src_factors in list_src_factors], \\\n \"target_prefix\": tgt_prefix}\n elif tgt_factors_path is not None and src_factors_path is None:\n jsone_line = {\"text\": src_digits, \"target_prefix_factors\": [C.TOKEN_SEPARATOR.join(tgt_factors[i]) for tgt_factors in list_tgt_factors], \\\n \"target_prefix\": tgt_prefix}\n else:\n jsone_line = {\"text\": src_digits, \"factors\": [src_factors[i] for src_factors in list_src_factors], \\\n \"target_prefix_factors\": [C.TOKEN_SEPARATOR.join(tgt_factors[i]) for tgt_factors in list_tgt_factors], \\\n \"target_prefix\": tgt_prefix}\n print(json.dumps(jsone_line), file=out)\n\n\ndef generate_low_high_factors(input_path: str, output_path: str):\n \"\"\"\n Writes low/high factor file given a file of digit sequences.\n \"\"\"\n with open(input_path, 'r') as fin, open(output_path, 'w') as fout:\n for line in fin:\n digits = map(int, line.rstrip().split())\n factors = (\"l\" if digit < _MID else \"h\" for digit in digits)\n print(C.TOKEN_SEPARATOR.join(factors), file=fout)\n\n\ndef generate_odd_even_factors(input_path: str, output_path: str):\n \"\"\"\n Writes odd/even factor file given a file of digit sequences.\n \"\"\"\n with open(input_path, 'r') as fin, open(output_path, 'w') as fout:\n for line in fin:\n digits = map(int, line.rstrip().split())\n factors = (\"e\" if digit % 2 == 0 else \"o\" for digit in digits)\n print(C.TOKEN_SEPARATOR.join(factors), file=fout)\n\n\ndef generate_fast_align_lex(lex_path: str):\n \"\"\"\n Generate a fast_align format lex table for digits.\n\n :param lex_path: Path to write lex table.\n \"\"\"\n with open(lex_path, \"w\") as lex_out:\n for digit in _DIGITS:\n print(\"{0}\\t{0}\\t0\".format(digit), file=lex_out)\n\n\nLEXICON_CREATE_PARAMS_COMMON = \"create -i {input} -m {model} -k {topk} -o {lexicon}\"\n\n\n@contextmanager\ndef tmp_digits_dataset(prefix: str,\n train_line_count: int, train_line_count_empty: int, train_max_length: int,\n dev_line_count: int, dev_max_length: int,\n test_line_count: int, test_line_count_empty: int, test_max_length: int,\n sort_target: bool = False,\n seed_train: int = 13, seed_dev: int = 13,\n source_text_prefix_token: str = '',\n with_n_source_factors: int = 0,\n with_n_target_factors: int = 0) -> Dict[str, Any]:\n \"\"\"\n Creates a temporary dataset with train, dev, and test. 
Returns a dictionary with paths to the respective temporary\n files.\n \"\"\"\n if source_text_prefix_token:\n sockeye.utils.check_condition(with_n_source_factors == 0,\n \"The digits dataset does not support source factors and source text prefix token \"\n \"at the same time.\")\n with TemporaryDirectory(prefix=prefix) as work_dir:\n # Simple digits files for train/dev data\n train_source_path = os.path.join(work_dir, \"train.src\")\n train_target_path = os.path.join(work_dir, \"train.tgt\")\n dev_source_path = os.path.join(work_dir, \"dev.src\")\n dev_target_path = os.path.join(work_dir, \"dev.tgt\")\n test_source_path = os.path.join(work_dir, \"test.src\")\n test_target_path = os.path.join(work_dir, \"test.tgt\")\n test_source_with_target_prefix_path = os.path.join(work_dir, \"test_source_with_target_prefix.json\")\n generate_digits_file(train_source_path, train_target_path, train_line_count, train_max_length,\n line_count_empty=train_line_count_empty, sort_target=sort_target, seed=seed_train,\n source_text_prefix_token=source_text_prefix_token)\n generate_digits_file(dev_source_path, dev_target_path, dev_line_count, dev_max_length, sort_target=sort_target,\n seed=seed_dev, source_text_prefix_token=source_text_prefix_token)\n generate_digits_file(test_source_path, test_target_path, test_line_count, test_max_length,\n line_count_empty=test_line_count_empty, sort_target=sort_target, seed=seed_dev,\n source_text_prefix_token=source_text_prefix_token)\n data = {'work_dir': work_dir,\n 'train_source': train_source_path,\n 'train_target': train_target_path,\n 'dev_source': dev_source_path,\n 'dev_target': dev_target_path,\n 'test_source': test_source_path,\n 'test_target': test_target_path,\n 'test_source_with_target_prefix': test_source_with_target_prefix_path}\n\n if with_n_source_factors > 0:\n data['train_source_factors'] = []\n data['dev_source_factors'] = []\n data['test_source_factors'] = []\n for i in range(with_n_source_factors):\n train_factor_path = train_source_path + \".factors%d\" % i\n dev_factor_path = dev_source_path + \".factors%d\" % i\n test_factor_path = test_source_path + \".factors%d\" % i\n generate_low_high_factors(train_source_path, train_factor_path)\n generate_low_high_factors(dev_source_path, dev_factor_path)\n generate_low_high_factors(test_source_path, test_factor_path)\n data['train_source_factors'].append(train_factor_path)\n data['dev_source_factors'].append(dev_factor_path)\n data['test_source_factors'].append(test_factor_path)\n\n if with_n_target_factors > 0:\n data['train_target_factors'] = []\n data['dev_target_factors'] = []\n data['test_target_factors'] = []\n for i in range(with_n_target_factors):\n train_factor_path = train_target_path + \".factors%d\" % i\n dev_factor_path = dev_target_path + \".factors%d\" % i\n test_factor_path = test_target_path + \".factors%d\" % i\n generate_odd_even_factors(train_target_path, train_factor_path)\n generate_odd_even_factors(dev_target_path, dev_factor_path)\n generate_odd_even_factors(test_target_path, test_factor_path)\n data['train_target_factors'].append(train_factor_path)\n data['dev_target_factors'].append(dev_factor_path)\n data['test_target_factors'].append(test_factor_path)\n\n source_factors_path = None if 'test_source_factors' not in data else data['test_source_factors']\n target_factors_path = None if 'test_target_factors' not in data else data['test_target_factors']\n generate_json_input_file_with_tgt_prefix(test_source_path, test_target_path, test_source_with_target_prefix_path, \\\n 
source_factors_path, target_factors_path)\n yield data\n\n\nTRAIN_PARAMS_COMMON = \"--use-cpu --max-seq-len {max_len} --source {train_source} --target {train_target}\" \\\n \" --validation-source {dev_source} --validation-target {dev_target} --output {model}\" \\\n \" --seed {seed}\"\n\nPREPARE_DATA_COMMON = \" --max-seq-len {max_len} --source {train_source} --target {train_target}\" \\\n \" --output {output} --pad-vocab-to-multiple-of 16\"\n\nTRAIN_WITH_SOURCE_FACTORS_COMMON = \" --source-factors {source_factors}\"\nDEV_WITH_SOURCE_FACTORS_COMMON = \" --validation-source-factors {dev_source_factors}\"\nTRAIN_WITH_TARGET_FACTORS_COMMON = \" --target-factors {target_factors}\"\nDEV_WITH_TARGET_FACTORS_COMMON = \" --validation-target-factors {dev_target_factors}\"\n\nTRAIN_PARAMS_PREPARED_DATA_COMMON = \"--use-cpu --max-seq-len {max_len} --prepared-data {prepared_data}\" \\\n \" --validation-source {dev_source} --validation-target {dev_target} \" \\\n \"--output {model}\"\n\nTRANSLATE_PARAMS_COMMON = \"--use-cpu --models {model} --input {input} --output {output} \" \\\n \"--output-type json\"\n\nTRANSLATE_WITH_FACTORS_COMMON = \" --input-factors {input_factors}\"\n\nTRANSLATE_WITH_JSON_FORMAT = \" --json-input\"\n\nTRANSLATE_PARAMS_RESTRICT = \"--restrict-lexicon {lexicon} --restrict-lexicon-topk {topk}\"\n\nSCORE_PARAMS_COMMON = \"--use-cpu --model {model} --source {source} --target {target} --output {output} \"\n\nSCORE_WITH_SOURCE_FACTORS_COMMON = \" --source-factors {source_factors}\"\nSCORE_WITH_TARGET_FACTORS_COMMON = \" --target-factors {target_factors}\"\n\n\ndef run_train_translate(train_params: str,\n translate_params: str,\n data: Dict[str, Any],\n use_prepared_data: bool = False,\n max_seq_len: int = 10,\n seed: int = 13) -> Dict[str, Any]:\n \"\"\"\n Train a model and translate a test set. 
Returns the updated data dictionary containing paths to translation outputs\n and scores.\n\n :param train_params: Command line args for model training.\n :param translate_params: First command line args for translation.\n :param data: Dictionary containing test data\n :param use_prepared_data: Whether to use the prepared data functionality.\n :param max_seq_len: The maximum sequence length.\n :param seed: The seed used for training.\n :return: Data dictionary, updated with translation outputs and scores\n \"\"\"\n work_dir = os.path.join(data['work_dir'], 'train_translate')\n data['model'] = os.path.join(work_dir, \"model\")\n # Optionally create prepared data directory\n if use_prepared_data:\n data['train_prepared'] = os.path.join(work_dir, \"prepared_data\")\n prepare_params = \"{} {}\".format(\n sockeye.prepare_data.__file__,\n PREPARE_DATA_COMMON.format(train_source=data['train_source'],\n train_target=data['train_target'],\n output=data['train_prepared'],\n max_len=max_seq_len))\n if 'train_source_factors' in data:\n prepare_params += TRAIN_WITH_SOURCE_FACTORS_COMMON.format(\n source_factors=\" \".join(data['train_source_factors']))\n if 'train_target_factors' in data:\n prepare_params += TRAIN_WITH_TARGET_FACTORS_COMMON.format(\n target_factors=\" \".join(data['train_target_factors']))\n\n if '--weight-tying-type src_trg' in train_params:\n prepare_params += ' --shared-vocab'\n\n logger.info(\"Preparing data with parameters %s.\", prepare_params)\n with patch.object(sys, \"argv\", prepare_params.split()):\n sockeye.prepare_data.main()\n # Train model\n params = \"{} {} {}\".format(sockeye.train.__file__,\n TRAIN_PARAMS_PREPARED_DATA_COMMON.format(prepared_data=data['train_prepared'],\n dev_source=data['dev_source'],\n dev_target=data['dev_target'],\n model=data['model'],\n max_len=max_seq_len),\n train_params)\n\n if 'dev_source_factors' in data:\n params += DEV_WITH_SOURCE_FACTORS_COMMON.format(dev_source_factors=\" \".join(data['dev_source_factors']))\n if 'dev_target_factors' in data:\n params += DEV_WITH_TARGET_FACTORS_COMMON.format(dev_target_factors=\" \".join(data['dev_target_factors']))\n\n logger.info(\"Starting training with parameters %s.\", train_params)\n with patch.object(sys, \"argv\", params.split()):\n sockeye.train.main()\n else:\n # Train model\n params = \"{} {} {}\".format(sockeye.train.__file__,\n TRAIN_PARAMS_COMMON.format(train_source=data['train_source'],\n train_target=data['train_target'],\n dev_source=data['dev_source'],\n dev_target=data['dev_target'],\n model=data['model'],\n max_len=max_seq_len,\n seed=seed),\n train_params)\n\n if 'train_source_factors' in data:\n params += TRAIN_WITH_SOURCE_FACTORS_COMMON.format(source_factors=\" \".join(data['train_source_factors']))\n if 'train_target_factors' in data:\n params += TRAIN_WITH_TARGET_FACTORS_COMMON.format(target_factors=\" \".join(data['train_target_factors']))\n if 'dev_source_factors' in data:\n params += DEV_WITH_SOURCE_FACTORS_COMMON.format(dev_source_factors=\" \".join(data['dev_source_factors']))\n if 'dev_target_factors' in data:\n params += DEV_WITH_TARGET_FACTORS_COMMON.format(dev_target_factors=\" \".join(data['dev_target_factors']))\n\n logger.info(\"Starting training with parameters %s.\", train_params)\n with patch.object(sys, \"argv\", params.split()):\n sockeye.train.main()\n\n # create Top-K lexicon from simple ttable mapping digit to digit\n ttable_path = os.path.join(data['work_dir'], \"ttable\")\n generate_fast_align_lex(ttable_path)\n lexicon_path = os.path.join(data['work_dir'], 
\"lexicon\")\n params = \"{} {}\".format(sockeye.lexicon.__file__,\n LEXICON_CREATE_PARAMS_COMMON.format(input=ttable_path,\n model=data['model'],\n topk=20,\n lexicon=lexicon_path))\n with patch.object(sys, \"argv\", params.split()):\n sockeye.lexicon.main()\n data['lexicon'] = lexicon_path\n\n # Translate corpus with the 1st params and scoring output handler to obtain scores\n data['test_output'] = os.path.join(work_dir, \"test.out\")\n data['test_with_target_prefix_output'] = os.path.join(work_dir, \"test_with_target_prefix.out\")\n\n # First set of params (with target prefix in JSON format)\n params = \"{} {} {}\".format(sockeye.translate.__file__,\n TRANSLATE_PARAMS_COMMON.format(model=data['model'],\n input=data['test_source_with_target_prefix'],\n output=data['test_with_target_prefix_output']),\n translate_params)\n params += TRANSLATE_WITH_JSON_FORMAT\n logger.info(\"Translating with params %s\", params)\n with patch.object(sys, \"argv\", params.split()):\n sockeye.translate.main()\n\n # Collect test translate outputs and scores\n data['test_with_target_prefix_outputs'] = collect_translate_output_and_scores(data['test_with_target_prefix_output'])\n\n # Second set of params (without target prefix)\n params = \"{} {} {}\".format(sockeye.translate.__file__,\n TRANSLATE_PARAMS_COMMON.format(model=data['model'],\n input=data['test_source'],\n output=data['test_output']),\n translate_params)\n\n if 'test_source_factors' in data:\n params += TRANSLATE_WITH_FACTORS_COMMON.format(input_factors=\" \".join(data['test_source_factors']))\n\n logger.info(\"Translating with params %s\", params)\n with patch.object(sys, \"argv\", params.split()):\n sockeye.translate.main()\n\n # Collect test inputs\n with open(data['test_source']) as inputs:\n data['test_inputs'] = [line.strip() for line in inputs]\n\n # Collect test references\n with open(data['test_target'], \"r\") as ref:\n data['test_targets'] = [line.strip() for line in ref]\n\n # Collect test translate outputs and scores\n data['test_outputs'] = collect_translate_output_and_scores(data['test_output'])\n assert len(data['test_inputs']) == len(data['test_targets']) == len(data['test_outputs']) == len(data['test_with_target_prefix_outputs'])\n return data\n\n\ndef run_translate_restrict(data: Dict[str, Any], translate_params: str) -> Dict[str, Any]:\n \"\"\"\n Runs sockeye.translate with vocabulary selection and checks if number of outputs are the same as without\n vocabulary selection. 
Adds restricted outputs and scores to the data dictionary.\n \"\"\"\n translate_mod = sockeye.translate\n out_path = os.path.join(data['work_dir'], \"out-restrict.txt\")\n out_with_target_prefix_path = os.path.join(data['work_dir'], \"out-with-target-prefix-restrict.txt\")\n # Translate corpus with restrict-lexicon\n\n # First set of params (with target prefix in JSON format)\n params = \"{} {} {} {}\".format(translate_mod.__file__,\n TRANSLATE_PARAMS_COMMON.format(model=data['model'],\n input=data['test_source_with_target_prefix'],\n output=out_with_target_prefix_path),\n translate_params,\n TRANSLATE_PARAMS_RESTRICT.format(lexicon=data['lexicon'], topk=1))\n params += TRANSLATE_WITH_JSON_FORMAT\n with patch.object(sys, \"argv\", params.split()):\n translate_mod.main()\n\n # Collect test translate outputs and scores\n data['test_with_target_prefix_outputs_restricted'] = collect_translate_output_and_scores(out_with_target_prefix_path)\n\n # Second set of params (without using target prefix)\n params = \"{} {} {} {}\".format(translate_mod.__file__,\n TRANSLATE_PARAMS_COMMON.format(model=data['model'],\n input=data['test_source'],\n output=out_path),\n translate_params,\n TRANSLATE_PARAMS_RESTRICT.format(lexicon=data['lexicon'], topk=1))\n if 'test_source_factors' in data:\n params += TRANSLATE_WITH_FACTORS_COMMON.format(input_factors=\" \".join(data['test_source_factors']))\n with patch.object(sys, \"argv\", params.split()):\n translate_mod.main()\n\n # Collect test translate outputs and scores\n data['test_outputs_restricted'] = collect_translate_output_and_scores(out_path)\n assert len(data['test_with_target_prefix_outputs_restricted']) == len(data['test_outputs_restricted']) == len(data['test_outputs'])\n return data\n\n\ndef collect_translate_output_and_scores(out_path: str) -> List[Dict]:\n \"\"\"\n Collects json outputs from an output file, produced with the 'json' or nbest output handler.\n \"\"\"\n logger.debug(\"collect_translate_output_and_scores(%s)\", out_path)\n outputs = []\n with open(out_path) as out_fh:\n for line in out_fh:\n line = line.strip()\n logger.debug(\" line: %s\", line)\n outputs.append(json.loads(line))\n return outputs\n","repo_name":"awslabs/sockeye","sub_path":"sockeye/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":23610,"program_lang":"python","lang":"en","doc_type":"code","stars":1192,"dataset":"github-code","pt":"48"} +{"seq_id":"31078717035","text":"class RandomizedCollection:\n\n def __init__(self):\n self.arr = []\n self.hmap = defaultdict(set)\n\n def insert(self, val: int) -> bool:\n self.arr.append(val)\n self.hmap[val].add(len(self.arr)-1)\n \n return len(self.hmap[val]) == 1\n\n def remove(self, val: int) -> bool:\n if len(self.hmap[val]) == 0:\n return False\n \n index = self.hmap[val].pop() \n lastNum = self.arr[-1]\n n = len(self.arr)\n \n self.arr[index] = lastNum\n self.arr.pop()\n \n self.hmap[lastNum].discard(n-1)\n if index != n-1:\n self.hmap[lastNum].add(index)\n \n return True\n \n def getRandom(self) -> int:\n start = 0\n end = len(self.arr)-1\n index = random.randint(start,end)\n \n return self.arr[index]\n\n\n# Your RandomizedCollection object will be instantiated and called as such:\n# obj = RandomizedCollection()\n# param_1 = obj.insert(val)\n# param_2 = obj.remove(val)\n# param_3 = 
obj.getRandom()","repo_name":"Merwan-J/competetive-programming","sub_path":"0381-insert-delete-getrandom-o1-duplicates-allowed/0381-insert-delete-getrandom-o1-duplicates-allowed.py","file_name":"0381-insert-delete-getrandom-o1-duplicates-allowed.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"42181985243","text":"# data preprocessing\r\n# pca\r\n# decision tree\r\n# random forest\r\n# f1 score\r\n\r\n# Importing the libraries\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nfrom sklearn.decomposition import PCA\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.metrics import f1_score\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\n\r\n# Importing the dataset\r\ndataset = pd.read_csv(\"C:/Users/ishme/Downloads/train_drugs.txt\", header=None, sep = \"\\t\")\r\ntest = pd.read_csv(\"C:/Users/ishme/Downloads/test.txt\", header=None, sep = \"\\t\")\r\ny_test_df = pd.read_csv(\"C:/Users/ishme/Downloads/format.txt\", header=None)\r\n\r\n\r\n# Getting the dataframe in right format\r\ntemp_df = dataset[1].str.split(\" \", n = 0, expand = True)\r\nfor col in temp_df.columns: \r\n temp_df[col] = pd.to_numeric(temp_df[col])\r\ndataset = dataset.drop([1], axis = 1)\r\ndf = pd.concat([dataset, temp_df], axis = 1)\r\n\r\n# Dropping NaN columns\r\ndf = df.dropna(axis = 1)\r\n\r\n\r\n# Getting the test dataframe in right format\r\ntemp_df = test[0].str.split(\" \", n = 0, expand = True)\r\nfor col in temp_df.columns: \r\n temp_df[col] = pd.to_numeric(temp_df[col])\r\ntest_df = temp_df\r\n\r\n# Dropping NaN columns\r\ntest_df = test_df.dropna(axis = 1)\r\n\r\n\r\n# Converting \r\nX_train = df.iloc[:, 1:test_df.shape[1]+1].values\r\ny_train = df.iloc[:, 0].values\r\nX_test = test_df.values\r\ny_test = y_test_df.values\r\n\r\n\r\n# Feature Scaling\r\nsc = StandardScaler()\r\nX_train = sc.fit_transform(X_train)\r\nX_test = sc.transform(X_test)\r\n\r\n\r\n#Fitting the PCA algorithm with our Data\r\npca = PCA().fit(X_train)\r\n#Plotting the Cumulative Summation of the Explained Variance\r\nplt.figure()\r\nplt.plot(np.cumsum(pca.explained_variance_ratio_))\r\nplt.xlabel('X Labels')\r\nplt.ylabel('Y Labels') #for each component\r\nplt.title('Plot')\r\nplt.show()\r\n\r\n\r\n# Applying PCA\r\npca = PCA(n_components = 5)\r\nX_train = pca.fit_transform(X_train)\r\nX_test = pca.transform(X_test)\r\nexplained_variance = pca.explained_variance_ratio_\r\n\r\n\r\n# Fitting Random Forest Classification to the Training set\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nclassifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)\r\nclassifier.fit(X_train, y_train)\r\n\r\n# Predicting the Test set results\r\ny_pred = classifier.predict(X_test)\r\n\r\n# Making the Confusion Matrix\r\nfrom sklearn.metrics import confusion_matrix\r\ncm = confusion_matrix(y_test, y_pred)\r\nprint(cm)\r\n\r\n\r\n# F1 Score\r\nf1 = f1_score(y_test, y_pred, average='binary')\r\nprint(f1)\r\n\r\n\r\n\r\n","repo_name":"preetpal725/ActiveDrugs","sub_path":"hw_rf.py","file_name":"hw_rf.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32851478290","text":"from django.utils.importlib import import_module\nfrom django.contrib.sites.models import Site, RequestSite\n\n\nfrom registration.backends.default import DefaultBackend\nfrom 
registration.models import RegistrationProfile\nfrom registration import signals\n\n\nfrom .. import app_settings\nfrom ..compat import get_user_model\n\n\nclass RegistrationBackend(DefaultBackend):\n def get_form_class(self, request):\n form_class = app_settings.A2_REGISTRATION_FORM_CLASS\n module, form_class = form_class.rsplit('.', 1)\n module = import_module(module)\n return getattr(module, form_class)\n\n def register(self, request, **kwargs):\n \"\"\"\n Given a username, email address and password, register a new\n user account, which will initially be inactive.\n\n Along with the new ``User`` object, a new\n ``registration.models.RegistrationProfile`` will be created,\n tied to that ``User``, containing the activation key which\n will be used for this account.\n\n An email will be sent to the supplied email address; this\n email should contain an activation link. The email will be\n rendered using two templates. See the documentation for\n ``RegistrationProfile.send_activation_email()`` for\n information about these templates and the contexts provided to\n them.\n\n After the ``User`` and ``RegistrationProfile`` are created and\n the activation email is sent, the signal\n ``registration.signals.user_registered`` will be sent, with\n the new ``User`` as the keyword argument ``user`` and the\n class of this backend as the sender.\n\n \"\"\"\n user_model = get_user_model()\n form_kwargs = {'password': kwargs['password1'],}\n for field in user_model._meta.get_all_field_names():\n if field in kwargs:\n form_kwargs[field] = kwargs[field]\n\n if Site._meta.installed:\n site = Site.objects.get_current()\n else:\n site = RequestSite(request)\n new_user = RegistrationProfile.objects.create_inactive_user(form_kwargs, site)\n signals.user_registered.send(sender=self.__class__,\n user=new_user,\n request=request)\n return new_user\n","repo_name":"pombredanne/authentic2","sub_path":"authentic2/registration_backend/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43381292227","text":"import requests\nimport concurrent.futures\nimport itertools\nimport sys\nimport os\nimport re\n\ndef concr(func,data,max_workers=50,thread=None):\n\tthread = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) if not(thread) else thread\n\tdat = list(thread.map(func,data))\n\tif len(dat) and (type(dat[0]) is dict):\n\t\treturn dat\n\telse:\n\t\ttry:\n\t\t\tif len(dat) and dat != None and not(all(map(lambda x: x == None, dat))):\n\t\t\t\treturn list(itertools.chain(*dat))\n\t\t\telse:\n\t\t\t\treturn dat\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tprint(dat)\nmodule_num = sys.argv[1] if len(sys.argv) > 1 else \"2.1\"\n\nbase_url = \"https://tower.la.utexas.edu/reflect?url=http%3A%2F%2Fmedia.laits.utexas.edu%3A8080%2Fvideo_production%2F_hosted%2Fgov_312usfp_sum2015%2Fgov312_topic{}.srt\"\nbase_url_2 = \"https://tower.la.utexas.edu/reflect?url=http%3A%2F%2Fmedia.laits.utexas.edu%3A8080%2Fvideo_production%2F_hosted%2Fgov_312usfp_sp2016%2Fgov312_topic{}.srt\"\nbase_url_3 = \"https://tower.la.utexas.edu/reflect?url=http%3A%2F%2Fmedia.laits.utexas.edu%3A8080%2Fvideo_production%2F_hosted%2Fgov_312usfp_sum2016%2Fgov312_topic{}.srt\"\n\nlinks = [base_url_3,base_url_2,base_url]\n\n\n\ndef process_module(module):\n\tout = \"\"\n\tfor line in module.split(\"\\n\"):\n\t\tnumber = re.match(r\"^\\d{1,3}\\r\",line)\n\t\ttimings = 
re.match(r\"\\d\\d:\\d\\d.*,\\d.*\\r\",line)\n\t\tif not(number) and not(timings):\n\t\t\tout += \" \" + line.replace(\"\\r\\n\",\" \").replace(\"\\r\",\" \").replace(\"\\n\",\" \").replace(\" \",\" \")\n\treturn out.replace(\" \",\" \").replace(\" \",\" \")\n\n\noverride = {\n\t\"5.4\": \"https://tower.la.utexas.edu/reflect?url=http%3A%2F%2Fmedia.laits.utexas.edu%3A8080%2Fvideo_services%2F_hosted%2Fgov_312usfp_fa2015%2Fbrennan%2Fgov312_topic5.4.srt\"\n}\n\ndef get_module(number,):\n\tif number in override:\n\t\treturn process_module(requests.get(override[number]).text)\n\tfor link in links:\n\t\turl = link.format(number)\n\t\treq = requests.get(url)\n\t\tfound = \"404 Not Found\" not in str(req.content)\n\t\tif(found):\n\t\t\t# return re.sub(r'\\?(?!\")',\".\\n\",re.sub(r'(?:[a-zA-z]{2,})\\.(?!\")',\".\\n\",process_module(req.text)))\n\t\t\treturn process_module(req.text)\n\ndef get_or_create(path):\n\tprint(\"/\".join(path.split(\"/\")[:-1]))\n\tif(not os.path.exists(\"/\".join(path.split(\"/\")[:-1]))):\n\t\tos.makedirs(\"/\".join(path.split(\"/\")[:-1]))\n\treturn open(path,\"w+\")\ndef load_module(module,submodule):\n\t# print(module,submodule)\n\tmod = \"{}.{}\".format(module,submodule)\n\tfetch = get_module(mod)\n\tif(fetch != None):\n\t\thandle = get_or_create(\"./captions/{}/{}-{}.txt\".format(str(module).zfill(3),module,submodule))\n\t\thandle.write(fetch)\n\t\thandle.close()\ndef load_modules():\t\t\t\n\tconcr(lambda x: load_module(*x), [(module,submodule) for module in range(1,25) for submodule in range(0,10)],max_workers=25*10/2)\n\nload_modules()\n\n# print(content.replace(\"\"))\n# content = re.sub(re.compile(r\"(^\\d{1,4}\\n)|(\\d\\d\\:.*)\",re.MULTILINE),\"\",content).replace(\"\\n\",\" \")\n\n# content = re.sub(re.compile(r\"\\\\r\\\\r\\d{1,4}\",re.MULTILINE),\"\",content)\n# print(\"\\nModule {}\".format(module_num))\n# print(content)\n\n\n","repo_name":"evanmosseri/gov312l-caption-parser","sub_path":"get_module.py","file_name":"get_module.py","file_ext":"py","file_size_in_byte":2984,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27229657912","text":"from typing import List\nfrom discord.ext import commands as dc\nfrom watdo.errors import CancelCommand\nfrom watdo.discord import Bot\nfrom watdo.discord.cogs import BaseCog\n\n\nclass Shortcuts(BaseCog, description=\"Speed up your workflow with command shortcuts.\"):\n @dc.hybrid_command() # type: ignore[arg-type]\n async def set_short(self, ctx: dc.Context[Bot], name: str) -> None:\n \"\"\"Set a command shortcut.\"\"\"\n command: List[str] = []\n\n while True:\n if len(command) == 0:\n question = (\n f\"Type in the command to be executed when **{name}** is sent.\\n\"\n \"To finish, type **DONE** or **CANCEL**.\"\n )\n else:\n question = \"Another command:\"\n\n inputs = await self.interview(ctx, questions={question: None})\n inp = inputs[0]\n\n if inp == \"DONE\":\n if len(command) == 0:\n await BaseCog.send(ctx, \"Please put at least one command.\")\n continue\n\n break\n\n if inp == \"CANCEL\":\n raise CancelCommand()\n\n command.append(inp)\n\n await self.db.set_command_shortcut(str(ctx.author.id), name, command)\n\n cs = \"\".join(f\"```\\n{c}\\n```\" for c in command)\n await BaseCog.send(ctx, f\"Command shortcut set ✅\\n**{name}**\\n{cs}\")\n\n @dc.hybrid_command() # type: ignore[arg-type]\n async def shorts(self, ctx: dc.Context[Bot]) -> None:\n \"\"\"Show all your command shortcuts.\"\"\"\n data = await 
self.db.get_all_command_shortcuts(str(ctx.author.id))\n        message = []\n\n        for name, command in data.items():\n            cs = \"\".join(f\"```\\n{c}\\n```\" for c in command)\n            message.append(f\"**{name}**\\n{cs}\")\n\n        await BaseCog.send(ctx, \"\\n\".join(message) or \"No command shortcuts ❌\")\n\n    @dc.hybrid_command()  # type: ignore[arg-type]\n    async def delete_short(self, ctx: dc.Context[Bot], name: str) -> None:\n        \"\"\"Delete a command shortcut.\"\"\"\n        deleted_count = await self.db.delete_command_shortcut(str(ctx.author.id), name)\n\n        if deleted_count > 0:\n            await BaseCog.send(ctx, \"Deleted ✅\")\n        else:\n            await BaseCog.send(ctx, f'Command shortcut \"{name}\" not found ❌')\n\n\nasync def setup(bot: Bot) -> None:\n    await bot.add_cog(Shortcuts(bot, bot.db))\n","repo_name":"nietsuu/watdo","sub_path":"watdo/discord/cogs/shortcuts.py","file_name":"shortcuts.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"40371176777","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Feb 13 15:12:33 2022\nIntensity resonance (voltage across the resistor)\n@author: remimetzdorff\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import odeint\n\n#########################\n# CIRCUIT MODELLING\n#########################\n# Series RLC circuit driven by the sinusoidal signal e of a function generator (GBF);\n# we measure the voltage s across the resistor\nomega0 = 1\nQ = 10\nomega = 1 # angular frequency of the GBF signal\n\ndef e(t):\n    # GBF (generator) signal\n    return np.cos(omega * t)\n\ndef de(t):\n    # derivative of the GBF signal\n    return -omega * np.sin(omega * t)\n\ndef rlc(V,t):\n    # function associated with the differential equation\n    s, ds = V\n    dds = - omega0**2 * s + omega0/Q * (de(t) - ds)\n    dV = [ds, dds]\n    return dV\n\n########################################\n# SOLVING AND PLOTTING\n########################################\nt = np.linspace(-10*Q/omega0,5*2*np.pi/omega,1000) # time t in seconds\nV = odeint(rlc, [0,0], t)\ns = V[:,0] # voltage across the resistor R\n\nplt.title(\"Résonance en intensité (tension aux bornes de la résistance)\")\nplt.plot(t,e(t),label=\"$e(t)$\")\nplt.plot(t,s,label=\"$s(t)$\")\nplt.xlabel(\"Temps (s)\")\nplt.ylabel(\"Tension (V)\")\n#plt.xlim(0,5*2*np.pi/omega)\nplt.legend()\nplt.show()","repo_name":"remimetzdorff/mp2i","sub_path":"python/chap13-resonance_intensite.py","file_name":"chap13-resonance_intensite.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"fr","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"4037061637","text":"# You are given n words. Some words may repeat. For each word, output its number of occurrences. The output order should correspond with the input order of appearance of the word. 
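[Editor's note, not part of the original statement: the solution below relies on plain dicts preserving insertion order (guaranteed since Python 3.7), which is what makes the printed frequencies follow each word's first appearance.] 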
See the sample input/output for clarification.\r\n\r\nn = int(input())\r\nwords = []\r\nwhile(n>0):\r\n words.append(input())\r\n n -= 1\r\nunique_words = {}\r\nfor word in words:\r\n if word not in unique_words:\r\n unique_words[word] = 1\r\n else:\r\n unique_words[word] += 1\r\n \r\nprint(len(unique_words))\r\nfor freq in unique_words.values():\r\n print(freq, end=\" \")\r\n ","repo_name":"dscmsit/Problem-Solving-in-any-Language","sub_path":"Word_order.py","file_name":"Word_order.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"73105931984","text":"cook_book = {\n 'Омлет': [\n {'ingredient_name': 'Яйцо', 'quantity': 2, 'measure': 'шт.'},\n {'ingredient_name': 'Молоко', 'quantity': 100, 'measure': 'мл'},\n {'ingredient_name': 'Помидор', 'quantity': 2, 'measure': 'шт'}\n ],\n 'Утка по-пекински': [\n {'ingredient_name': 'Утка', 'quantity': 1, 'measure': 'шт'},\n {'ingredient_name': 'Вода', 'quantity': 2, 'measure': 'л'},\n {'ingredient_name': 'Мед', 'quantity': 3, 'measure': 'ст.л'},\n {'ingredient_name': 'Соевый соус', 'quantity': 60, 'measure': 'мл'}\n ],\n 'Запеченный картофель': [\n {'ingredient_name': 'Картофель', 'quantity': 1, 'measure': 'кг'},\n {'ingredient_name': 'Чеснок', 'quantity': 3, 'measure': 'зубч'},\n {'ingredient_name': 'Сыр гауда', 'quantity': 100, 'measure': 'г'},\n ]\n}\n\n\ndef get_shop_list_by_dishes(dishes, person_count):\n lc_dict2buy = {}\n list_ing4dish = {}\n loc_dict_cook_book = read_cook_book()\n for dish in dishes:\n if (dish == '' or dish == ' '):\n continue\n list_ing4dish = loc_dict_cook_book.get(str(dish)) \n for ing in list_ing4dish:\n if ing['ingridient_name'] not in lc_dict2buy.keys():\n lc_dict2buy[ing['ingridient_name']] = {'measure': ing['measure'], 'quantity': 0}\n lc_dict2buy[ing['ingridient_name']]['quantity'] = lc_dict2buy[ing['ingridient_name']]['quantity'] + ( int(ing['quantity'] )) * person_count\n return lc_dict2buy\n\n\n\ndef get_dishes():\n loc_list = []\n loc_list.append('Запеченный картофель')\n loc_list.append('Омлет')\n return loc_list\n\n\ndef get_persons_num():\n return 2\n\n\ndef read_cook_book():\n loc_dict = {}\n lv_dish_name = ''\n lv_ing_num = 0\n lv_ing_proc = 0\n lv_ingredient_line = ''\n\n with open('cook_book.txt', 'rt', encoding='UTF8') as cook_book_file:\n for line in cook_book_file:\n lv_dish_name = line.strip()\n list_ing4dish = []\n lv_ing_num = cook_book_file.readline().strip()\n lv_ing_proc = 0\n while (int(lv_ing_num) > lv_ing_proc):\n dict_ing_info = {}\n list_ing_info = []\n lv_ingredient_line = cook_book_file.readline().strip()\n list_ing_info = lv_ingredient_line.split('|')\n dict_ing_info['ingridient_name'] = list_ing_info[0]\n dict_ing_info['quantity'] = list_ing_info[1]\n dict_ing_info['measure'] = list_ing_info[2]\n list_ing4dish.append(dict_ing_info)\n lv_ing_proc += 1\n loc_dict[str(lv_dish_name)] = list_ing4dish\n cook_book_file.readline()\n return loc_dict\n\ndef read_cook_book_anc_calc_list():\n loc_dict_cook_book = read_cook_book()\n print(loc_dict_cook_book)\n\n print('Получаем количество блюд и персон')\n loc_list_dishes = get_dishes()\n loc_persons_num = get_persons_num()\n\n print('Формируем список ингридиентов')\n loc_list2buy = get_shop_list_by_dishes(loc_list_dishes, loc_persons_num)\n print(loc_list2buy)\n\nprint('Начало работы программы********************')\nread_cook_book_anc_calc_list()\nprint('Завершение работы 
программы********************')\n","repo_name":"filipanselmo/file","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24344016900","text":"\"\"\"\nScript that runs the main PowerToken function: poll WEconnect and update Fitbit.\nMeant to be run in Crontab, probably every 5-15 minutes.\\n\n\nUSAGE:\nFirst poll of the day adds new events (poll_and_save)\nSubsequent polls should update completion status (poll_and_update)\n\nCreated by Abigail Franz on 2/28/2018.\\n\nLast modified by Jasmine J on 6/2019.\n\n\"\"\"\nimport sys\nimport math\nimport csv\nfrom datetime import datetime\nfrom database import get_session, close_connection\nfrom database import Activity, Event, User, Day\nfrom database import TALLY, CHARGE, WEIGHT, PLAN\nimport fitbit\nimport weconnect\n\nimport logging, sys\nlogging.basicConfig(stream=sys.stderr, level=logging.DEBUG)\n\nLAST_POLL_TIME = datetime.strptime('23:30', '%H:%M').time()\n\nTALLY=\"tally\"\nCHARGE=\"charge\"\nWEIGHT=\"weight\"\nPLAN=\"plan\"\n\n#import atexit\n#def onExit():\n#\tclose_connection()\n#atexit.register(onExit)\n\n\ndef poll_and_save():\n\t\"\"\"\n\tCheck for new events (and activities) for every saved user and save them to the database \n\t\"\"\"\n\tdb_session = get_session()\n\tusers = db_session.query(User).all()\n\tfor user in users:\n\t\tlogging.debug(\"Polling for {}\".format(user))\n\t\t\n\t\t#Gather events from WC and save to database, including updating new activities added\n\t\tactivity_events = weconnect.get_todays_events(user, db_session)\n\t\tweconnect.add_events_to_db(activity_events, db_session)\n\t\tlogging.info(\"Found {} new events for user {}\".format(len(activity_events), user.wc_id))\n\n\t\t# Add \"progress\" based on metaphor\n\t\tstep_count = 0\n\t\tif user.metaphor == CHARGE:\n\t\t\t#get yesterDay\n\t\t\tyesterDay = user.yesterday()\n\t\t\tif yesterDay is not None:\n\t\t\t\t#get progress from yesterDay, add as starting progress to Fitbit\n\t\t\t\tstep_count = fitbit.update_progress_count(user, yesterDay.computed_progress, db_session)\n\t\t\t\tlogging.info(\"Just added {} prelim steps to {}'s account!\".format(step_count, user.username))\n\t\t\telse:\n\t\t\t\tlogging.info(\"CHARGE: Starting fresh from today: {}\".format(datetime.now()))\n\t\t\n\t\t#Setup new DAY for each user\n\t\tnewDay = create_today(user, 0, step_count, db_session)\n\t\t\n\tlogging.info(\"Completed first POLL for {} users at {}.\".format(len(users), datetime.now()))\n\t\n\tdb_session.commit()\n\tclose_connection(db_session)\n\ndef poll_and_update():\n\t\"\"\"\n\tFor each user in the database:\n\t1. Poll WEconnect to update how many of the user's events for today\n\t have been completed.\n\t---\n\t2. Compute the user's progress for today.\n\t2. If the user has made progress since the last time this script was run,\n\t send the new progress to Fitbit as a walking activity with the following\n\t number of steps: progress * 1,000,000.\n\t\"\"\"\n\tdb_session = get_session()\n\tusers = db_session.query(User).all()\n\ttimestamp = datetime.now()\n\tfor user in users:\n\t\tlogging.info(\"polling for {}\".format(user))\n\t\t\n######\t#if a user for some reason doesn't have DAY setup, then rerun poll_and_save for all\n\t\ttoday = user.thisday()\n\t\tif today is None:\n\t\t\tpoll_and_save()\n\t\t\ttoday = user.thisday()\n\t\t\tlogging.info (\"{} was incomplete. 
Ran poll-save to add {} day\".format(user, today))\n#######\n\t\t\t\n\t\t# API call to WEconnect activities-with-events\n\t\tactivity_events = weconnect.get_todays_events(user, db_session)\n\t\tif len(activity_events) < 1: \n\t\t\tcontinue\n\t\t\n\t\t#Update all events in the DB\n\t\tnum_completed, event_ids = weconnect.update_db_events(activity_events, db_session)\n\t\tlogging.debug(\"Num activities completed: {} out of {}\".format(num_completed, len(activity_events)))\n\n\t\t#calculate progress if there is a change\n\t\tif len(event_ids) > 0:\n\t\t\tlogging.debug(\"detected {} vs {}\".format(num_completed, today.complete_count))\n\t\t\tif num_completed <= today.complete_count:\n\t\t\t\tlogging.info(\"No more progress made yet.\")\n\t\t\telse:\t\n\t\t\t\t#CALCULATE PROGRESS, BASED ON INDIVIDUAL METAPHOR\n\t\t\t\tprogress, step_count = calculate_progress( user, num_completed, event_ids, db_session)\n\t\t\t\tlogging.info(\"{} made {} progress today, with a updated step count of {}\".format(user.username, progress, step_count))\n\t\t\n\t\t#log today\n\t\tprintout(user, timestamp)\n\t\t\t\n\t\t# on the last poll of the day, create Day total for the user\n\t\t#cur_time = datetime.now().time()\n\t\t#if cur_time > LAST_POLL_TIME:\n\t\t#\tsave_today(user, num_completed, progress, db_session)\n\t\t#save_today(user, num_completed, progress, db_session)\n\n\tclose_connection(db_session)\n\n\ndef calculate_progress(user, num_completed, event_ids, session):\n\tprogress = 0\n\tstep_count = 0\t\n\tif user.metaphor == PLAN:\t\t\t\n\t\t#BASIC PLAN --- PERCENTAGE COMPLETE VS PLANNED\n\t\tprogress = calculate_progress_plan(num_completed, event_ids)\n\t\tlogging.info(\"Today's Percentage Progress for {} is {}\".format(user, progress))\n\t\t\t\n\t\t# Send progress to Fitbit\n\t\tstep_count = fitbit.update_progress_decimal(user, progress, session)\n\t\tlogging.info(\"Just added {} steps to {}'s account!\".format(step_count, user.username))\n\t\t\n\t\t#update percentage for today\n\t\tsave_today(user, num_completed, progress, session)\n\t\t\n\telif user.metaphor == WEIGHT:\t\t\n\t\t#WEIGHTED PROGRESS\n\t\tprogress = calculate_progress_weight(event_ids, session)\n\t\tlogging.info(\"Today's Weighted Progress for {} is {}\".format(user, progress))\n\t\t\n\t\t# Send progress to Fitbit\n\t\t#step_count = fitbit.update_progress_decimal(user, progress, session)\n\t\tlogging.info(\"Just added {} steps to {}'s account!\".format(step_count, user.username))\n\t\t\n\t\t#update percentage for today\n\t\tsave_today(user, num_completed, progress, session)\t\t\n\t\t\t\t\t\n\telif user.metaphor == TALLY:\n\t\t# TALLY - BASIC COUNT\t\n\t\tprogress = calculate_progress_tally(num_completed, event_ids)\n\t\tlogging.info(\"Today's TALLY Progress for {} is {}\".format(user, progress))\n\t\t\t\n\t\t# Send progress to Fitbit\n\t\ttoday = user.thisday()\n\t\tnew_step_count = progress - today.computed_progress\n\t\tstep_count = fitbit.update_progress_count(user, new_step_count, session)\n\t\tlogging.info(\"Just added {} steps to {}'s account!\".format(step_count, user.username))\n\t\t\n\t\t#update step count for today ( one LED per activitiy)\n\t\tsave_today(user, num_completed, step_count, session)\t\t\t\t\n\t\t\t\n\telif user.metaphor == CHARGE:\n\t\t# CHARGE - WEIGHTED COUNT\n\t\ttoday = user.thisday()\n\t\tlogging.debug(\"User {} prior count: {}, now completed {}\".format(user.username, today.complete_count, num_completed ))\n\t\tif today.complete_count < num_completed:\n\t\t\tprogress = calculate_progress_charge(num_completed, 
event_ids, session) \n\t\t\tprogress_change = progress - today.computed_progress\n\t\t\tlogging.info(\"Today's CHARGE Progress update for {} is {}\".format(user, progress_change))\n\t\t\t\n\t\t\t# Send progress change to Fitbit\n\t\t\t#step_count = fitbit.update_progress_count(user, progress_change, session)\n\t\t\tstep_count = progress\n\t\t\tlogging.info(\"Just added {} steps to {}'s account!\".format(step_count, user.username))\n\n\t\t\t#update step count for today ( weighted*.10 per activity)\n\t\t\tsave_today(user, num_completed, step_count, session)\n\t\telse:\n\t\t\tstep_count = fitbit.get_dashboard_state(user)\n\t\t\tprogress = user.thisday().computed_progress\n\treturn progress, step_count\n\ndef calculate_progress_plan(num_events_completed, event_id_list):\n\tprogress = num_events_completed / float(len(event_id_list))\n\treturn progress #percentage\n\t\t\ndef calculate_progress_weight(event_id_list, session):\n\ttarget= 0\n\tcompleted=0\n\tevs = session.query(Event).filter(Event.eid.in_(event_id_list)).all()\n\tfor ev in evs:\n\t\tweight = ev.activity.weight\n\t\ttarget += weight\n\t\tif ev.completed:\n\t\t\tcompleted += weight\n\n\tprogress = completed / float(target)\n\t#logging.info(\"weighted progress: {} / {} = {}\".format(completed, target, progress ))\n\treturn progress #percentage\n\ndef calculate_progress_tally(num_completed, event_id_list):\n\tmax_steps = 100000\n\tactivity_value = max_steps / 4 # number of LED's visible\n\treturn num_completed * activity_value\t\t#raw number of steps\n\ndef calculate_progress_charge(num_completed, event_id_list, session):\n\tmax_steps = fitbit.DEFAULT_GOAL\n\tactivity_value = fitbit.STEPS_PER_POINT\n\ttotal = 0\n\t\t\n\tevs = session.query(Event).filter(Event.eid.in_(event_id_list)).all()\n\tfor ev in evs:\n\t\tweight = ev.activity.weight\n\t\tif ev.completed:\n\t\t\tweighted_value = activity_value * weight\n\t\t\ttotal += weighted_value\n\treturn total\t\n\n\ndef create_today(user, checkin_count, today_progress, session):\n\t'''\n\ton FIRST Poll, create a new DAY for the user, save the calculated checkin count and progress \n\t'''\n\tthisDay = user.thisday() #prevent duplicates\n\tif thisDay is None:\n\t\tthisDay = Day(user_id=user.id, date=datetime.now().date(), computed_progress=today_progress, complete_count=checkin_count)\t\n\t\tsession.add(thisDay)\n\t\tsession.commit()\n\telse:\n\t\tthisDay.computed_progress = today_progress\n\t\tthisDay.complete_count = checkin_count\n\t\t\n\t#add day id to each event in DB for better searchability\n\ttoday_events = weconnect.get_events_for_user(user, session)\n\tfor ev in today_events:\n\t\tev.day_id = thisDay.id\n\n\tlogging.info(\"Creating this day: {}\".format(thisDay))\t\n\ttry:\t\t\n\t\tsession.commit()\n\texcept:\n\t\tsession.rollback()\n\t\ndef save_today(user, checkin_count, today_progress, session):\n\t'''\n\ton Poll, save the calculated checkin count and progress \n\t'''\n\tthisDay = user.thisday()\n\t#only update if there has been a change\n\tif thisDay.complete_count < checkin_count:\t\n\t\tthisDay.computed_progress = today_progress \n\t\tthisDay.complete_count = checkin_count\n\tsession.commit()\n\n\ndef printout(user, timestamp):\n\t# timestamp, username, metaphor, day.complete_count, day.computed_progress, list of activities and weights, \n\tlogging.debug(\"printing...\")\n\tfile_dict = {}\n\tfile_dict[\"timestamp\"] = timestamp\n\tfile_dict[\"user\"] = user.username\n\tfile_dict[\"metaphor\"] = user.metaphor\n\t\n\tday = user.thisday()\n\tfile_dict[\"checkins\"] = 
day.complete_count\n\tfile_dict[\"computed_progress\"] = day.computed_progress\n\t\n\tact_list = []\n\tactivities = user.activities.all()\n\tfor a in activities:\n\t\tact = (a.name, a.weight)\n\t\tact_list.append(act)\n\tfile_dict[\"activities\"] = act_list\n\twith open(\"log.csv\", \"a\", newline='') as file:\n\t\tfieldnames = file_dict.keys()\n\t\twriter = csv.DictWriter(file, fieldnames=fieldnames, quotechar='\"', quoting=csv.QUOTE_NONNUMERIC)\n\t\twriter.writerow(file_dict)\n\t\t\n\nif __name__ == \"__main__\":\n\t\t\n\tif len(sys.argv) == 1:\n\t\tpoll_and_save()\n\t\n\telif int(sys.argv[1]) == 0:\n\t\tlogging.info(\"Initiating first poll of the day...\")\n\t\tpoll_and_save()\n\telif int(sys.argv[1]) == 1:\n\t\tlogging.info(\"Initiating update poll at {}\".format(datetime.now()))\n\t\tpoll_and_update()\n\n\t#debug option\n\telif int(sys.argv[1]) == 3:\n\t\tresult = calculate_progress_plan( 3, [1,2,3,4,5])\n\t\tprint(result)\n\t\tresult = calculate_progress_tally(3, [1,2,3,4,5])\n\t\tprint(result)\n\n","repo_name":"jazzij/powertoken","sub_path":"background/polling.py","file_name":"polling.py","file_ext":"py","file_size_in_byte":10462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"860244331","text":"from numpy import *\nimport numpy as np\nA=1.00*random.rand(1000,1000) # randomly initialize A\nX=1.00*np.ones((1000,1))   # initial all-ones X\nb=1.00*np.dot(A,X)  # generate b\ndef ACG(beta=1000000):\n    l=0.00\n    xs=X\n    x0=2.00*np.ones((1000,1)) # initial x\n    y0=x0   # initial y\n    xs=x0   # initialize x\n    ys=y0   # initialize y\n    yq=y0\n    count=0    # iteration counter\n    for i in range (1000):\n        l=(1+np.sqrt(1+4*l**2))/2    # define lambda\n        lj=(1+np.sqrt(1+4*l**2))/2   # update lambda\n        gama=(1-l)/lj     # compute gamma\n        w=(np.dot(np.transpose(np.dot(np.transpose(A),A)),xs)-np.dot(np.transpose(A),b))/beta    # current gradient\n        print(\"x的更新后的值:\")\n        print(xs)\n        ys=xs-w   # update y\n        xq=xs     # keep this x for the convergence check in the next iteration\n        xs=(1-gama)*ys+gama*yq      # update x according to the formula\n        yq=ys     # keep this y as the previous value for the next iteration\n        count+=1\n        if(np.linalg.norm(xs-xq)<=0.001):\n            break\n        if beta==0:   # the denominator beta must not be 0\n            break\n        if gama>0:    # gamma must not be greater than 0\n            break\n        if np.linalg.norm(np.dot(np.dot(np.transpose(A), A), xs) - np.dot(np.transpose(A), b))<0.001:\n            # stopping criterion: gradient 2-norm below 0.001\n            break\n    print(\"FInal!!!!\")\n    print(xs)    # print the resulting optimum\n    print(\"迭代次数:%d\"%count)\nif __name__==\"__main__\":\n    print(\"Start ACG!\")\n    ACG(200000)   # run the test\n","repo_name":"trafficker/8","sub_path":"ACG.py","file_name":"ACG.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35378851653","text":"import pandas as pd\nimport numpy as np\nfrom tensorflow import keras\n\n# Load model\nmodel = keras.models.load_model('model.h5')\n\n# Load test data\ndf_test = pd.read_csv('data/test.csv')\nx_test = df_test.to_numpy()\nx_test = x_test.reshape(df_test.shape[0], 28, 28, 1)\nx_test = x_test.astype('float32')\nx_test /= 255\n\npredictions = model.predict(x_test)\ntest_result = []\nfor i, p in enumerate(predictions):\n    test_result.append([int(i+1), int(np.argmax(p))])\n\nnp.savetxt(\n    'data/testResult.csv',\n    test_result,\n    header='ImageId,Label',\n    comments='',\n    delimiter=',',\n    fmt='%1.1i'\n)\n\n","repo_name":"ThierryBC-24/Digits-Recognizer","sub_path":"Model/model_testing.py","file_name":"model_testing.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29121386957","text":"import pandas as pd\nfrom colorama import Fore, Back\n\nfrom hygia import PreProcessData\nfrom hygia import 
FeatureEngineering\nfrom hygia import RandomForestModel\nfrom hygia import AnnotateData\n\nfrom hygia.parser.model_parser import ModelParser\nfrom hygia.parser.YAML_parser import YAMLParser\nfrom hygia.parser.pre_processing_parser import PreProcessingParser\nfrom hygia.parser.feature_engineering_parser import FeatureEngineeringParser\nfrom hygia.parser.annotate_data_parser import AnnotateDataParser\n \ndef run_with_config(yaml_path: str):\n initialParser = YAMLParser\n \n preProcessingParser = PreProcessingParser\n preProcessData = PreProcessData()\n \n featureEngineeringParser = FeatureEngineeringParser\n featureEngineering = FeatureEngineering\n \n annotateDataParser = AnnotateDataParser\n annotateData = AnnotateData\n \n modelParser = ModelParser\n randomForestModel = RandomForestModel\n\n config = initialParser(yaml_path).parse()\n \n for data in config['data_path']:\n # Load csv\n separator = config['separator']\n encoding = config['encoding']\n engine = config['engine']\n nrows = config['nrows']\n df = pd.read_csv(data, sep=separator, engine=engine, encoding=encoding, nrows=nrows)\n \n results = pd.DataFrame()\n print(f'{Fore.MAGENTA}------ HYGIA ------{Fore.WHITE}')\n \n # Pre processing \n print(30*'-')\n print(f'{Back.WHITE }{Fore.BLACK}Running PRE PROCESSING...{Back.BLACK }{Fore.WHITE}')\n \n columns_name = list(df.columns)\n \n columns_set, columns_name = preProcessingParser(columns_name).parse(config['pre_processing'])\n for columns in columns_set: \n for key, value in columns.items():\n df = preProcessData.pre_process_data(df, value, key)\n \n # Feature engineering\n print(30*'-')\n print(f'{Back.WHITE }{Fore.BLACK}Running FEATURE ENGINEERING...{Back.BLACK }{Fore.WHITE}')\n \n features_configs, columns_alias = featureEngineeringParser(columns_name).parse(config['feature_engineering'])\n \n for feature_config in features_configs:\n feature_columns = feature_config['columns']\n lang = feature_config['data_lang']\n \n for column in feature_columns:\n dimensions = feature_config['dimensions'][column]\n df = preProcessData.handle_nulls(df, column)\n df = featureEngineering(lang=lang, dimensions=dimensions).extract_features(df, column)\n \n # Annotate Data\n print(30*'-')\n print(f'{Back.WHITE }{Fore.BLACK}Running ANNOTATE DATA...{Back.BLACK }{Fore.WHITE}')\n\n annotate_data_configs = annotateDataParser().parse(config['annotate_data'])\n \n for annotate_data_config in annotate_data_configs:\n columns = annotate_data_config['columns']\n for column in columns: \n thresholds = annotate_data_config['thresholds']\n df = annotateData().annotate_data(df, column, thresholds) \n \n # Model\n print(30*'-')\n print(f'{Back.WHITE }{Fore.BLACK}Running MODEL (KEYBOARD-SMASH)...{Back.BLACK }{Fore.WHITE}')\n \n model_configs = modelParser(columns_alias).parse(config['model'])\n for model_config in model_configs:\n model_columns = model_config['columns']\n trained_model_file = model_config['trained_model_file']\n \n for column in model_columns:\n features_columns = [col for col in df if (col.startswith('feature_ks') or col.startswith('feature_we') or col.startswith('feature_re')) and col.endswith(column)]\n \n randomForest = randomForestModel(trained_model_file)\n # randomForest.train_and_get_scores(df, column, features_columns)\n \n results[column] = df[column]\n results[f'prediction_{column}'] = randomForest.predict(df[features_columns].values)\n \n if(config['output_folder']):\n output_folder = config['output_folder']\n results[results[f'prediction_{column}'] == 1] \\\n .loc[:, 
results.columns.str.endswith(column)] \\\n .to_csv(f'{output_folder}prediction_{column}.csv')\n print(30*'-')\n print(f'{Fore.GREEN}exporting to {output_folder}prediction_{column}.csv{Fore.WHITE}')\n \n del config['pre_processing']\n del config['feature_engineering']\n del config['annotate_data']\n del config['model']\n \n return results","repo_name":"hygia-org/hygia","sub_path":"hygia/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4746,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"39127896499","text":"import os\nimport openai\nimport urllib.request\n\napi_key = os.environ[\"api_key\"]\nopenai.api_key = api_key\n\n\nmodel_engine = \"text-davinci-003\"\n\nprompt = \"chatGPT에 대해서 설명해줘\"\n\ncompletion = openai.Completion.create(\n engine=model_engine,\n prompt=prompt,\n max_tokens=1024,\n n=1,\n stop=None,\n temperature=0.5,\n )\nprint(completion.choices[0].text)","repo_name":"agape1225/lectures","sub_path":"python web framework/week11/[11]chatgpt/completion.py","file_name":"completion.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18401605292","text":"from Leetcode.OtherAlgorithm.ListNode import *\n\n\ndef find_first_common_node(head1, head2):\n if not head1 or not head2:\n return -1\n\n p1, p2 = head1, head2\n l1, l2 = 0, 0\n while p1:\n p1 = p1.next\n l1 += 1\n while p2:\n p2 = p2.next\n l2 += 1\n\n print(l1, l2)\n p = head1 if l1 > l2 else head2\n q = head2 if l2 <= l1 else head1\n for i in range(abs(l1 - l2)):\n p = p.next\n\n while p and q and p != q:\n if p == q:\n return q.data\n p = p.next\n q = q.next\n\n return -1\n\n\nif __name__ == '__main__':\n # test\n list1_head = ListNode(None)\n for i in range(0, 14, 2):\n p = ListNode(i)\n add_node(list1_head, p)\n\n list2_head = ListNode(None)\n for i in range(1, 11, 3):\n p = ListNode(i)\n add_node(list2_head, p)\n\n p = ListNode(13)\n add_node(list1_head, p)\n add_node(list2_head, p)\n\n for i in range(0, 4, 2):\n p = ListNode(i)\n add_node(list1_head, p)\n\n print_list(list1_head)\n print_list(list2_head)\n\n print(find_first_common_node(list1_head.next, list2_head.next))","repo_name":"wbq9224/Leetcode_Python","sub_path":"AimOffer/FirstCommonNodesInLists.py","file_name":"FirstCommonNodesInLists.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33563540039","text":"# Initialize the Tic-Tac-Toe board\nboard = [\" \" for _ in range(9)]\n\n# Function to display the Tic-Tac-Toe board\ndef display_board(board):\n print(f\"{board[0]} | {board[1]} | {board[2]}\")\n print(\"---------\")\n print(f\"{board[3]} | {board[4]} | {board[5]}\")\n print(\"---------\")\n print(f\"{board[6]} | {board[7]} | {board[8]}\")\n\n# Function to check if a player has won\ndef check_win(board, player):\n # Check rows, columns, and diagonals\n win_conditions = [\n [0, 1, 2], [3, 4, 5], [6, 7, 8], # Rows\n [0, 3, 6], [1, 4, 7], [2, 5, 8], # Columns\n [0, 4, 8], [2, 4, 6] # Diagonals\n ]\n \n for condition in win_conditions:\n if all(board[i] == player for i in condition):\n return True\n return False\n\n# Function to check if the board is full\ndef is_board_full(board):\n return \" \" not in board\n\n# Main game loop\ncurrent_player = \"X\"\n\nwhile True:\n display_board(board)\n move = int(input(f\"Player {current_player}, enter your move (1-9): \")) - 1\n\n if board[move] 
== \" \":\n board[move] = current_player\n if check_win(board, current_player):\n display_board(board)\n print(f\"Player {current_player} wins!\")\n break\n elif is_board_full(board):\n display_board(board)\n print(\"It's a tie!\")\n break\n current_player = \"O\" if current_player == \"X\" else \"X\"\n else:\n print(\"Invalid move. Try again.\")\n\n","repo_name":"Agniva144/Hackob","sub_path":"Program's_Contributed_By_Contributors/Python_Programs/TicTacToe.py","file_name":"TicTacToe.py","file_ext":"py","file_size_in_byte":1471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34408216186","text":"import math\n\nN, M, K = map(int, input().split())\n\ntmp = 0\nif N//2 < M :\n tmp = N//2\nelse :\n tmp = M\n\nN -= tmp*2\nM -= tmp\n\nif (M+N) > K :\n print(tmp)\nelse :\n K -= (M+N)\n print(tmp - math.ceil(K/3))\n","repo_name":"KimYeonAh/Backjoon","sub_path":"python/Boj_2875.py","file_name":"Boj_2875.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24291026737","text":"#encoding=utf-8\nimport redis\nfrom subprocess import Popen, PIPE\n\nredis_server_info = {\n\t\"host\" : \"localhost\",\n\t\"port\" : 6379,\n\t\"db\" : 0\n}\n\nredis_cli_info = {\n\t\"cmd\" : \"redis-cli\"\n}\n\nr = redis.Redis(host=redis_server_info[\"host\"],\n\t\t\t\tport=redis_server_info[\"port\"],\n\t\t\t\tdb=redis_server_info[\"db\"])\n\n# run redis cli from console\ndef execute_redis_cli(cmd):\n\tredis_cli_exe = redis_cli_info[\"cmd\"]\n\tparams = [redis_cli_exe] + cmd.split(' ')\n\toutput = Popen(params, stdout=PIPE, stderr=PIPE)\n\tif output is not None:\n\t\tstdout = output.stdout.read()\n\t\terr = output.stderr.read()\n\t\toutput.close()\n\t\treturn stdout, err\n\treturn None, None","repo_name":"c4pt0r/redmoon","sub_path":"redismon.py","file_name":"redismon.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11359787850","text":"__author__ = 'alexto@google.com (Alexis O. 
Torres)'\n\nimport json\nimport webapp2\n\nfrom common.handlers import base\nfrom models import screenshots\nfrom utils import screenshots_util\n\n\nclass UploadHandler(base.BaseHandler):\n \"\"\"Class for handling uploads.\"\"\"\n\n def post(self):\n \"\"\"Handles uploading a new screenshot.\"\"\"\n # Required params.\n data = self.GetRequiredParameter('screenshot_data')\n source = self.GetRequiredParameter('source')\n project = self.GetRequiredParameter('project')\n # Optional params.\n source_id = self.GetOptionalParameter('source_id', '')\n caption = self.GetOptionalParameter('caption', None)\n details = self.GetOptionalParameter('details', None)\n labels = self.GetOptionalParameter('labels', None)\n if labels:\n # If present, labels is a JSON encoded list of strings,\n # decode it.\n labels = json.loads(labels)\n\n screenshot = screenshots.Add(data=data, source=source, source_id=source_id,\n project=project, caption=caption,\n details=details, labels=labels)\n\n screenshot_id = screenshot.key().id()\n screenshot_url = screenshots_util.RetrievalUrl(\n self.request.url, screenshot_id)\n self.response.out.write(\n json.dumps({'id': screenshot_id, 'url': screenshot_url}))\n\n\nclass GetHandler(base.BaseHandler):\n \"\"\"Class for handling fetching a screenshot.\"\"\"\n\n def get(self):\n \"\"\"Handles retrieving an existing screenshot.\"\"\"\n screenshot_id = self.GetRequiredParameter('id')\n screenshot = screenshots.GetById(screenshot_id=screenshot_id)\n if not screenshot:\n self.error(400)\n return\n\n self.response.headers['Content-Type'] = 'image/png'\n self.response.out.write(screenshot.data)\n\n\nclass SearchHandler(base.BaseHandler):\n \"\"\"Class for handling searching for a screenshot.\"\"\"\n\n def get(self):\n \"\"\"Handler retrieving a list of screenshots.\"\"\"\n # Required params.\n source = self.GetRequiredParameter('source')\n # Optional params.\n source_id = self.GetOptionalParameter('source_id', None)\n project = self.GetOptionalParameter('project', None)\n limit = int(self.GetOptionalParameter('max', screenshots.DEFAULT_LIMIT))\n\n matches = screenshots.GetScreenshots(source=source,\n source_id=source_id,\n project=project,\n limit=limit)\n request_url = self.request.url\n result = [screenshots_util.RetrievalUrl(request_url, curr.key().id())\n for curr in matches]\n self.response.out.write(json.dumps(result))\n\n\napp = webapp2.WSGIApplication(\n [('/screenshots/upload', UploadHandler),\n ('/screenshots/fetch', GetHandler),\n ('/screenshots/search', SearchHandler)\n ], debug=True)\n","repo_name":"vinhlh/bite-project","sub_path":"server/handlers/screenshots.py","file_name":"screenshots.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"43747675606","text":"from LOGS.log import Log\nfrom .config import config\nimport psycopg2\nclass DBConnection:\n conn = None\n cur = None\n _instance = None\n\n def __new__(cls):\n if not cls._instance:\n cls._instance = super(DBConnection,cls).__new__(cls)\n return cls._instance\n \n def connect_db(self):\n try:\n params = config()\n self.conn = psycopg2.connect(**params)\n self.cur = self.conn.cursor()\n Log.log_connection(True)\n return True\n except psycopg2.Error as e:\n print(f'{e}')\n return False\n \n def create_table(self,name,table):\n try:\n self.cur.execute(f\"create table if not exists {name}(tid serial primary key,{table}) ;\")\n self.conn.commit()\n Log.log_read()\n return True\n except psycopg2.Error as e:\n 
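# Editor's note (added comment, not in the original): after a psycopg2 error the\n            # connection sits in an aborted transaction, so a more robust handler would\n            # typically roll back before reusing the cursor, e.g.:\n            #     self.conn.rollback()\n            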
print(f'{e}')\n return False\n \n #inserting data through execute\n def insert_data(self,name,field,data):\n try:\n self.cur.execute(f\"insert into {name} {field} values ({data});\")\n self.conn.commit()\n Log.log_insert()\n except psycopg2.Error as e:\n print(f'{e}')\n\n #inserting data through executemany\n def insert_data_executemany(self,insert_query,tup):\n try:\n self.cur.executemany(f\"{insert_query}\", tup)\n self.conn.commit()\n Log.log_insert()\n except psycopg2.Error as e:\n print(f'{e}')\n #inserting data through mogrify\n def insert_data_mogrify(self,table_name,field,x,tup):\n try:\n mogrify_values = ','.join(self.cur.mogrify(x,i).decode('utf-8') for i in tup)\n self.cur.execute(f\"insert into {table_name} {field} values {mogrify_values}\")\n self.conn.commit()\n Log.log_insert()\n except psycopg2.Error as e:\n print(f'{e}')\n def close_db(self):\n try:\n self.conn.close()\n Log.log_close(True)\n except psycopg2.Error as e:\n print(f'{e}')\n","repo_name":"prakashyadava/Task1_V2","sub_path":"Connections/allConn.py","file_name":"allConn.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"59810562","text":"from django.shortcuts import render\nfrom .models import Currency\nfrom datetime import datetime\nfrom django.http import JsonResponse\nfrom .models import Currency\nimport decimal\n\n\ndef exchange_rate(request):\n currency = Currency.objects.all()\n time = datetime.now()\n context = {\n 'Currency': currency,\n 'time': time\n }\n return render(request, 'currency_form.html', context)\n\n\ndef convert_currencies(request):\n if request.is_ajax():\n currency_from = Currency.objects.get(pk=request.POST.get('currency_from'))\n currency_to = Currency.objects.get(pk=request.POST.get('currency_to'))\n currency_value = request.POST.get(\"currency_value\")\n result = decimal.Decimal(float(currency_value)) * (currency_from.course / currency_to.course)\n result = \"{0:.{1}f}\".format(result, 2)\n data = {\n \"result\": result,\n }\n return JsonResponse(data)\n","repo_name":"Nikolay89Veselinov/Python-Django","sub_path":"contrib/currencies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24451816025","text":"from django.db import models\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom apps.authentication.models import User\n\n\nclass Comment(models.Model):\n \"\"\" A class to allow users to comment on questions and\n eventually answers \n \"\"\"\n comment = models.TextField()\n author = models.ForeignKey(User, on_delete=models.CASCADE)\n commented_at = models.DateTimeField(auto_now_add=True)\n content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n\n def __str__(self):\n return f\"{self.author}\"\n\n class Meta:\n ordering = ['-commented_at']\n","repo_name":"armstrongsouljah/StackOverflow-Clone","sub_path":"apps/comments/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4913635941","text":"from typing import List\n\n\nclass Solution:\n def combinationSum(self, candidates: List[int], target: int) -> List[List[int]]:\n 
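# Editor's note (added comments): classic backtracking for combination sum.\n        # A candidate may be reused because the recursion passes cs[i:] (not cs[i+1:]),\n        # and branches with cs[i] > t are pruned early.\n        # Worked example: candidates=[2,3,6,7], target=7 explores 2->2->3 and 7,\n        # so the method returns [[2, 2, 3], [7]].\n        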
result = []\n\n def backtrack(t, cs, path):\n for i in range(len(cs)):\n if cs[i] > t:\n continue\n\n new_path = path.copy()\n new_path.append(cs[i])\n if cs[i] == t:\n result.append(new_path)\n else:\n backtrack(t - cs[i], cs[i:], new_path)\n\n backtrack(target, candidates, [])\n\n return result","repo_name":"fzdy1914/leetcode","sub_path":"backtrack/39-combination-sum.py","file_name":"39-combination-sum.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2890505694","text":"import re\r\ndef oh():\r\n lines=int()\r\n with open ('text.txt', 'r', encoding='utf-8') as first:\r\n old_text=first.readlines()\r\n for line in old_text:\r\n if '' in line:\r\n break\r\n else:\r\n lines+=1\r\n with open ('endtext.txt', 'w', encoding='utf-8') as second:\r\n lines1=str(lines)\r\n second.write(lines1)\r\ndef oops():\r\n slov={}\r\n with open ('text.txt', 'r', encoding='utf-8') as first:\r\n old_text=first.readlines()\r\n for line in old_text:\r\n if \" 0:\n time.sleep(wait)\n _open.previous = current + wait\n else:\n _open.previous = current\n # Remove None values from the parameters\n for key, value in params.items():\n if value is None:\n del params[key]\n # Tell Entrez that we are using Biopython\n if not \"tool\" in params:\n params[\"tool\"] = \"biopython\"\n # Tell Entrez who we are\n if not \"email\" in params:\n if email!=None:\n params[\"email\"] = email\n # Open a handle to Entrez.\n options = urllib.urlencode(params, doseq=True)\n cgi += \"?\" + options\n handle = urllib.urlopen(cgi)\n\n # Wrap the handle inside an UndoHandle.\n uhandle = File.UndoHandle(handle)\n\n # Check for errors in the first 7 lines.\n # This is kind of ugly.\n lines = []\n for i in range(7):\n lines.append(uhandle.readline())\n for i in range(6, -1, -1):\n uhandle.saveline(lines[i])\n data = ''.join(lines)\n \n if \"500 Proxy Error\" in data:\n # Sometimes Entrez returns a Proxy Error instead of results\n raise IOError(\"500 Proxy Error (NCBI busy?)\")\n elif \"502 Proxy Error\" in data:\n raise IOError(\"502 Proxy Error (NCBI busy?)\")\n elif \"WWW Error 500 Diagnostic\" in data:\n raise IOError(\"WWW Error 500 Diagnostic (NCBI busy?)\")\n elif \"Service unavailable!\" in data :\n #Probably later in the file it will say \"Error 503\"\n raise IOError(\"Service unavailable!\")\n elif \"Bad Gateway!\" in data :\n #Probably later in the file it will say:\n # \"The proxy server received an invalid\n # response from an upstream server.\"\n raise IOError(\"Bad Gateway!\")\n elif data.startswith(\"Error:\") :\n #e.g. 'Error: Your session has expired. Please repeat your search.\\n'\n raise IOError(data.strip())\n elif data.startswith(\"The resource is temporarily unavailable\") :\n #This can occur with an invalid query_key\n #Perhaps this should be a ValueError?\n raise IOError(\"The resource is temporarily unavailable\")\n elif data.startswith(\"download dataset is empty\") :\n #This can occur when omit the identifier, or the WebEnv and query_key\n #Perhaps this should be a ValueError?\n raise IOError(\"download dataset is empty\")\n elif data[:5] == \"ERROR\":\n # XXX Possible bug here, because I don't know whether this really\n # occurs on the first line. I need to check this!\n raise IOError(\"ERROR, possibly because id not available?\")\n # Should I check for 404? timeout? 
etc?\n return uhandle\n\n_open.previous = 0\n","repo_name":"chapmanb/biosqlweb","sub_path":"app/lib/python/biopython-1.50-py2.5-freebsd-7.1-PRERELEASE-i386.egg/Bio/Entrez/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":12676,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"} +{"seq_id":"19201468721","text":"import os\n\nfolder = r'/path/to/file/'\ncount = 0\nfor file_name in os.listdir(folder):\n source = folder + file_name\n destination = folder + str(count) + \".tif\"\n os.rename(source, destination)\n count += 1\n\nprint('All Files Renamed')\n","repo_name":"BNMEZR/immunofluorescence_data","sub_path":"change-name.py","file_name":"change-name.py","file_ext":"py","file_size_in_byte":242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"7798916417","text":"import sys\nsys.stdin = open('sample_input.txt')\n\nT = int(input())\nfor tc in range(1, T+1):\n N = int(input())\n a = list(map(int, input().split()))\n # 버블소트를 이용해 정렬을 해서\n for end in range(len(a)-1, 0, -1):\n for i in range(0, end):\n if a[i] > a[i+1]:\n a[i], a[i+1] = a[i+1], a[i]\n\n result = a[-1] - a[0] # 정렬된 리스트의 가장 마지막 값에서 앞의 값을 빼면\n\n print('#{} {}'.format(tc, result))\n","repo_name":"asooso1/ssafy_algorithm","sub_path":"0810/한채은/4828_min_max/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43024595537","text":"\nfrom accounts.api.permissions import IsAdmin\nfrom frontend_views.api.seralizers import FrontEndMenuLinkSerializer, FrontEndMenuLinkTableModelSerializer\nfrom core.api.pagination import StandardResultPagination\nfrom rest_framework import (status, permissions, generics,)\nfrom frontend_views.models import FrontEndMenuLink\nfrom rest_framework.response import Response\n# from rest_framework.views import APIView\nfrom accounts.models import UserProfile\nfrom django.contrib.contenttypes.models import ContentType\n\n\nfrom rest_framework import generics\n\nclass FrontEndMenuLinkListAPIView(generics.ListAPIView):\n permission_classes = (permissions.IsAuthenticated, IsAdmin,)\n serializer_class = FrontEndMenuLinkTableModelSerializer\n pagination_class = StandardResultPagination\n\n def get_queryset(self):\n return FrontEndMenuLink.objects.all()\n\n def get_serializer_context(self, *args, **kwargs):\n context = super(FrontEndMenuLinkListAPIView, self).get_serializer_context(*args, **kwargs)\n context['request'] = self.request\n return context\n\n\nclass FrontEndMenuLinkAPIView(generics.ListAPIView):\n permission_classes = (permissions.IsAuthenticated, IsAdmin,)\n serializer_class = FrontEndMenuLinkSerializer\n\n queryset = FrontEndMenuLink.objects.all()\n\n def get_serializer_context(self, *args, **kwargs):\n context = super(FrontEndMenuLinkAPIView, self).get_serializer_context(*args, **kwargs)\n context['request'] = self.request\n return context\n\n\nclass ObjectsFrontEndMenuLinkListAPIView(generics.ListAPIView):\n permission_classes = (permissions.IsAuthenticated, IsAdmin,)\n serializer_class = FrontEndMenuLinkSerializer\n\n def get(self, request):\n links = FrontEndMenuLink.objects.all()\n return Response(self.serializer_class(links, many=True).data, status=status.HTTP_200_OK)\n\n\n def post(self, request):\n data = request.data\n model_type = data.pop('content_type')\n object_id = data.pop('object_id')\n app_label = data.pop('app_label')\n\n if model_type == 
'userprofile':\n model_type = 'user'\n profile = UserProfile.objects.filter(pk=object_id)\n if profile.exists():\n object_id = profile.first().user.id\n\n model_qs = ContentType.objects.filter(app_label=app_label, model=model_type)\n\n if model_qs.exists():\n Model = model_qs.first().model_class()\n record_qs = Model.objects.filter(pk=object_id)\n\n if record_qs.exists():\n record = record_qs.first()\n\n incomin_frontend_views = []\n record_frontend_views = record.frontend_views.all()\n\n existed_frontend_views_ids = [p.id for p in record_frontend_views]\n data_keys = data.keys()\n\n for key in data_keys:\n if data.get(key) in [None, '']:\n continue\n for p_id in data.get(key):\n if not p_id in incomin_frontend_views:\n incomin_frontend_views.append(int(p_id))\n\n for p_id in incomin_frontend_views:\n if p_id not in existed_frontend_views_ids:\n perm = FrontEndMenuLink.objects.filter(pk=p_id)\n record.frontend_views.add(perm.first())\n\n for p in record_frontend_views:\n if not p.id in incomin_frontend_views:\n record.frontend_views.remove(p) \n\n return_data = self.serializer_class(record.frontend_views.all(), many=True)\n return Response(return_data.data, status=status.HTTP_200_OK)\n return Response({\"error\": \"We could not identify the record with id \" + object_id}, status=status.HTTP_200_OK)\n return Response({\"error\": \"No such object \" + model_type}, status=status.HTTP_200_OK)\n\n","repo_name":"wisnercelucus/lerecul-backend","sub_path":"frontend_views/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33406355204","text":"from ulakbus.models import User, ZamanDilimleri\nfrom zengine.lib.test_utils import BaseTestCase\n\n\nclass TestCase(BaseTestCase):\n\n def test_zaman_dilimi(self):\n usr = User.objects.get(username='ders_programi_koordinatoru_1')\n self.prepare_client('/zaman_dilimi_duzenle', user=usr)\n resp = self.client.post()\n for i in range(2):\n if i == 1:\n assert resp.json['objects'][1]['fields']['Zaman Aralığı'] == \"09:00:00-12:00:00\"\n else:\n assert resp.json['objects'][0][0] == 'Gün Dilimi'\n\n resp = self.client.post(cmd='degistir', zaman_dilimi='XUM8nQZv1eJ6cgyDXnvpVG9BmcA')\n\n assert resp.json['forms']['model']['baslangic_saat'] == '09'\n\n if i == 1:\n zaman_dilimi_form = {\n 'baslangic_saat': '10',\n 'baslangic_dakika': '00',\n 'bitis_saat': '12',\n 'bitis_dakika': '00',\n 'gun_dilimi': 'Sabah',\n 'kaydet': 1\n }\n resp = self.client.post(cmd='kayit', form=zaman_dilimi_form)\n\n assert resp.json['msgbox']['msg'] == \"Kaydınız başarıyla gerçekleşti\"\n else:\n resp = self.client.post(cmd='vazgec')\n\n resp = self.client.post(cmd='tamamla', form={'tamamla': 1})\n\n assert resp.json['msgbox']['title'] == 'Kayıt İşleminiz Tamamlanmıştır!'\n\n zd = ZamanDilimleri.objects.get('XUM8nQZv1eJ6cgyDXnvpVG9BmcA')\n\n assert zd.baslama_saat == '10'\n\n zd.baslama_saat = '09'\n zd.save()\n\n assert zd.baslama_saat == '09'\n","repo_name":"zetaops/ulakbus","sub_path":"tests/test_zaman_dilimi.py","file_name":"test_zaman_dilimi.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"tr","doc_type":"code","stars":101,"dataset":"github-code","pt":"48"} +{"seq_id":"39095331443","text":"import torch\nfrom torch import nn, Tensor\nfrom typing import Union, Tuple, List, Iterable, Dict\nimport torch.nn.functional as F\nfrom enum import Enum\nfrom ..SentenceTransformer import SentenceTransformer\n\nclass 
TripletDistanceMetric(Enum):\n    \"\"\"\n    The metric for the triplet loss\n    \"\"\"\n    COSINE = lambda x, y: 1 - F.cosine_similarity(x, y)\n    EUCLIDEAN = lambda x, y: F.pairwise_distance(x, y, p=2)\n    MANHATTAN = lambda x, y: F.pairwise_distance(x, y, p=1)\n\nclass TripletLoss(nn.Module):\n    \"\"\"\n    This class implements triplet loss. Given a triplet of (anchor, positive, negative),\n    the loss minimizes the distance between anchor and positive while it maximizes the distance\n    between anchor and negative. It computes the following loss function:\n\n    loss = max(||anchor - positive|| - ||anchor - negative|| + margin, 0).\n\n    Margin is an important hyperparameter and needs to be tuned accordingly.\n\n    For further details, see: https://en.wikipedia.org/wiki/Triplet_loss\n\n    :param model: SentenceTransformerModel\n    :param distance_metric: Function to compute distance between two embeddings. The class TripletDistanceMetric contains common distance metrics that can be used.\n    :param triplet_margin: The negative should be at least this much further away from the anchor than the positive.\n\n    Example::\n\n        from sentence_transformers import SentenceTransformer, SentencesDataset, LoggingHandler, losses\n        from sentence_transformers.readers import InputExample\n\n        model = SentenceTransformer('distilbert-base-nli-mean-tokens')\n        train_examples = [InputExample(texts=['Anchor 1', 'Positive 1', 'Negative 1']),\n            InputExample(texts=['Anchor 2', 'Positive 2', 'Negative 2'])]\n        train_dataset = SentencesDataset(train_examples, model)\n        train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=train_batch_size)\n        train_loss = losses.TripletLoss(model=model)\n    \"\"\"\n    def __init__(self, model: SentenceTransformer, distance_metric=TripletDistanceMetric.EUCLIDEAN, triplet_margin: float = 5):\n        super(TripletLoss, self).__init__()\n        self.model = model\n        self.distance_metric = distance_metric\n        self.triplet_margin = triplet_margin\n\n\n    def get_config_dict(self):\n        distance_metric_name = self.distance_metric.__name__\n        for name, value in vars(TripletDistanceMetric).items():\n            if value == self.distance_metric:\n                distance_metric_name = \"TripletDistanceMetric.{}\".format(name)\n                break\n\n        return {'distance_metric': distance_metric_name, 'triplet_margin': self.triplet_margin}\n\n    def forward(self, sentence_features: Iterable[Dict[str, Tensor]], labels: Tensor):\n        reps = [self.model(sentence_feature)['sentence_embedding'] for sentence_feature in sentence_features]\n\n        rep_anchor, rep_pos, rep_neg = reps\n        distance_pos = self.distance_metric(rep_anchor, rep_pos)\n        distance_neg = self.distance_metric(rep_anchor, rep_neg)\n\n        losses = F.relu(distance_pos - distance_neg + self.triplet_margin)\n        return losses.mean()","repo_name":"UKPLab/sentence-transformers","sub_path":"sentence_transformers/losses/TripletLoss.py","file_name":"TripletLoss.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":12439,"dataset":"github-code","pt":"48"} +{"seq_id":"23493829016","text":"from cmd.CmdBase import CmdBase\r\nfrom module.MocFactory import MocSingleton\r\nfrom cmd.CmdTextHandle import CmdTextHandle\r\nfrom db.DbInterface import DbSingleton\r\nfrom comm.macro import *\r\nfrom log.log import logger\r\n\r\n\r\nclass AddCmd(CmdBase):\r\n    def __init__(self, cmd):\r\n        super(AddCmd, self).__init__(cmd)\r\n\r\n    @classmethod\r\n    def check_para_consist(cls, moc_obj, para_2_val):\r\n        para_lst = moc_obj.get_all_para_name()\r\n        if len(para_lst) != len(para_2_val):\r\n            
logger.error(\"para_lst[%s] len is not equal to para_2_val[%s].\" % (para_lst, para_2_val))\r\n            return False\r\n\r\n        for para in para_lst:\r\n            if para not in para_2_val:\r\n                logger.error(\"para[%s] not in para_2_val[%s].\" % (para, para_2_val))\r\n                return False\r\n        return True\r\n\r\n    def execute(self):\r\n        is_suc, moc_name, para_2_val = CmdTextHandle.parse_cmd(self.cmd)\r\n        if not is_suc:\r\n            logger.error(\"parse_cmd error, cmd is [%s].\" % self.cmd)\r\n            return False, \"AddCmd, Error cmd format....\"\r\n\r\n        moc_ins = MocSingleton.get_instance()\r\n        moc_obj = moc_ins.get_moc(moc_name)\r\n        if moc_obj is None:\r\n            logger.error(\"can not get moc, moc_name is [%s].\" % moc_name)\r\n            return False, \"AddCmd, Error cmd name....\"\r\n\r\n        if not self.check_para_consist(moc_obj, para_2_val):\r\n            return False, \"cmd is Add, input para should consist with module....\"\r\n\r\n        ret_flag, error_info = moc_obj.pre_add_cmd_check(para_2_val)\r\n        if not ret_flag:\r\n            logger.error(\"pre_add_cmd_check fail, error info is [%s].\" % error_info)\r\n            return ret_flag, error_info\r\n\r\n        key_para_lst = moc_obj.get_key_para()\r\n        key_dict = {para: val for para, val in para_2_val.items() if para in key_para_lst}\r\n        para_dict = {para: val for para, val in para_2_val.items() if para not in key_para_lst}\r\n        db_obj = DbSingleton.get_instance().get_db_obj()\r\n        return db_obj.execute(DB_ACTION_ADD, moc_name, key_dict, para_dict)\r\n\r\n\r\n\r\n\r\n","repo_name":"Erignik/http_configure","sub_path":"app/cmd/AddCmd.py","file_name":"AddCmd.py","file_ext":"py","file_size_in_byte":2084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74673961424","text":"def alphabet_war(fight):\n    d = 0\n    i = 0\n    left = {'w': 4, 'p': 3, 'b': 2, 's':1}\n    right = {'m': 4, 'q': 3, 'd': 2, 'z':1}\n    for letter in fight: \n        if letter in left:\n            i += left[letter]\n        elif letter in right:\n            d += right[letter] \n    if i > d:\n        return 'Left side wins!'\n    elif d > i:\n        return 'Right side wins!'\n    else:\n        return \"Let's fight again!\"","repo_name":"Jacobo24/Una_hora_al_dia","sub_path":"lucha.py","file_name":"lucha.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23919542964","text":"# Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.\n#\n# (For example, the array [0,1,2,4,5,6,7] might become [4,5,6,7,0,1,2].)\n#\n# Search for a given target value: if it exists in the array, return its index; otherwise return -1.\n#\n# You may assume there are no duplicate elements in the array.\n#\n# Your algorithm's runtime complexity must be on the order of O(log n).\n\nclass Solution:\n    def search(self, nums: list, target: int) -> int:\n        lst_len = len(nums)\n        if lst_len == 0:\n            return -1\n        if lst_len == 1:\n            if nums[0] == target:\n                return 0\n            else:\n                return -1\n\n        ptr1, ptr2 = 0, lst_len - 1\n        if nums[ptr1] < nums[ptr2]:\n            return self.dichotomy_get_target(nums, ptr1, ptr2, target)\n        else:\n            demarcation_point = self.dichotomy_get_point(nums, ptr1, ptr2) # pivot point\n            if demarcation_point is None:\n                if target == nums[0]:\n                    return 0\n                else:\n                    return -1\n            else:\n                left = self.dichotomy_get_target(nums, 0, demarcation_point, target)\n                if left != -1:\n                    return left\n                return self.dichotomy_get_target(nums, demarcation_point + 1, lst_len - 1, target)\n\n\n\n        # print(ptr1, ptr2, nums[ptr1], nums[ptr2])\n\n\n    def dichotomy_get_point(self, lst: list, start, end): # binary search for the pivot point\n        if end - start == 1 and lst[start] > lst[end]:\n            return start\n        elif end - start == 1 and lst[start] == lst[end]:\n            return None\n        elif end - start == 1:\n            return None\n        else:\n            midd = (end + start) // 2\n            if lst[midd] > lst[start]:\n                return 
self.dichotomy_get_point(lst, midd, end)\n            elif lst[midd] < lst[start]:\n                return self.dichotomy_get_point(lst, start, midd)\n            else:\n                left = self.dichotomy_get_point(lst, start, midd)\n                if left is not None:\n                    return left\n                right = self.dichotomy_get_point(lst, midd, end)\n                if right is not None:\n                    return right\n                else:\n                    return None\n    def dichotomy_get_target(self, lst: list, start, end, target):\n        if end - start == 1 or end == start:\n            if lst[end] != target and lst[start] != target:\n                return -1\n            else:\n                return end if lst[end] == target else start\n        else:\n            midd = (start + end) // 2\n            if lst[midd] > target:\n                return self.dichotomy_get_target(lst, start, midd, target)\n            if lst[midd] < target:\n                return self.dichotomy_get_target(lst, midd, end, target)\n            else:\n                return midd\n\n\n\n\n\nif __name__ == '__main__':\n    print(Solution().search([1, 2, 3, 4, 5], 4))","repo_name":"neko-niko/leetcode","sub_path":"搜索旋转最小数组/搜索旋转最小数组.py","file_name":"搜索旋转最小数组.py","file_ext":"py","file_size_in_byte":2981,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13529560604","text":"\"\"\"Contains common utilities.\"\"\"\r\n\r\nimport abc\r\nimport collections.abc\r\nimport os\r\nimport weakref\r\nfrom copy import copy, deepcopy\r\n\r\nclass AdderWithRefCount(abc.ABC):\r\n    \"\"\"Base class for classes that need addition and reference counting of added objects.\r\n\r\n    If the derived class overloads the clear() method, the overload must call the base only if\r\n    reference counting has been enabled.\r\n    \"\"\"\r\n    def __init__(self, count = True):\r\n        \"\"\"Initialize attributes.\r\n\r\n        count : bool, True if reference counting is enabled\r\n        \"\"\"\r\n        if not isinstance(count, bool):\r\n            raise TypeError(\"error: ctor param 'count' has to be of type 'bool'\")\r\n\r\n        # references to objects that were added to this object\r\n        self.__froms = [] if count else None\r\n\r\n        # references to objects that this object was added to\r\n        self.__tos = [] if count else None\r\n\r\n    @abc.abstractmethod\r\n    def _op_add(self, other):\r\n        \"\"\"Add an object to this one.\r\n\r\n        other: subclass of this class\r\n        \"\"\"\r\n\r\n    @abc.abstractmethod\r\n    def __eq__(self, other):\r\n        \"\"\"Overloaded '==' operator.\r\n\r\n        other: subclass of this class, the object to compare with\r\n\r\n        return: bool or NotImplemented\r\n                bool          : True if the two objects are equal\r\n                NotImplemented: if there's a parameter error\r\n        \"\"\"\r\n\r\n    @abc.abstractmethod\r\n    def __bool__(self):\r\n        \"\"\"Called when an object is used as a boolean in an expression.\r\n\r\n        return: bool, True if object evaluates as True\r\n        \"\"\"\r\n\r\n    def is_add(self, other):\r\n        \"\"\"Check if two objects can be added.\r\n\r\n        other: subclass of this class\r\n\r\n        return: bool, True if self + other is valid\r\n        \"\"\"\r\n        return self.__param_error(other, False)\r\n\r\n    def add(self, *others):\r\n        \"\"\"Add objects to this one.\r\n\r\n        others: tuple of subclass of this class\r\n        \"\"\"\r\n        for other in others:\r\n            self.__op_add(other)\r\n\r\n    def clear(self):\r\n        \"\"\"Clear all references of other objects to self and of self to other objects.\"\"\"\r\n        if self.__froms or self.__tos:\r\n            for wref in self.__froms:\r\n                wref().tos.remove(weakref.ref(self))\r\n            for wref in self.__tos:\r\n                wref().froms.remove(weakref.ref(self))\r\n            self.__froms.clear()\r\n            self.__tos.clear()\r\n\r\n    def __add__(self, other):\r\n        \"\"\"Overloaded '+' operator.\r\n\r\n        other: subclass of this class, the object to add from\r\n\r\n        return: 
see __op_add()\r\n        \"\"\"\r\n        return self.__op_add(other, True)\r\n\r\n    def __radd__(self, other):\r\n        \"\"\"Overloaded '+' operator.\r\n\r\n        Called when '__add__(self, other)' fails because 'self' is not a subclass of this class.\r\n        This call results in '__add__(other, self)' where 'other' is a subclass of this class.\r\n        This method is added to have better error messaging.\r\n\r\n        other: subclass of this class\r\n\r\n        return: see __add__()\r\n        \"\"\"\r\n        return self.__add__(other)\r\n\r\n    def __iadd__(self, other):\r\n        \"\"\"Overloaded '+=' operator.\r\n\r\n        other: subclass of this class, the object to add from\r\n\r\n        return: see __op_add()\r\n        \"\"\"\r\n        return self.__op_add(other)\r\n\r\n    def __deepcopy__(self, memo):\r\n        \"\"\"Overloaded method of the standard library copy.deepcopy().\r\n\r\n        When an object with references is copied custom behavior is required to update the\r\n        references. Also, if references are not shallow copied then the program never ends and\r\n        continues to consume more and more memory. On my system 70% of 32GB!\r\n\r\n        The code was taken from\r\n\r\n        https://stackoverflow.com/questions/1500718/how-to-override-the-copy-deepcopy-operations-for-a-python-object/24621200#24621200\r\n\r\n        based on Anthony Hatchkins' solution.\r\n\r\n        memo: dict(int, any)\r\n              int: id(obj)\r\n              any: type(obj)\r\n\r\n        return: subclass of this class, a newly constructed object\r\n        \"\"\"\r\n        obj = memo.get(id(self), None) # added these 3 lines based on the comments in the article\r\n        if obj: # to avoid possible infinite recursion (Antonín Hoskovec)\r\n            return obj\r\n\r\n        cls = self.__class__\r\n        obj = cls.__new__(cls)\r\n        memo[id(self)] = obj\r\n        for attr, value in self.__dict__.items():\r\n            # if shallow copy is not used we have the catastrophe described in the doc string\r\n            if '__froms' in attr:\r\n                setattr(obj, attr, copy(self.__froms))\r\n            elif '__tos' in attr:\r\n                setattr(obj, attr, copy(self.__tos))\r\n            else: # all other attributes are deepcopied\r\n                setattr(obj, attr, deepcopy(value, memo))\r\n\r\n        # update references in existing and new object\r\n        if self.__froms is not None:\r\n            if self:\r\n                obj.froms.append(weakref.ref(self))\r\n            for wref in obj.froms:\r\n                wref().tos.append(weakref.ref(obj))\r\n            for wref in obj.tos:\r\n                wref().froms.append(weakref.ref(obj))\r\n\r\n        return obj\r\n\r\n    def __del__(self):\r\n        \"\"\"Clear all references of other objects to self and of self to other objects.\r\n        \r\n        This is useful in case an object goes out of scope and is garbage collected.\r\n        \"\"\"\r\n        self.clear()\r\n\r\n    def _is_add(self, other):\r\n        \"\"\"Check if other can be added to this object.\r\n\r\n        Default implementation in case a derived class does not need this method.\r\n\r\n        other: subclass of this class\r\n\r\n        return: bool, True\r\n        \"\"\"\r\n        return True\r\n\r\n    @property\r\n    def froms(self):\r\n        \"\"\"Return a list of references to objects that were added to this object.\r\n\r\n        return: list, list elements are a subclass of this class\r\n        \"\"\"\r\n        return self.__froms\r\n\r\n    @property\r\n    def tos(self):\r\n        \"\"\"Return a list of references to objects that this object was added to.\r\n\r\n        return: list, list elements are a subclass of this class\r\n        \"\"\"\r\n        return self.__tos\r\n\r\n    def __op_add(self, other, op_plus = False):\r\n        \"\"\"Add one object to another.\r\n\r\n        other  : subclass of this class, the object to add from\r\n        op_plus: bool, True if operator '+' is used instead of '+='\r\n\r\n        return: subclass of this class,\r\n                or\r\n                NotImplemented, if param error and 
operator '+' is used\r\n \"\"\"\r\n if self.__param_error(other):\r\n if op_plus: # if param error and operator is '+'\r\n return NotImplemented\r\n\r\n return self # if param error and operator is '+='\r\n\r\n lhs = deepcopy(self) if op_plus else self\r\n\r\n if other:\r\n lhs._op_add(other)\r\n\r\n # the only way to check that ref counting is enabled is to compare to 'not None'\r\n if lhs.froms is not None:\r\n # add references to objects that have been added from and to\r\n lhs.froms.append(weakref.ref(other))\r\n other.tos.append(weakref.ref(lhs))\r\n lhs.froms.extend(other.froms)\r\n for wref in other.froms:\r\n wref().tos.append(weakref.ref(lhs))\r\n\r\n return lhs\r\n\r\n def __param_error(self, other, stdout = True):\r\n \"\"\"Validate parameters.\r\n\r\n other : subclass of this class, the object to add from\r\n stdout: bool, True if errors are to be printed\r\n\r\n return: bool: True if param error\r\n \"\"\"\r\n if other is self:\r\n if stdout:\r\n print(\"error: can't add object to itself\")\r\n return stdout\r\n\r\n if not isinstance(other, type(self)):\r\n if stdout:\r\n print(f\"error: 'other' = '{other}' must be of type \"\r\n f\"'{self.__class__.__module__}.{self.__class__.__name__}'\")\r\n return stdout\r\n\r\n if not self._is_add(other):\r\n if stdout:\r\n print(f\"error: all comparisons in method \"\r\n f\"'{self.__class__.__module__}.{self.__class__.__name__}._is_add()' \"\r\n \"must be true in order to add these two objects\")\r\n return stdout\r\n\r\n # the only way to check that ref counting is enabled is to compare to 'not None'\r\n if self.__froms is not None:\r\n if weakref.ref(other) in self.__froms:\r\n if stdout:\r\n print(\"error: 'other' has already been added to 'self'\")\r\n return stdout\r\n\r\n for wref in other.froms:\r\n if wref is weakref.ref(self):\r\n if stdout:\r\n print(\"error: 'self' has already been added to 'other'\")\r\n return stdout\r\n\r\n if wref in self.froms:\r\n if stdout:\r\n print(\"error: part of 'other' has already been added to 'self'\")\r\n return stdout\r\n\r\n return not stdout\r\n\r\ndef get_filenames(filenames, old_filenames = None ):\r\n \"\"\"Create a valid set of filenames based on an older set.\r\n\r\n filenames : sequence\r\n old_filenames: set of str or None\r\n\r\n return: set(str): valid filenames\r\n \"\"\"\r\n # keep unique filenames only and get their absolute path\r\n filenames = set(os.path.abspath(filename) for filename in filenames)\r\n\r\n for filename in filenames.copy(): # remove filenames that don't exist\r\n if not os.path.exists(filename):\r\n filenames.remove(filename)\r\n print(f\"error: {filename!r} does not exist\\n\")\r\n\r\n filenames -= old_filenames if old_filenames else set() # remove old filenames\r\n\r\n return filenames\r\n\r\ndef in_bisect(sorted_seq, val, pos = False, begin = -1, end = -1):\r\n \"\"\"Search the sorted sequence to find a value.\r\n\r\n The sequence must be sorted ascendingly. Optionally, a begin and end index may be specified if\r\n searching in a subsequence is desired. The default values of begin and end correspond to the\r\n entire sequence.\r\n\r\n sorted_seq: str, list, range or tuple\r\n val : The value to search for. 
The type of val must be a type that is comparable with the\r\n                type of the elements of the sequence.\r\n    pos       : bool, if True return position even if 'val' is not found\r\n    begin     : int, <= end and > -2, the begin index, defaults to -1 which is the beginning of the\r\n                sequence\r\n    end       : int, >= begin and > -2, the end index, defaults to -1 which is the end of the\r\n                sequence\r\n\r\n    return: int or\r\n            None if 'pos == False' and no index is found\r\n    \"\"\"\r\n    if _param_error_bisect(sorted_seq, pos, begin, end):\r\n        if pos:\r\n            return -1\r\n        return None\r\n\r\n    if begin == end == -1: # search the entire sequence\r\n        begin = 0\r\n        end = len(sorted_seq) - 1\r\n\r\n    while begin <= end:\r\n        middle = (begin + end) // 2\r\n        if sorted_seq[middle] == val:\r\n            return middle\r\n        if sorted_seq[middle] < val:\r\n            begin = middle + 1\r\n        else:\r\n            end = middle - 1\r\n\r\n    if pos:\r\n        return begin\r\n    return None\r\n\r\n_BASE = 10\r\n\r\ndef is_num_palindrome(num, begin = 0, end = 0):\r\n    \"\"\"Check if a number is a palindrome.\r\n\r\n    If both begin and end are zero all digits of the number are checked.\r\n\r\n    num  : int\r\n    begin: int, the digit to begin from\r\n    end  : int, the last digit to use\r\n\r\n    return: bool, True if num is a palindrome\r\n    \"\"\"\r\n    # extract the number and the number of digits based on begin and end positions\r\n    num, digits = extract(num, begin, end)\r\n\r\n    # to find if a number is a palindrome check every pair of digits in the number as follows:\r\n    # 1234321 -> 1234321 -> 1234321 -> 1234321 -> it is a palindrome\r\n    # ^ ^ ^ ^ ^ ^ ^\r\n    # so the max number of pairs is (digits // 2), e.g. the max number of pairs for 1234321 is\r\n    # (7 // 2) = 3\r\n    for pos in range( digits // 2):\r\n        # calculate the high order digit\r\n        high = (num // (_BASE ** (digits - (pos + 1)))) % _BASE\r\n\r\n        # calculate the low order digit\r\n        low = (num % (_BASE ** (pos + 1))) // (_BASE ** pos)\r\n\r\n        if high != low:\r\n            return False\r\n\r\n    return True\r\n\r\ndef reverse_num(num, begin = 0, end = 0):\r\n    \"\"\"Return the reverse of a number.\r\n\r\n    num  : int, the number to reverse\r\n    begin: int, the digit to begin reversing from\r\n    end  : int, the last digit to use for reversing\r\n\r\n    return: int, the number reversed\r\n    \"\"\"\r\n    # extract the number and the number of digits based on begin and end positions\r\n    num, digits = extract(num, begin, end)\r\n\r\n    # to reverse a number reverse every pair of digits in the number like this:\r\n    # 1234567 -> 7234561 -> 7634521 -> 7654321\r\n    #  ^     ^    ^   ^      ^ ^\r\n    # (1,7)      (2,6)      (3,5)\r\n    # so the max number of pairs is (digits // 2), e.g. the max number of pairs for 1234567 is\r\n    # (7 // 2) = 3\r\n    rev = 0\r\n    pairs = digits // 2\r\n    for pos in range(pairs):\r\n        power = _BASE ** (digits - (pos + 1))\r\n\r\n        # calculate the high order digit\r\n        high = (num // power) % _BASE\r\n\r\n        # calculate the low order digit\r\n        low = (num % (_BASE ** (pos + 1))) // (_BASE ** pos)\r\n\r\n        # Calculate the reversed number based on high and low digit. Note that the low digit has to\r\n        # be multiplied by power to become the new high digit\r\n        rev += (low * power) + (high * (_BASE ** pos))\r\n\r\n    # For numbers with odd number of digits the middle digit is not part of a pair so it is not\r\n    # extracted by the loop above. 
The following statements extract the middle number and add it\r\n    # to the reversed number.\r\n    if digits % 2:\r\n        middle_num = num % (_BASE ** ((digits + 1) // 2))\r\n        rev += (middle_num - (middle_num % (_BASE ** (pairs))))\r\n\r\n    return rev\r\n\r\ndef extract(num, begin, end):\r\n    \"\"\"Extract the number from begin and end positions within the number.\r\n\r\n    num  : int\r\n    begin: int, the digit in num to begin the extraction from\r\n    end  : int, the last digit in num to use for the extraction\r\n\r\n    return: tuple(int, int),\r\n            int: number\r\n            int: number of digits\r\n    \"\"\"\r\n    _param_error_num(num, begin, end)\r\n\r\n    if num < 0:\r\n        num = abs(num)\r\n\r\n    # count the number of digits\r\n    digits = 0\r\n    tmp = num\r\n    while tmp >= _BASE:\r\n        tmp //= _BASE\r\n        digits += 1\r\n    digits += 1\r\n\r\n    if begin:\r\n        if begin > digits: # this is a special case where num = 0 and digits = 1\r\n            end = begin\r\n        elif not end: # if end is unspecified set it to maximum\r\n            end = digits\r\n        digits = end - begin + 1\r\n        num = num // (_BASE ** (begin - 1))\r\n        num = num % (_BASE ** digits)\r\n\r\n    return num, digits\r\n\r\ndef cmpfiles(file1, file2):\r\n    \"\"\"Compare two files based on content. Use Windows 'fc' cmd.\r\n\r\n    file1: str, a filename\r\n    file2: str, a filename\r\n\r\n    return: bool, False if files are the same\r\n    \"\"\"\r\n    cmd = f\"fc /U {file1} {file2}\" # command on windows to compare two text files\r\n\r\n    pipe = os.popen(cmd) # open pipe and initialize with cmd\r\n    stat = pipe.close() # get status of cmd, i.e. success or failure\r\n\r\n    return bool(stat)\r\n\r\ndef md5(filename):\r\n    \"\"\"Create md5 for a file.\r\n\r\n    filename: str\r\n\r\n    return: str, md5 for the file\r\n    \"\"\"\r\n    md5_val = ''\r\n\r\n    # command on Windows to produce md5 value for a file\r\n    cmd = \"certutil -hashfile \" + filename + \" MD5\"\r\n    pipe = os.popen(cmd) # open pipe and initialize with cmd\r\n    res = pipe.read() # read results of cmd\r\n    stat = pipe.close() # get status of cmd, i.e. 
success or failure\r\n    if not stat: # if not failure\r\n        begin = res.find('\\n') # find beginning of md5 value\r\n        if begin != -1: # -1 means find failed\r\n            end = res.find('\\n', begin+1) # find end of md5 value\r\n            if end != -1: # -1 means find failed\r\n                md5_val = res[begin+1:end] # extract the md5 value\r\n\r\n    return md5_val\r\n\r\ndef _param_error_bisect(seq, pos, begin, end):\r\n    \"\"\"Validate parameters.\r\n\r\n    seq  : a sequence\r\n    pos  : bool\r\n    begin: int, > -2 and <= end\r\n    end  : int, > -2 and >= begin\r\n\r\n    return: True if params error is found\r\n    \"\"\"\r\n    if not isinstance(seq, collections.abc.Sequence):\r\n        print(\"error: 'seq' has to be a sequence\")\r\n        return True\r\n    if not seq:\r\n        return True\r\n    if not isinstance(pos, bool):\r\n        print(\"error: 'pos' must be of 'bool' type\")\r\n        return True\r\n    if not isinstance(begin, int) or not isinstance(end, int):\r\n        print(\"error: 'begin' and 'end' must be of 'int' type\")\r\n        return True\r\n    if begin > end:\r\n        print(\"error: (begin > end) is not allowed\")\r\n        return True\r\n    if begin < -1 or end < -1:\r\n        print(\"error: (begin < -1 or end < -1) is not allowed\")\r\n        return True\r\n\r\n    return False\r\n\r\ndef _param_error_num(num, begin, end):\r\n    \"\"\"Validate parameters.\r\n\r\n    num  : int\r\n    begin: int, the digit in num to begin from\r\n    end  : int, the last digit in num to use\r\n\r\n    exceptions: TypeError , if any parameter is not of type int\r\n                ValueError, if begin and/or end have wrong integer values (see below)\r\n\r\n    return: bool, False if no error\r\n    \"\"\"\r\n    if not isinstance(num, int) or not isinstance(begin, int) or not isinstance(end, int):\r\n        raise TypeError(\"error: all parameters have to be of type 'int'\")\r\n    if begin < 0 or end < 0 or (begin > end and end):\r\n        raise ValueError(\"error: (begin < 0 or end < 0 or begin > end) is not allowed\")\r\n    if not begin and end:\r\n        raise ValueError(\"error: when specifying a range, 'begin' cannot be zero -> \"\r\n                         f\"[{begin}, {end}]\")\r\n","repo_name":"mperrakis/myrepo","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":18219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3674530408","text":"import unittest\nimport os\nfrom sfa.util.faults import *\nfrom sfa.trust.hierarchy import *\nfrom sfa.util.config import *\n\nBASEDIR = \"test_hierarchy\"\nPURGE_BASEDIR = \"rm -rf test_hierarchy\"\n\nclass TestHierarchy(unittest.TestCase):\n    def setUp(self):\n        os.system(PURGE_BASEDIR)\n        pass\n\n    def testInit(self):\n        h = Hierarchy(BASEDIR)\n\n    def testGetAuthInfo(self):\n        h = Hierarchy(BASEDIR)\n\n        name = \"planetlab.us.arizona.stork\"\n\n        self.assertEqual(h.auth_exists(name), False)\n\n        self.assertRaises(MissingAuthority, h.get_auth_info, name)\n\n        h.create_auth(name, create_parents=True)\n        auth_info = h.get_auth_info(name)\n        self.assert_(auth_info)\n\n        gid = auth_info.get_gid_object()\n        self.assert_(gid)\n        self.assertEqual(gid.get_subject(), name)\n\n        pubkey = auth_info.get_pkey_object()\n        self.assert_(pubkey)\n\n        # try to get it again, make sure it's still there\n        auth_info2 = h.get_auth_info(name)\n        self.assert_(auth_info2)\n\n        gid = auth_info2.get_gid_object()\n        self.assert_(gid)\n        self.assertEqual(gid.get_subject(), name)\n\n        pubkey = auth_info2.get_pkey_object()\n        self.assert_(pubkey)\n\n\nif __name__ == \"__main__\":\n    
unittest.main()\n","repo_name":"planetlab/sfa","sub_path":"tests/testHierarchy.py","file_name":"testHierarchy.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"45062396963","text":"check = True\nwhile check:\n data = list(map(int, input().split()))\n if data.count(0) == 3:\n check = False\n break\n data.sort(reverse=True)\n bigV, s1, s2 = data[0], data[1], data[2]\n if bigV ** 2 == s1**2+s2**2 and bigV > 0 and s1 > 0 and s2 > 0:\n print(\"right\")\n else:\n print(\"wrong\")","repo_name":"healtheloper/dochon_logic","sub_path":"backjoon/math/4153_직각삼각형.py","file_name":"4153_직각삼각형.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34385977951","text":"\"\"\"End to end testing for SEG-Y to MDIO conversion and back.\"\"\"\n\n\nfrom os.path import getsize\n\nimport numpy as np\nimport numpy.testing as npt\nimport pytest\nimport segyio\n\nfrom mdio import MDIOReader\nfrom mdio import mdio_to_segy\nfrom mdio.converters import segy_to_mdio\nfrom mdio.core import Dimension\n\n\n@pytest.mark.parametrize(\"header_locations\", [(17, 13)])\n@pytest.mark.parametrize(\"header_names\", [(\"inline\", \"crossline\")])\n@pytest.mark.parametrize(\"endian\", [\"big\"])\nclass TestImport:\n \"\"\"Import tests.\"\"\"\n\n def test_3d_import(\n self, segy_input, zarr_tmp, header_locations, header_names, endian\n ):\n \"\"\"Test importing a SEG-Y file to MDIO.\"\"\"\n segy_to_mdio(\n segy_path=segy_input.__str__(),\n mdio_path_or_buffer=zarr_tmp.__str__(),\n index_bytes=header_locations,\n index_names=header_names,\n overwrite=True,\n endian=endian,\n )\n\n\nclass TestReader:\n \"\"\"Test reader functionality.\"\"\"\n\n def test_meta_read(self, zarr_tmp):\n \"\"\"Metadata reading tests.\"\"\"\n mdio = MDIOReader(zarr_tmp.__str__())\n assert mdio.binary_header[\"Samples\"] == 1501\n assert mdio.binary_header[\"Interval\"] == 2000\n\n def test_grid(self, zarr_tmp):\n \"\"\"Grid reading tests.\"\"\"\n mdio = MDIOReader(zarr_tmp.__str__())\n grid = mdio.grid\n\n assert grid.select_dim(\"inline\") == Dimension(range(1, 346), \"inline\")\n assert grid.select_dim(\"crossline\") == Dimension(range(1, 189), \"crossline\")\n assert grid.select_dim(\"sample\") == Dimension(range(0, 3002, 2), \"sample\")\n\n def test_get_data(self, zarr_tmp):\n \"\"\"Data retrieval tests.\"\"\"\n mdio = MDIOReader(zarr_tmp.__str__())\n\n assert mdio.shape == (345, 188, 1501)\n assert mdio[0, :, :].shape == (188, 1501)\n assert mdio[:, 0, :].shape == (345, 1501)\n assert mdio[:, :, 0].shape == (345, 188)\n\n def test_inline(self, zarr_tmp):\n \"\"\"Read and compare every 75 inlines' mean and std. dev.\"\"\"\n mdio = MDIOReader(zarr_tmp.__str__())\n\n inlines = mdio[::75, :, :]\n mean, std = inlines.mean(), inlines.std()\n\n npt.assert_allclose([mean, std], [1.0555277e-04, 6.0027051e-01])\n\n def test_crossline(self, zarr_tmp):\n \"\"\"Read and compare every 75 crosslines' mean and std. dev.\"\"\"\n mdio = MDIOReader(zarr_tmp.__str__())\n\n xlines = mdio[:, ::75, :]\n mean, std = xlines.mean(), xlines.std()\n\n npt.assert_allclose([mean, std], [-5.0329847e-05, 5.9406823e-01])\n\n def test_zslice(self, zarr_tmp):\n \"\"\"Read and compare every 225 z-slices' mean and std. 
dev.\"\"\"\n        mdio = MDIOReader(zarr_tmp.__str__())\n\n        slices = mdio[:, :, ::225]\n        mean, std = slices.mean(), slices.std()\n\n        npt.assert_allclose([mean, std], [0.005236923, 0.61279935])\n\n\nclass TestExport:\n    \"\"\"Test SEG-Y exporting functionality.\"\"\"\n\n    def test_3d_export(self, zarr_tmp, segy_export_ibm_tmp, segy_export_ieee_tmp):\n        \"\"\"Test 3D export to IBM and IEEE.\"\"\"\n        mdio_to_segy(\n            mdio_path_or_buffer=zarr_tmp.__str__(),\n            output_segy_path=segy_export_ibm_tmp.__str__(),\n            out_sample_format=\"ibm32\",\n        )\n\n        mdio_to_segy(\n            mdio_path_or_buffer=zarr_tmp.__str__(),\n            output_segy_path=segy_export_ieee_tmp.__str__(),\n            out_sample_format=\"ieee32\",\n        )\n\n    def test_ibm_size_equal(self, segy_input, segy_export_ibm_tmp):\n        \"\"\"Check if file sizes match on IBM file.\"\"\"\n        assert getsize(segy_input) == getsize(segy_export_ibm_tmp)\n\n    def test_ieee_size_equal(self, segy_input, segy_export_ieee_tmp):\n        \"\"\"Check if file sizes match on IEEE file.\"\"\"\n        assert getsize(segy_input) == getsize(segy_export_ieee_tmp)\n\n    def test_ibm_rand_equal(self, segy_input, segy_export_ibm_tmp):\n        \"\"\"IBM. Do random original traces and headers match the round-trip file?\"\"\"\n        with segyio.open(segy_input, ignore_geometry=True) as in_segy:\n            in_tracecount = in_segy.tracecount\n            in_text = in_segy.text[0]\n            in_binary = in_segy.bin\n            random_indices = np.random.randint(0, in_tracecount, 100)\n            in_trc_hdrs = [in_segy.header[idx] for idx in random_indices]\n            in_traces = [in_segy.trace[idx] for idx in random_indices]\n\n        with segyio.open(segy_export_ibm_tmp, ignore_geometry=True) as out_segy:\n            out_tracecount = out_segy.tracecount\n            out_text = out_segy.text[0]\n            out_binary = out_segy.bin\n            out_trc_hdrs = [out_segy.header[idx] for idx in random_indices]\n            out_traces = [out_segy.trace[idx] for idx in random_indices]\n\n        assert in_tracecount == out_tracecount\n        assert in_text == out_text\n        assert in_binary == out_binary\n        assert in_trc_hdrs == out_trc_hdrs\n        npt.assert_array_equal(in_traces, out_traces)\n\n    def test_ieee_rand_equal(self, segy_input, segy_export_ieee_tmp):\n        \"\"\"IEEE. 
Do random original traces and headers match the round-trip file?\"\"\"\n        with segyio.open(segy_input, ignore_geometry=True) as in_segy:\n            in_tracecount = in_segy.tracecount\n            in_text = in_segy.text[0]\n            in_binary = dict(in_segy.bin) # Cast to dict bc read-only\n            in_binary.pop(3225) # Remove format bc comparing IBM / IEEE\n            random_indices = np.random.randint(0, in_tracecount, 100)\n            in_trc_hdrs = [in_segy.header[idx] for idx in random_indices]\n            in_traces = [in_segy.trace[idx] for idx in random_indices]\n\n        with segyio.open(segy_export_ieee_tmp, ignore_geometry=True) as out_segy:\n            out_tracecount = out_segy.tracecount\n            out_text = out_segy.text[0]\n            out_binary = dict(out_segy.bin) # Cast to dict bc read-only\n            out_binary.pop(3225) # Remove format bc comparing IBM / IEEE\n            out_trc_hdrs = [out_segy.header[idx] for idx in random_indices]\n            out_traces = [out_segy.trace[idx] for idx in random_indices]\n\n        assert in_tracecount == out_tracecount\n        assert in_text == out_text\n        assert in_binary == out_binary\n        assert in_trc_hdrs == out_trc_hdrs\n        npt.assert_array_equal(in_traces, out_traces)\n","repo_name":"sanath-2024/mdio-python","sub_path":"tests/integration/test_segy_import_export.py","file_name":"test_segy_import_export.py","file_ext":"py","file_size_in_byte":6300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"9613886252","text":"from django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('server', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='certificateauthority',\n            name='display_name',\n            field=models.CharField(max_length=30, null=True, blank=True),\n            preserve_default=True,\n        ),\n        migrations.AlterField(\n            model_name='certificateauthority',\n            name='name',\n            field=models.CharField(unique=True, max_length=50),\n        ),\n        migrations.AlterField(\n            model_name='server',\n            name='ca',\n            field=models.ForeignKey(related_name='servers', blank=True, to='server.CertificateAuthority', help_text=b'The Certificate Authority of the certificate used in SSL/TLS connections.', null=True, verbose_name=b'CA', on_delete=models.CASCADE),\n        ),\n    ]\n","repo_name":"mathiasertl/django-xmpp-server-list","sub_path":"server/migrations/0002_auto_20141022_0650.py","file_name":"0002_auto_20141022_0650.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"48"} +{"seq_id":"10415519765","text":"import sqlite3\nimport csv\n\n\"\"\"\nScript to dump database tables to csv files for database migrations.\n\"\"\"\n\ndef table_to_csv(table_name, column_names):\n    raw_sql = 'SELECT {column_string} FROM {table_name}'\n    column_string = ', '.join(column_names)\n    sql = raw_sql.format(table_name=table_name, \n                         column_string=column_string)\n    filename = 'data/{}.csv'.format(table_name)\n    with open(filename, 'w', newline='') as csvfile:\n        writer = csv.writer(csvfile)\n        cur = conn.cursor()\n        cur.execute(sql)\n        rows = cur.fetchall()\n        writer.writerows(rows)\n\ntables = {'texts': ('id', 'user_id', 'language_id', 'collection_id', 'title', 'text'),\n          'known_words': ('user_id', 'language_id', 'word'),\n          'learning_words': ('user_id', 'language_id', 'word'),\n          'text_word_counts': ('user_id', 'language_id', 'text_id', 'word', 'word_count'),\n          'total_word_counts': ('user_id', 'language_id', 'word', 'word_count'),\n          'collections': ('id', 'user_id', 'language_id', 'name'),\n          'languages': ('id', 'language'),\n          'users': ('id', 'username', 
'password')}\n\nconn = sqlite3.connect('reader_app.db')\n\nfor table, column_names in tables.items():\n    table_to_csv(table, column_names)\n\nconn.close()","repo_name":"jalmaguer/ReaderApp","sub_path":"dump_tables.py","file_name":"dump_tables.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16015004871","text":"from typing import List\n\n\ndef dfs(temp: List[int], nums: List[int], ans: List[List[int]]):\n    if len(temp) == len(nums):\n        ans.append(temp[:])\n        return\n\n    for num in nums:\n        if num in temp:\n            continue\n        temp.append(num)\n        dfs(temp, nums, ans)\n        temp.pop()\n\n    return ans\n\n\ndef permute(nums: List[int]) -> List[List[int]]:\n    temp = []\n    ans = []\n    ans = dfs(temp, nums, ans)\n    return ans\n\n\nif __name__ == '__main__':\n    print(permute([1, 2, 3]))\n","repo_name":"Dirtytrii/leetcodePython","sub_path":"蓝桥杯备赛/学习/回溯/全排列.py","file_name":"全排列.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38019796668","text":"from django.db import models\nfrom authapp.models import ShopUser\nfrom mainapp.models import Product\nfrom django.utils.functional import cached_property\n\n\nclass Cart(models.Model):\n    user = models.ForeignKey(ShopUser, on_delete=models.CASCADE, related_name=\"cart\")\n    product = models.ForeignKey(Product, on_delete=models.CASCADE)\n    quantity = models.PositiveIntegerField(verbose_name=\"количество\", default=0)\n    created_time = models.DateTimeField(verbose_name=\"время\", auto_now_add=True)\n\n\n    @property\n    def get_items_cached(self):\n        return self.user.cart.select_related()\n\n\n    @property\n    def product_cost(self):\n        \"return cost of all products of this type\"\n        return self.product.price * self.quantity\n\n    @property\n    def total_quantity(self):\n        \"return total quantity for user\"\n        _items = self.get_items_cached\n        _total_quantity = sum(list(map(lambda x: x.quantity, _items)))\n        return _total_quantity\n\n    @property\n    def total_cost(self):\n        \"return total cost for user\"\n        _items = self.get_items_cached\n        _total_cost = sum(list(map(lambda x: x.product_cost, _items)))\n        return _total_cost\n\n\n    @classmethod\n    def get_items(self, user):\n        return Cart.objects.filter(user=user)\n\n    def delete(self):\n        self.product.quantity += self.quantity\n        self.product.save()\n        super(self.__class__, self).delete()\n\n    def save(self, *args, **kwargs):\n        if self.pk:\n            old_cart_item = Cart.objects.get(pk=self.pk)\n            self.product.quantity -= self.quantity - old_cart_item.quantity\n        else:\n            self.product.quantity -= self.quantity\n        self.product.save()\n        super(self.__class__, self).save(*args, **kwargs)\n","repo_name":"rozov-alexander/Django","sub_path":"geekshop/cartproductsapp/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18218400073","text":"# server for task 1/2\nfrom flask import Flask, request, jsonify\n\napp = Flask(__name__)\n\nmessages = []\n\n@app.route('/send_message', methods=['POST'])\ndef send_message():\n    message = request.get_json().get('message')\n    if message.lower() == 'слава нації':\n        response = 'Смерть ворогам'\n    elif 'нації' in message.lower():\n        response = 'Смерть ворогам'\n    else:\n        response = 'Героям Слава!!'\n    messages.append({'user': message, 'bot': response})\n    return jsonify({'status': 'OK', 'response': 
response})\n\n@app.route('/get_messages', methods=['GET'])\ndef get_messages():\n return jsonify({'messages': messages})\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"iwolfer37/homework","sub_path":"Lesson11/server1.py","file_name":"server1.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12246414459","text":"# Kelly Peterson\n# LING 575\n# simple script to find out all files in two directories which share a common line\n# this is used to find email data with the same Message-ID in raw data that was annotated by two different universities\n\nimport sys\nimport os\n# this is for (hopefully) portable file copying\nimport shutil\n\ndef FindLinesStartingWith(path, targetString):\n\tprint('Starting to scan through path : ' + path)\n\tmatches = {}\n\tfor root, dirs, files in os.walk(path):\n\t\tfor filename in files:\n\t\t\tprint(filename)\n\t\t\tfilePath = os.path.join(root, filename)\n\t\t\tfile = open(filePath)\n\t\t\tfor line in file:\n\t\t\t\tif line.startswith(targetString):\n\t\t\t\t\tprint('Found starting line : [' + filePath + ']')\n\t\t\t\t\tmatches[line] = filePath\n\treturn matches\n\t\ntargetString = 'Message-ID:'\npathDictA = FindLinesStartingWith(sys.argv[1], targetString)\npathDictB = FindLinesStartingWith(sys.argv[2], targetString)\n\n# do we want to copy the files which match?\ncopyFiles = 0\nif len(sys.argv) > 3 and sys.argv[3] == '--copy':\n\tcopyFiles = 1\n\n\nprint('*************')\nprint('Done reading files. Finding files with matching lines...')\n# now that we have these dictionaries, let's find which files are in common here...\nfor key, value in pathDictA.items():\n\tif key in pathDictB:\n\t\tprint('match : [' + key + '], match : ' + value)\n\t\tif copyFiles == 1:\n\t\t\t# let's get a path that we can copy this to\n\t\t\ttargetAbsPath = os.path.abspath(os.path.join('.', os.path.basename(value)))\n\t\t\tprint('Copying file from ' + value + ' to ' + targetAbsPath)\n\t\t\tshutil.copy (value, targetAbsPath)","repo_name":"burgersmoke/enron-formality","sub_path":"scripts/common_messages/common_messages.py","file_name":"common_messages.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"36881816375","text":"from sre_constants import SUCCESS\nfrom scripts.helpful_scripts import get_account\nfrom brownie import (\n accounts,\n config,\n network,\n OnChain3dMetadataRenderer,\n OnChain3D,\n)\nimport json\nfrom time import sleep\n\n\ndef deploy_and_create(mint_req=True):\n test_counter = 0\n pass_counter = 0\n account = get_account()\n demo = OnChain3dMetadataRenderer.deploy({\"from\": account})\n\n token = OnChain3D.deploy({\"from\": account})\n print(\"time remaining = \" + str(token.remainingTime()))\n #############\n # with open(\"./solid-json.json\", \"r\") as ss:\n with open(\"./scripts/PlatonicSolids.json\", \"r\") as ss:\n # PlatonicSolids.json\n # q = f.read()\n y = json.load(ss)\n\n for k in range(5):\n i = k\n\n print(y[str(i)][\"name\"])\n demo.solidStruct_IMU(\n y[str(i)][\"tokenId\"],\n y[str(i)][\"name\"],\n y[str(i)][\"vertices\"],\n y[str(i)][\"face_list\"],\n y[str(i)][\"face_polygon\"],\n {\"from\": account},\n )\n sleep(0.5)\n\n # print(t)\n\n # token = OnChain3dTokenPlaceHolder.deploy({\"from\": account})\n token.setMetadataRenderer(demo.address, {\"from\": account})\n sleep(0.5)\n demo.setTargetAddress(token.address, {\"from\": 
account})\n    test_counter += 1\n    token.mintToken(1, {\"from\": accounts[0], \"value\": 1e16})\n    print(demo.getGeneralSetting(0))\n    id = 0\n    o = [4 * 2 ** 64, 4 * 2 ** 64, -1 * 2 ** 64]\n    opacity = 23\n\n    angularSpeed = 0\n\n    # wc = 15158332\n    wireColor = 10 * 2 ** 8 + 11 * 2 ** 4 + 12\n    backColor = 9 * 2 ** 8 + 8 * 2 ** 4 + 7\n\n    cl2 = \"F5B041F0E68C\" * 10\n    _comp = (\n        2\n        + 256 * opacity\n        + 2 ** 16 * angularSpeed\n        + 2 ** 32 * wireColor\n        + 2 ** 48 * backColor\n    )\n\n    demo.setMinimalSetting(\n        id, o, _comp, bytes.fromhex(cl2[0:24]), {\"from\": accounts[0]}\n    )\n    pass_counter += 1\n    print(\"it passed preview \")\n    # print(pre)\n    sleep(0.5)\n    # pre = demo.previewTokenById(id, o, _comp, bytes.fromhex(cl2[0:24]))\n    # pass_counter += 1\n    # print(\"it passed preview \")\n    # print(pre)\n    print(demo.getGeneralSetting(0))\n    sleep(0.5)\n    pre = demo.previewTokenById(id, o, _comp, bytes.fromhex(cl2[0:24]))\n    sleep(0.5)\n    print(cl2[0:24])\n    print(_comp)\n    print(o)\n\n    print(\"time remaining = \")\n    print(\"done\")\n\n\ndef main():\n    deploy_and_create(True)\n","repo_name":"scinftist/OnChain3d","sub_path":"scripts/testP.py","file_name":"testP.py","file_ext":"py","file_size_in_byte":2412,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"31868417711","text":"from flask import render_template, flash, redirect, url_for\nfrom flask.ext.login import login_required\nfrom flask.ext.babel import gettext as _\nfrom setup import bp_amazon, amazon\nfrom forms import AmazonForm, AmazonServerForm\n\n@bp_amazon.route('/amazon')\n@login_required\ndef provider_list():\n    list = amazon.get_configurations()\n    return render_template('amazon_provider_list.html', list=list)\n\n@bp_amazon.route('/amazon/add', methods=['GET', 'POST'])\n@login_required\ndef provider_add():\n    form = AmazonForm()\n    if form.validate_on_submit():\n        amazon.add_provider(form)\n        flash(_('Amazon information added'))\n        return redirect(url_for(\"amazon.provider_list\"))\n    return render_template('amazon_provider_add.html', form=form)\n\n@bp_amazon.route('/amazon/edit/<int:id>', methods=['GET', 'POST'])\n@login_required\ndef provider_edit(id):\n    provider = amazon.get_provider(id)\n    form = AmazonForm(obj=provider)\n    if form.validate_on_submit():\n        amazon.edit_provider(form, provider)\n        flash(_('Amazon provider edited'))\n        return redirect(url_for(\"amazon.provider_list\"))\n    return render_template('amazon_provider_edit.html', form=form)\n\n@bp_amazon.route('/amazon/del/<int:id>')\n@login_required\ndef provider_delete(id):\n    amazon.delete_provider(id)\n    flash(_('Amazon information deleted'))\n    return redirect(url_for(\"amazon.provider_list\"))\n\n@bp_amazon.route('/amazon/server/add', methods=['GET', 'POST'])\n@login_required\ndef server_add():\n    form = AmazonServerForm()\n    if form.validate_on_submit():\n        amazon.add_server(form)\n        flash(_('Amazon server added'))\n        return redirect(url_for('deploy.deploy_list'))\n    return render_template('amazon_server_add.html', form=form)\n\n@bp_amazon.route('/amazon/server/delete/<int:id>')\n@login_required\ndef server_delete(id):\n    amazon.delete_server(id)\n    flash(_('Amazon server deleted'))\n    return redirect(url_for('deploy.deploy_list'))\n\n@bp_amazon.route('/amazon/server/deploy/<int:id>')\n@login_required\ndef server_deploy(id):\n    amazon.deploy_server(id)\n    flash(_('Amazon server deploy launched'))\n    return 
redirect(url_for('deploy.deploy_list'))\n","repo_name":"sboily/xivo-unified-modules","sub_path":"amazon/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25816867287","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.cluster import OPTICS\nimport matplotlib.pyplot as plt\n\n# Load the data\ndata = pd.read_csv(\"../resources/Mall_Customers.csv\")\n\n# Select the relevant columns\nX = data[[\"Annual Income (k$)\", \"Spending Score (1-100)\"]]\n\n# Fit the OPTICS model to the data\nmodel = OPTICS(min_samples=5, xi=.05, min_cluster_size=.05)\nmodel.fit(X)\n\n# Get the cluster labels\nlabels = model.labels_\n\n# Identify the anomalies\nanomalies = X[labels == -1]\n\n# Plot the results\nplt.scatter(X.iloc[:,0], X.iloc[:,1], c=labels, cmap='rainbow', alpha=0.7)\nplt.scatter(anomalies.iloc[:,0], anomalies.iloc[:,1], color='black', marker='x', label='Anomalies')\nplt.legend()\nplt.show()","repo_name":"ivanursul/review-of-anomaly-detection-algorithms","sub_path":"visualization/optics.py","file_name":"optics.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40680873789","text":"conv = input(\"Enter Celsius to Fahrenheit(1) or Fahrenheit to Celsius(2):\\nJust enter 1 or 2 :\")\n\nif conv == \"1\":\n val = int(input(\"Enter the Celsius :\"))\n fah = (val * (9/5)) +32\n print(\"{} degree celsius is equal to {} degree fahrenheit\".format(val,fah))\nelif conv == \"2\":\n val = int(input(\"Enter the fahrenheit :\"))\n cel = (val - 32) * (5/9)\n print(\"{} degree fahrenheit is equal to {} degree celsius\".format(val,cel))\nelse:\n print(\"Enter the valid number:\")\n","repo_name":"ganeshbalajiai/celsiusAndfahrenheit","sub_path":"celsiusfahrenheit.py","file_name":"celsiusfahrenheit.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7981941256","text":"import os\nimport json\nimport shutil\nimport torch\nimport numpy as np\nfrom torch_geometric.data import Dataset, Data\nfrom transformers import AutoTokenizer\nfrom tqdm import tqdm\nfrom utils import preprocess\n\nclass RumorDataset(Dataset):\n def __init__(self, root, split, classes, language='en', max_length=64, transform=None, pre_transform=None, aug = False):\n\n self.split = split\n self.filename = \"{}.json\".format(split)\n self.aug = aug\n self.classes = classes\n self.language = language\n self.root = root\n self.max_length = max_length\n\n self.max_nodes = 100\n\n self.textTokenizer = self._get_tokenizer()\n\n super(RumorDataset, self).__init__(root, transform, pre_transform)\n\n @property\n def raw_file_names(self):\n return self.filename\n\n @property\n def processed_file_names(self):\n with open(self.raw_paths[0], 'r', encoding='utf-8') as f:\n self.data = json.load(f)\n data_len = (len(self.data))\n return [f'data_{self.split}_{i}.pt' for i in range(data_len)]\n\n def download(self):\n download_path = self.raw_dir\n os.makedirs(download_path,exist_ok=True)\n file_path = os.path.join(self.root, self.filename)\n shutil.copy(file_path,download_path)\n\n def process(self):\n with open(self.raw_paths[0], 'r', encoding='utf-8') as f:\n self.data = json.load(f)\n\n for index, tweet in (enumerate(tqdm(self.data))):\n tweet[\"nodes\"] = tweet[\"nodes\"][:self.max_nodes]\n tweet[\"edges\"] = 
tweet[\"edges\"][:self.max_nodes-1]\n tweet_id = tweet['id']\n \n node_feats = self._get_node_features(tweet[\"nodes\"])\n\n if self.aug:\n edge_index = self._get_adjacency_info(tweet[\"edges\"])\n else:\n edge_index= self._get_adjacency_info1(tweet[\"edges\"])\n\n label = self._get_labels(tweet['label'])\n\n data = Data(x=node_feats,\n edge_index = edge_index, \n y=label,\n id=tweet_id,\n )\n\n torch.save(data,\n os.path.join(self.processed_dir,\n f'data_{self.split}_{index}.pt'))\n\n def _get_tokenizer(self):\n if self.language == 'en':\n return AutoTokenizer.from_pretrained(\"cardiffnlp/twitter-roberta-base\")\n elif self.language == 'cn':\n return AutoTokenizer.from_pretrained(\"hfl/chinese-bert-wwm-ext\")\n # if self.language == 'en':\n # return AutoTokenizer.from_pretrained(\"bert-base-uncased\")\n # elif self.language == 'cn':\n # return AutoTokenizer.from_pretrained(\"bert-base-chinese\")\n\n def _get_node_features(self, nodes):\n texts = [preprocess(node['text']) for node in nodes]\n encoded_input = self.textTokenizer.batch_encode_plus(\n texts, max_length=self.max_length, padding=\"max_length\", truncation=True, return_tensors='pt')\n\n all_node_feats = torch.stack([\n encoded_input[\"input_ids\"], encoded_input[\"attention_mask\"]], dim=-1)\n return all_node_feats\n\n def _get_edge_features(self, edge_len):\n return torch.ones(edge_len, 1)\n\n def _get_adjacency_info1(self, edges):\n edge_indices = []\n \n for edge in edges:\n i = int(edge['from'])\n j = int(edge['to'])\n edge_indices += [[j, i]]\n\n edge_indices = torch.tensor(edge_indices)\n edge_indices = edge_indices.t().to(torch.long).view(2, -1)\n\n return edge_indices\n\n\n def _get_adjacency_info(self, edges):\n edge_indices = []\n \n for edge in edges:\n i = int(edge['from'])\n j = int(edge['to'])\n edge_indices += [[j, i]]\n\n while j != 0:\n for edge2 in edges:\n edge_from = int(edge2['from'])\n edge_to = int(edge2['to'])\n if edge_from == j:\n j = edge_to\n edge_indices += [[j, i]]\n continue\n\n edge_indices = torch.tensor(edge_indices)\n edge_indices = edge_indices.t().to(torch.long).view(2, -1)\n\n return edge_indices\n\n def _get_labels(self, label):\n label = self.classes.index(label)\n label = np.asarray([label])\n return torch.tensor(label, dtype=torch.int32)\n\n def len(self):\n return len(self.data)\n\n def get(self, idx):\n\n data = torch.load(os.path.join(self.processed_dir,\n f'data_{self.split}_{idx}.pt'))\n \n return data","repo_name":"thcheung/CDGTN","sub_path":"data_loader/RumorDataset.py","file_name":"RumorDataset.py","file_ext":"py","file_size_in_byte":4719,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"18303188869","text":"\"\"\"\nJoshua\nCS 30 Period 1\nMay 14, 2023\nThis file contains the module for the game map\nand the method for movement\n\"\"\"\nfrom Joshua_Liu_Enemy import Enemy, Boss\nimport Joshua_Liu_Enemy\nfrom random import *\n\n\nclass GameMap:\n def __init__(self, character, enemymovement):\n self.character = character\n self.DIRECTION = [\"forward\", \"right\", \"left\", \"back\"]\n self.length = randint(4, 6)\n self.height = randint(4, 6)\n self.data = self.generate_map()\n self.layoutmap = self.data[0]\n self.initpos = self.data[1]\n self.enemymovement = enemymovement\n self.roommap = self.create_rooms()\n self.ITEMS = {\n \"Regular Sword\": {\n \"Desc\": \"Simple steel\",\n \"Dmg\": 3\n },\n \"Shield\": {\n \"Desc\": \"Simple wood\",\n },\n \"Gilgamesh\": {\n \"Desc\": \":gilgamesh:\",\n \"Dmg\": 40\n },\n \"Jerma\": 
{\n \"Desc\": \"Unleash destruction upon your foes\",\n \"Dmg\": 15\n },\n \"Omega Energy Sword\": {\n \"Desc\": \"Super damage!\",\n \"Dmg\": 10\n },\n \"Key\": {\n \"Desc\": \"But what does it sayyyyyyy?!?!\",\n },\n \"Fists\": {\n \"Desc\": \"Good ol fist cuffs\",\n \"Dmg\": 2\n },\n }\n\n def generate_map(self):\n \"\"\"Function for creating a random map\"\"\"\n x = 0 # x-coordinate for map\n y = 0 # y-coordinate for map\n room = [ # Creating the room layout variable\n [randint(1, 5)]\n ]\n # Populating the map with rooms\n while x < self.height:\n while y < self.length:\n # Creating new row for more rooms\n room[x].append(randint(1, 5))\n y += 1\n room.append([randint(1, 5)]) # adding a new row\n y = 0 # reset y\n x += 1 # increment x\n room.pop(-1) # delete superfluous room\n # create index cood\n player_start = randint(0, self.length - 1)\n room[0][player_start] = 0 # creating a starting room\n # creating an exit room\n room[x - 1][randint(0, self.length - 1)] = 6\n self.character.pos = [player_start, 0]\n data = [room, self.character.pos]\n return data\n\n def create_rooms(self):\n \"\"\"\n Method for creating map but with room objects\n rather than integers representing rooms\n \"\"\"\n ROOM_LEGEND = [\n [\"Index\", \"Your starting location!\"],\n [\"Treasure Room\", \"A room with booty!\"],\n [\"Trap Room\", \"ITS A TRAP!\"],\n [\"Monster Room\", \"Run in circles! Your life depends on it!\"],\n [\"Regular Room\", \"Boring\"], [\"Boss Room\", \"R.I.P\"],\n [\"Exit Room\", \"Tataaa!\"]]\n x = 0\n y = 0\n worlb = []\n worlbcpy = [[None]]\n while y < self.height:\n while x < self.length:\n worlb.append(Rooms(\n ROOM_LEGEND[self.layoutmap[y][x]],\n [x, y], self.character, self.enemymovement))\n x += 1\n if y == 0:\n worlbcpy[0] = worlb\n else:\n worlbcpy.append(None)\n worlbcpy[y] = worlb\n worlb = []\n y += 1\n x = 0\n type(worlb)\n return worlbcpy\n\n def move(self):\n \"\"\"Method for handling player movement\"\"\"\n x = 0 # Variable for loop\n # User input loop\n while x == 0:\n print(\"What do you want to do?\")\n # Copy of DIRECTION list with only valid input\n temp = self.DIRECTION[::]\n # Remove invalid dirctions\n if self.character.pos[0] == 0:\n temp.remove(\"left\")\n if self.character.pos[0] == self.length - 1:\n temp.remove(\"right\")\n if self.character.pos[1] == self.height - 1:\n temp.remove(\"forward\")\n if self.character.pos[1] == 0:\n temp.remove(\"back\")\n # Getting user input on direction\n print(\"Your options are the following:\")\n for direction in temp:\n print(direction)\n print(\"Enter 'quit' to exit this menu\")\n print(\"What will you choose?\")\n choice = input()\n # check if choice is valid\n if choice not in temp and not choice == \"quit\":\n print(\"invalid choice\")\n elif \"quit\" in choice:\n # does not exit game, brings user back to previous menu\n x = 1\n else:\n self.roommap[self.character.pos[1]]\\\n [self.character.pos[0]].leave()\n x = 1 # breaking loop this way\n # Seeing what action user chose\n if choice == \"forward\":\n self.character.pos[1] += 1\n elif choice == \"right\":\n self.character.pos[0] += 1\n elif choice == \"left\":\n self.character.pos[0] -= 1\n elif choice == \"back\":\n self.character.pos[1] -= 1\n # Trigger room enter events\n self.roommap[self.character.pos[1]]\\\n [self.character.pos[0]].enter()\n # Set player's current room as this room\n self.character.room = \\\n self.roommap[self.character.pos[1]]\\\n [self.character.pos[0]]\n\n def trap(self):\n \"\"\"\n Method for handling traps\n \"\"\"\n if randint(1, 7) == 
randint(1, 7): # Dice roll for trap\n print(\"You got hit by a trap!\")\n self.character.take_damage(randint(1, 2))\n\n def exitgame(self):\n \"\"\"\n Method for handling win condition\n \"\"\"\n if \"Key\" in self.character.inventory:\n print(\"Congratulations! You won!!!\")\n print(\"Now get outta here\")\n quit()\n else:\n print(\"You are missing something. Now go look for it.\")\n\n\nclass Rooms:\n def __init__(self, roomtype, pos, character, enemymovement):\n self.first = True\n self.roomtype = roomtype\n self.pos = pos\n self.items = []\n self.character = character\n self.inroom = False\n self.enemymovement = enemymovement\n self.enemie = None # Enemy in room\n self.ITEMS = {\n \"Regular Sword\": {\n \"Desc\": \"Simple steel\",\n \"Dmg\": 3\n },\n \"Shield\": {\n \"Desc\": \"Simple wood\",\n },\n \"Gilgamesh\": {\n \"Desc\": \":gilgamesh:\",\n \"Dmg\": 40\n },\n \"Jerma\": {\n \"Desc\": \"Unleash destruction upon your foes\",\n \"Dmg\": 15\n },\n \"Omega Energy Sword\": {\n \"Desc\": \"Super damage!\",\n \"Dmg\": 10\n },\n \"Key\": {\n \"Desc\": \"But what does it sayyyyyyy?!?!\",\n },\n \"Fists\": {\n \"Desc\": \"Good ol fist cuffs\",\n \"Dmg\": 2\n },\n }\n\n def trap(self):\n \"\"\"\n Method for handling traps\n \"\"\"\n if randint(1, 7) == randint(1, 7): # Dice roll for trap\n print(\"You got hit by a trap!\")\n self.character.take_damage(randint(1, 2))\n\n def exitgame(self):\n \"\"\"\n Method for handling win condition\n \"\"\"\n if \"Key\" in self.character.inventory:\n print(\"Congratulations! You won!!!\")\n print(\"Now get outta here\")\n quit()\n else:\n print(\"You are missing something. Now go look for it.\")\n\n def leave(self):\n \"\"\"\n Method for handling leaving room\n \"\"\"\n self.inroom = False # Setting room as exited\n self.enemymovement.engaged = None # Player no longer engaged\n self.enemymovement.engage = False # Player is no longer engaged\n # Player no longer satisfies win condition\n if \"Leave Dungeon\" in self.character.actions:\n self.character.actions.remove(\"Leave Dungeon\")\n\n def enter(self):\n \"\"\"Method handling for when entering room\"\"\"\n self.inroom = True # Setting player as in room\n # Is this the first time player has entered room?\n if self.first:\n self.first = False # No longer first time\n print(\"\\nYou have discovered a new room\")\n print(f\"You are now in a {self.roomtype[0]}\")\n # Events for Monster Room\n if self.roomtype[0] == \"Monster Room\":\n # Create random enemy object\n enemyname = Joshua_Liu_Enemy.ENEMIESLIST[randint(0, 4)]\n enemy = Enemy(enemyname,\n Joshua_Liu_Enemy.ENEMIES[enemyname],\n [self.pos[1], self.pos[0]],\n self.character)\n print(f\"You have encountered a {enemy.name}\")\n # Setting player as engaged against enemy\n self.enemymovement.engaged = enemy\n # Setting player as engaged\n self.enemymovement.engage = True\n # Setting enemy in room as this enemy\n self.enemie = enemy\n # Events for Boss Room\n elif self.roomtype[0] == \"Boss Room\":\n # Create random boss object\n enemyname = Joshua_Liu_Enemy.BOSSLIST[randint(0, 3)]\n enemy = Boss(enemyname,\n Joshua_Liu_Enemy.BOSS[enemyname],\n [self.pos[1], self.pos[0]],\n self.character)\n print(f\"You have encountered a {enemy.name}\")\n # Setting player as engaged against boss\n self.enemymovement.engaged = enemy\n # Setting player as engaged\n self.enemymovement.engage = True\n # Setting enemy in room as this boss\n self.enemie = enemy\n # Events for Trap Room\n elif self.roomtype[0] == \"Trap Room\":\n self.trap() # Triggering trap\n # Events for 
Regular and Index Room\n            elif self.roomtype[0] == \"Regular Room\" or\\\n                    self.roomtype[0] == \"Index\":\n                return\n            # Events for Treasure Room\n            elif self.roomtype[0] == \"Treasure Room\":\n                # Spawning treasure\n                treasure = randint(0, 4)  # index in list\n                itemlist = []  # list of spawnable items\n                for key in self.ITEMS:\n                    itemlist.append(key)\n                self.items.append(itemlist[treasure])\n                # Chance for a second treasure to spawn in\n                if randint(0, 4) == 1:\n                    treasure = randint(0, 4)  # index in list\n                    itemlist = []  # list of spawnable items\n                    for key in self.ITEMS:\n                        itemlist.append(key)\n                    self.items.append(itemlist[treasure])\n            # Events for Exit\n            elif self.roomtype[0] == \"Exit Room\":\n                print(\"\\nYou feel a need to be here\")\n                self.character.actions.append(\"Leave Dungeon\")\n                return\n        # This is NOT the first time player has been in room\n        else:\n            print(f\"You are now in a {self.roomtype[0]}\")\n            # Events for Monster Room\n            if self.roomtype[0] == \"Monster Room\":\n                # Is there still an enemy here?\n                if self.enemie is not None:\n                    # Is the enemy dead?\n                    if self.enemie.hp > 0:  # No\n                        print(self.enemie.hp)\n                        print(f\"\\nYou have encountered a \"\n                              f\"{self.enemie.name}\")\n                        self.enemymovement.engaged = self.enemie\n                        self.enemymovement.engage = True\n                    else:  # Yes\n                        del self.enemie  # Deleting enemy object\n                        self.enemie = None\n                else:\n                    pass\n            # Events for Boss Room\n            elif self.roomtype[0] == \"Boss Room\":\n                print(\"Entering boss room\")\n                if self.enemie is not None:\n                    # Has the boss in the room died?\n                    if self.enemie.hp > 0:  # No\n                        print(self.enemie.hp)\n                        print(f\"\\nYou have encountered a \"\n                              f\"{self.enemie.name}\")\n                        self.enemymovement.engaged = self.enemie\n                        self.enemymovement.engage = True\n                    else:  # Yes\n                        del self.enemie  # Deleting boss object\n                        self.enemie = None\n            # Events for Trap Room\n            elif self.roomtype[0] == \"Trap Room\":\n                self.trap()\n            # Events for Regular and Index Room\n            elif self.roomtype[0] == \"Regular Room\" or \\\n                    self.roomtype[0] == \"Index\":\n                return\n            # Events for Treasure Room\n            elif self.roomtype[0] == \"Treasure Room\":\n                return\n            # Events for Exit Room\n            elif self.roomtype[0] == \"Exit Room\":\n                self.character.actions.append(\"Leave Dungeon\")\n                return\n","repo_name":"eeei3/Josh-Game","sub_path":"Joshua_Liu_Map.py","file_name":"Joshua_Liu_Map.py","file_ext":"py","file_size_in_byte":13599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30283663331","text":"from importlib import import_module\nfrom inspect import isclass\nimport os\nfrom os import walk\nfrom os.path import abspath, basename, dirname, join\nfrom sys import modules\n\nfrom flask_sqlalchemy import Model\n\n__all__ = ('get_models', 'load_models')\n\nPROJ_DIR = abspath(join(dirname(abspath(__file__)), '../../'))\nAPP_MODULE = basename(PROJ_DIR)\n\n\ndef get_modules(module):\n    \"\"\"Returns all .py modules in given file_dir that are not __init__.\"\"\"\n\n    for module_dirname in os.listdir(PROJ_DIR):\n        file_dir = abspath(join(PROJ_DIR, module_dirname, module))\n\n        for root, dirnames, files in walk(file_dir):\n            module_name = root.split(PROJ_DIR)[1]\n            mod_path = f'{APP_MODULE}{module_name}'.replace('/', '.')\n            for filename in files:\n                if filename.endswith('.py') and not filename.startswith('__init__'):\n                    yield '.'.join([mod_path, filename[0:-3]])\n\n\ndef dynamic_loader(module, compare):\n    \"\"\"Iterates over all .py files in `module` directory, finding all classes that\n    match `compare` function.\n    Other classes/objects in the module directory will 
be ignored.\n\n    Returns unique items found.\n    \"\"\"\n    items = []\n\n    for mod in get_modules(module):\n        # bind the imported module to a new name: rebinding ``module`` would\n        # confuse the still-running get_modules() generator, which reads the\n        # ``module`` argument lazily on every iteration\n        imported = import_module(mod)\n        if hasattr(imported, '__all__'):\n            objs = [getattr(imported, obj) for obj in imported.__all__]\n            items += [o for o in objs if compare(o) and o not in items]\n\n    return items\n\n\ndef get_models():\n    \"\"\"Dynamic model finder.\"\"\"\n    return dynamic_loader('models', is_model)\n\n\ndef is_model(item):\n    \"\"\"Determines if `item` is a `db.Model`.\"\"\"\n    return isclass(item) and issubclass(item, Model) and not item.__ignore__()\n\n\ndef load_models():\n    \"\"\"Load application models for management script & app availability.\"\"\"\n    for model in get_models():\n        setattr(modules[__name__], model.__name__, model)\n","repo_name":"bekhzod91/snoopy","sub_path":"snoopy/core/database/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"25536867805","text":"\"\"\"\nSolve and analyze the linear equation ax + b = 0.\n\"\"\"\ndef Show(a, b):\n    return -b/a\n\n\nif __name__ == \"__main__\":\n    while True:\n        try:\n            a, b = map(float, input(\"Enter numbers a, b (a != 0): \").split())\n            if a == 0:\n                raise ValueError\n            break\n        except ValueError:\n            print(\"\\nTry Again!\")\n\n    print(f\"The solution of equation {a}x + {b} = 0 is {Show(a, b)}\")","repo_name":"trongminh108/1000-exercises","sub_path":"Chapter_3/BT84.py","file_name":"BT84.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24636193942","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('Exercise', '0001_initial'),\n        ('Lessons', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.CreateModel(\n            name='Unit',\n            fields=[\n                ('unit_id', models.AutoField(serialize=False, verbose_name=b'Unit ID', primary_key=True)),\n                ('title', models.CharField(max_length=100, verbose_name=b'Unit Title')),\n                ('lessons', models.ManyToManyField(to='Lessons.Lesson', blank=True)),\n            ],\n            options={\n            },\n            bases=(models.Model,),\n        ),\n        migrations.RemoveField(\n            model_name='lesson',\n            name='chapter_title',\n        ),\n        migrations.RemoveField(\n            model_name='lesson',\n            name='course_title',\n        ),\n        migrations.RemoveField(\n            model_name='lesson',\n            name='lesson_path',\n        ),\n        migrations.AddField(\n            model_name='lesson',\n            name='assignments',\n            field=models.ManyToManyField(to='Exercise.Exercise', blank=True),\n            preserve_default=True,\n        ),\n        migrations.AddField(\n            model_name='lesson',\n            name='briefing',\n            field=models.TextField(default=b'', verbose_name=b'Briefing'),\n            preserve_default=True,\n        ),\n        migrations.AddField(\n            model_name='lesson',\n            name='description',\n            field=models.CharField(default=b'', max_length=100, verbose_name=b'Lesson Description'),\n            preserve_default=True,\n        ),\n        migrations.AddField(\n            model_name='lesson',\n            name='introduction',\n            field=models.TextField(default=b'', verbose_name=b'Introduction'),\n            preserve_default=True,\n        ),\n    ]\n","repo_name":"CodeAton1/codeaton","sub_path":"Lessons/migrations/0002_auto_20150323_1422.py","file_name":"0002_auto_20150323_1422.py","file_ext":"py","file_size_in_byte":1952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18085489943","text":"# prac9.py\r\n# 9. 
Read a number of days (a day count) from the user and print how many\r\n# hours, minutes, and seconds that many days amount to\r\n\r\ndate = int(input())\r\nhour = date * 24\r\nminute = hour * 60\r\nsec = minute * 60\r\nprint(str(hour) + ' hours, ' + str(minute) + ' minutes, ' + str(sec) + ' seconds')\r\n\r\n\r\n\r\n","repo_name":"yejinida/study","sub_path":"python/prac9.py","file_name":"prac9.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5643150491","text":"import unittest\n\nfrom kuberoute.dns import Record\nfrom kuberoute.service import (get_host_ip, get_name_record_updates,\n                               get_pods_for_service, has_label,\n                               is_node_port_service, is_pod_ready,\n                               is_pod_running, service_selector)\nfrom kuberoute.util import safeget\n\n\ndef add_if_missing(d, path, value):\n    for p in path[:-1]:\n        try:\n            d = d[p]\n        except KeyError:\n            d[p] = dict()\n            d = d[p]\n    try:\n        return d[path[-1]]\n    except KeyError:\n        d[path[-1]] = value\n        return value\n\n\nclass MockAPIObject(object):\n    def __init__(self, specs):\n        self.obj = specs\n\n\nclass MockPodObject(MockAPIObject):\n    def __init__(self, specs):\n        super().__init__(specs)\n        if 'status' not in self.obj:\n            self.obj['status'] = {}\n        self.obj['status']['phase'] = safeget(specs, 'status', 'phase', default_value='Running')\n        try:\n            conds = self.obj['status']['conditions']\n        except KeyError:\n            self.obj['status']['conditions'] = []\n            conds = self.obj['status']['conditions']\n        if len([ d for d in conds if 'type' in d and d['type'] == 'Ready']) == 0:\n            conds += [{\n                'type': 'Ready',\n                'status': 'True',\n            }]\n\n\nTESTSERVICE=MockAPIObject({\n    'apiVersion': 'v1',\n    'metadata': {\n        'name': 'testservice',\n        'namespace': 'default',\n        'labels': {\n            'label1': 'value1',\n            'kuberoute_domain': 'domain',\n            'kuberoute_name': 'name',\n            'kuberoute_failover': 'failover.url',\n        },\n    },\n    'spec': {\n        'selector': {\n            'selectorlabel': 'selectorvalue'\n        },\n        'type': 'NodePort',\n    },\n})\n\nTESTSERVICE_REPLACE=MockAPIObject({\n    'apiVersion': 'v1',\n    'metadata': {\n        'name': 'testservice',\n        'namespace': 'default',\n        'labels': {\n            'label1': 'value1',\n            'kuberoute_domain': 'domain',\n            'kuberoute_name': '_TEMPLATE_START_name_replace_TEMPLATE_END_',\n            'kuberoute_failover': 'failover.url',\n        },\n    },\n    'spec': {\n        'selector': {\n            'selectorlabel': 'selectorvalue'\n        },\n        'type': 'NodePort',\n    },\n})\n\nTESTPOD_IN_SERVICE=MockPodObject({\n    'apiVersion': 'v1',\n    'metadata': {\n        'name': 'testpod',\n        'namespace': 'default',\n        'labels': {\n            'selectorlabel': 'selectorvalue',\n            'slabel2': 'svalue2',\n        }\n    },\n    'spec': {\n        'containers': [\n            {\n                'image': 'testimage',\n                'name': 'testapp'\n            }\n        ]\n    },\n    'status': {\n        'hostIP': '1.0.0.0',\n    }\n})\n\nTESTPOD_TERMINATING=MockPodObject({\n    'apiVersion': 'v1',\n    'metadata': {\n        'name': 'testpod',\n        'namespace': 'default',\n        'labels': {\n            'selectorlabel': 'selectorvalue',\n            'slabel2': 'svalue2',\n        }\n    },\n    'spec': {\n        'containers': [\n            {\n                'image': 'testimage',\n                'name': 'testapp'\n            }\n        ]\n    },\n    'status': {\n        'hostIP': '1.0.0.0',\n        'phase': 'Terminating',\n    }\n})\n\nTESTPOD_NOT_READY=MockPodObject({\n    'apiVersion': 'v1',\n    'metadata': {\n        'name': 'testpod',\n        'namespace': 'default',\n        'labels': {\n            'selectorlabel': 'selectorvalue',\n            'slabel2': 'svalue2',\n        }\n    },\n    'spec': {\n        'containers': [\n            {\n                'image': 'testimage',\n                'name': 'testapp'\n            }\n        ]\n    },\n    'status': {\n        'hostIP': '1.0.0.0',\n        'conditions': [\n            {\n                'type': 'Ready',\n                'status': 'False',\n            }\n        ]\n    }\n})\n\nTESTPOD_NOT_IN_SERVICE=MockPodObject({\n    'apiVersion': 
'v1',\n 'metadata': {\n 'name': 'testpod2',\n 'namespace': 'default',\n 'labels': {\n 'slabel2': 'svalue2',\n }\n },\n 'spec': {\n 'containers': [\n {\n 'image': 'testimage',\n 'name': 'testapp'\n }\n ]\n },\n 'status': {\n 'hostIP': '1.0.0.1'\n }\n})\n\nTESTPOD_IN_OTHER_NAMESPACE=MockPodObject({\n 'apiVersion': 'v1',\n 'metadata': {\n 'name': 'testpod',\n 'namespace': 'not-default',\n 'labels': {\n 'selectorlabel': 'selectorvalue',\n 'slabel2': 'svalue2',\n }\n },\n 'spec': {\n 'containers': [\n {\n 'image': 'testimage',\n 'name': 'testapp'\n }\n ]\n },\n 'status': {\n 'hostIP': '1.0.0.0'\n }\n})\n\nTEST_NODE_1={\n 'status': {\n 'addresses': [\n {\n 'address': '1.0.0.0',\n 'type': 'InternalIP',\n },\n ],\n },\n}\n\nTEST_NODE_2={\n 'spec': {\n 'unschedulable': True,\n },\n 'status': {\n 'addresses': [\n {\n 'address': '1.0.0.1',\n 'type': 'InternalIP',\n },\n ],\n },\n}\n\nTEST_NODES = [ TEST_NODE_1, TEST_NODE_2 ]\n\n\nclass ServiceTests(unittest.TestCase):\n def setUp(self):\n self.service = TESTSERVICE\n self.pod = TESTPOD_IN_SERVICE\n self.pod2 = TESTPOD_NOT_IN_SERVICE\n self.pod3 = TESTPOD_IN_OTHER_NAMESPACE\n self.pods = [ self.pod, self.pod2, self.pod3 ]\n self.nodes = TEST_NODES\n\n def test_has_label(self):\n self.assertTrue(has_label('label1', self.service))\n\n def test_service_selector(self):\n selector = service_selector(self.service)\n self.assertEqual(1, len(selector.keys()))\n self.assertTrue('selectorlabel' in selector.keys())\n self.assertTrue('selectorvalue' in selector.values())\n\n def test_get_pods_for_service(self):\n filtered_pods = get_pods_for_service(self.service,\n [self.pod, self.pod2])\n self.assertTrue(\n self.pod in filtered_pods,\n msg='self.pod should be in service'\n )\n self.assertFalse(\n self.pod2 in filtered_pods,\n msg='self.pod2 should not be connected to the service'\n )\n\n def test_get_pods_for_service_empty_list(self):\n filtered_pods = get_pods_for_service(self.service, [])\n self.assertEqual(len(filtered_pods), 0)\n\n\n def test_get_pods_for_service_other_namespace(self):\n filtered_pods = get_pods_for_service(self.service, self.pods)\n self.assertEqual(len(filtered_pods), 1)\n\n\n def test_get_host_ip(self):\n self.assertEqual(get_host_ip(self.pod), '1.0.0.0')\n\n def test_is_node_port_service(self):\n self.assertTrue(is_node_port_service(self.service))\n self.assertFalse(is_node_port_service(self.pod))\n\n def test_get_name_record_updates_empty(self):\n self.assertEqual(\n {},\n get_name_record_updates(\n [],\n [],\n [],\n 'kuberoute_domain',\n 'kuberoute_name',\n 'kuberoute_failover',\n 'kuberoute_quota'\n )\n )\n\n def test_get_name_record_updates(self):\n self.assertEqual(\n {\n 'domain': [Record(\n name='name',\n domain='domain',\n addresses=['1.0.0.0'],\n failover='failover.url'\n )],\n },\n get_name_record_updates(\n [self.service],\n [self.pod, self.pod2],\n self.nodes,\n 'kuberoute_domain',\n 'kuberoute_name',\n 'kuberoute_failover',\n 'kuberoute_quota',\n )\n )\n\n def test_get_name_record_replacement(self):\n self.assertEqual(\n {\n 'domain': [Record(\n name='name',\n domain='domain',\n addresses=['1.0.0.0'],\n failover='failover.url'\n )],\n },\n get_name_record_updates(\n [TESTSERVICE_REPLACE],\n [self.pod, self.pod2],\n self.nodes,\n 'kuberoute_domain',\n 'kuberoute_name',\n 'kuberoute_failover',\n 'kuberoute_quota',\n replacements={ 'name_replace': 'name'}\n )\n )\n\n def test_is_pod_running_false(self):\n self.assertFalse(is_pod_running(TESTPOD_TERMINATING))\n\n def test_is_pod_running_true(self):\n 
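        # MockPodObject (defined above) fills in a default 'Running' phase and a
        # 'Ready' condition via safeget, so TESTPOD_IN_SERVICE counts as running
        # even though its spec never sets status.phase explicitly.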
self.assertTrue(is_pod_running(TESTPOD_IN_SERVICE))\n\n    def test_is_pod_ready_false(self):\n        self.assertFalse(is_pod_ready(TESTPOD_NOT_READY))\n\n    def test_is_pod_ready_true(self):\n        self.assertTrue(is_pod_ready(TESTPOD_IN_SERVICE))\n\n    def test_add_if_missing(self):\n        d = {}\n        add_if_missing(d,['a'],1)\n        self.assertTrue('a' in d)\n\n    def test_add_if_missing_nested(self):\n        d = {}\n        add_if_missing(d,['a','b'],1)\n        self.assertTrue('b' in d['a'])\n","repo_name":"schneevonmorgen/kuberoute","sub_path":"tests/test_service.py","file_name":"test_service.py","file_ext":"py","file_size_in_byte":9233,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"29936857769","text":"import cv2\n\n# assumed input: the original file uses `img` without defining it, so a\n# grayscale image must be loaded first (the file name here is a placeholder)\nimg = cv2.imread('image.png', cv2.IMREAD_GRAYSCALE)\n\nhistogram = []\n\nfor j in range(0, 256):\n    histogram.append(0)\n#print(img.shape)\nfor x in range(0, len(img[0])):\n    for y in range(0, len(img)):\n        histogram[img[y][x]] = histogram[img[y][x]] + 1\nprint(histogram)\n\n","repo_name":"khanhhoaa19/python","sub_path":"Lesson1/Histogram.py","file_name":"Histogram.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70949659987","text":"#!/Users/rosen59250/Desktop/AllCodingStuffs/APIPageFlaskWebsite/bin/python3.8\n\n# MUST GO IN DASHBOARD.HTML:
#   - Log Out link\n\n# --------------- IMPORTS ----------------\n\nimport pygal\nimport os\nfrom forms import LoginForm, RegisterForm, RevalidateForm\nfrom flask import Flask, redirect, url_for, render_template, request, \\\n    jsonify, send_from_directory\nimport googleNewsScraper\nfrom flask_bootstrap import Bootstrap\nfrom flask_sqlalchemy import SQLAlchemy\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom flask_login import LoginManager, UserMixin, login_user, login_required, \\\n    logout_user, current_user\nimport jwt\nimport datetime\nfrom functools import wraps\nfrom flask_mail import Mail, Message\nfrom itsdangerous import URLSafeTimedSerializer, SignatureExpired\nfrom flask_wtf.csrf import CSRFProtect\nfrom random import choice\nfrom flask_admin.contrib.sqla import ModelView\nfrom flask_admin import Admin\nexampleQueries = ['coronavirus',\n                  'cnn',\n                  'fox news',\n                  'zoo',\n                  'iphone',\n                  'android',\n                  'forbes',\n                  'entertainment',\n                  'england',\n                  'technology',\n                  'bbc',\n                  'google',\n                  'amazon',\n                  'wall street',\n                  'latin america',\n                  'australia',\n                  'science',\n                  'physics',\n                  'chemistry',\n                  'dow jones']\n\n# ---------------- INIT APP -----------------\n\nemailAddress = os.environ.get(\"EMAIL_ADDRESS\")\nsecret = os.environ.get(\"MFS\")\nemailPassword = os.environ.get(\"MAIL_APP_PWORD\")\n#print(emailPassword)\n#print(secret)\n#print(emailAddress)\n\napp = Flask(__name__, subdomain_matching=True)\nBootstrap(app)\n\napp.config[\"SERVER_NAME\"] = \"localhost:5000\"\napp.config['JSON_SORT_KEYS'] = False\napp.config['SQLALCHEMY_DATABASE_URI'] \\\n= 'sqlite://///Users/rosen59250/Desktop/AllCodingStuffs/MediaFetch/database.db'\nSQLALCHEMY_TRACK_MODIFICATIONS = False\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login'\nadmin = Admin(app, template_mode='bootstrap3')\n\napp.config['SECRET_KEY'] = secret #SET SECRET TO ANYTHING\napp.config['MAIL_SERVER'] = 'smtp.gmail.com'\napp.config['MAIL_PORT'] = 465\napp.config['MAIL_USERNAME'] = emailAddress # EMAIL NEEDED TO SIGN UP\napp.config['MAIL_PASSWORD'] = emailPassword # PASSWORD NEEDED\napp.config['MAIL_USE_TLS'] = False\napp.config['MAIL_USE_SSL'] = True\n\ncsrf = CSRFProtect(app)\ncsrf.init_app(app)\n\nmail = Mail(app)\n\ns = URLSafeTimedSerializer(app.config['SECRET_KEY'])\n\n# ---------------- DATABASE ------------\n\nclass User(UserMixin, db.Model):\n    id = db.Column(db.Integer, primary_key=True)\n    username = db.Column(db.String(15), unique=True)\n    email = db.Column(db.String(50), unique=True)\n    password = db.Column(db.String(80))\n    admin = db.Column(db.Boolean)\n    confirmed_email = db.Column(db.Boolean)\n\nclass Calls(UserMixin, db.Model):\n    id = db.Column(db.Integer, primary_key=True, unique=True)\n    user = db.Column(db.String(20), primary_key=False, unique=False)\n    day = db.Column(db.String(15), unique=False)\n    fullTime = db.Column(db.String(40), unique=False)\n    query = db.Column(db.String(20000), unique=False)\n    timeInfo = db.Column(db.String(20000), unique=False)\n    excluding = db.Column(db.String(20000), unique=False)\n    requiring = db.Column(db.String(20000), unique=False)\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n    return User.query.get(int(user_id))\n\n# ---------------- DECORATOR -------------\n\ndef token_required(f):\n    @wraps(f)\n    def decorated(*args, **kwargs):\n        token = None\n\n        if request.args.get('token'):\n            token = request.args.get('token')\n        if not token:\n            return jsonify({'status': 
'error', 'error': \\\n 'api token is missing'}), 401\n\n try:\n data = jwt.decode(token, app.config['SECRET_KEY'])\n user = User.query.filter_by(\n username=data['username']).first()\n if not user:\n return jsonify({'status': 'error', 'error': 'api token is invalid'})\\\n , 401\n\n except:\n return jsonify({'status': 'error', 'error': 'api token is invalid'})\\\n , 401\n\n return f(current_user, *args, **kwargs)\n\n return decorated\n\n\ndef validated_required(f):\n @wraps(f)\n def decorated(*args, **kwargs):\n if current_user.confirmed_email:\n print('')\n else:\n form = RevalidateForm()\n # this is temp\n return render_template('invalidated.html', form=form)\n\n return f(current_user, *args, **kwargs)\n\n return decorated\n\n# ----------------- ROUTES -------------\n\ndef timeList(time):\n list = time.split('-')\n for i in range(len(list)):\n list[i] = int(list[i])\n return list\n\n@app.route('/totalGraph.svg')\ndef visual():\n calls = db.session.query(Calls).all()\n\n startStr = calls[0].__dict__['day']\n endStr = calls[len(calls) - 1].__dict__['day']\n\n start = timeList(startStr)\n end = timeList(endStr)\n\n line_chart = pygal.Line(fill=True, \\\n interpolate='cubic', \\\n style=pygal.style.DarkSolarizedStyle)\n line_chart.title = \"Number of API calls per day (all users)\"\n starting = datetime.date(start[0], start[1], start[2])\n ending = datetime.date(end[0], end[1], end[2])\n delta = datetime.timedelta(days=1)\n labels = []\n labels.append(str(starting - delta))\n while starting <= ending:\n labels.append(str(starting))\n starting += delta\n\n line_chart.x_labels = labels\n\n calls_per_day = {label:0 for label in labels}\n for i in range(len(labels)):\n for j in range(len(calls)):\n if calls[j].__dict__['day'] == labels[i]:\n calls_per_day[labels[i]] += 1\n line_chart.add('API Calls', [i for i in calls_per_day.values()])\n\n return line_chart.render(disable_xml_declaration=True, fill=True)\n\n@app.route('/userGraph.svg')\n@login_required\ndef visualUser():\n calls = db.session.query(Calls).all()\n for i in range(len(calls)):\n if calls[i].__dict__['user'] == current_user.username:\n startStr = calls[i].__dict__['day']\n break\n\n for i in range(len(calls)):\n if calls[i].__dict__['user'] == current_user.username:\n endStr = calls[i].__dict__['day']\n\n start = timeList(startStr)\n end = timeList(endStr)\n\n line_chart = pygal.Line(fill=True, \\\n interpolate='cubic', \\\n style=pygal.style.DarkSolarizedStyle)\n line_chart.title = f\"Number of API calls per day (User: {current_user.username})\"\n starting = datetime.date(start[0], start[1], start[2])\n ending = datetime.date(end[0], end[1], end[2])\n delta = datetime.timedelta(days=1)\n labels = []\n labels.append(str(starting - delta))\n while starting <= ending:\n labels.append(str(starting))\n starting += delta\n\n line_chart.x_labels = labels\n\n calls_per_day = {label:0 for label in labels}\n for i in range(len(labels)):\n for j in range(len(calls)):\n if calls[j].__dict__['day'] == labels[i]:\n if calls[j].__dict__['user'] == current_user.username:\n calls_per_day[labels[i]] += 1\n line_chart.add('API Calls', [i for i in calls_per_day.values()])\n\n return line_chart.render(disable_xml_declaration=True, fill=True)\n\n\n@app.route('/')\ndef homePage():\n return render_template('index.html', logged_in=not \\\n current_user.is_authenticated)\n\n\n@app.route('/login/', methods=['GET', 'POST'])\n# @validated_required\ndef login():\n form = LoginForm()\n\n if form.validate_on_submit():\n user = 
User.query.filter_by(username=form.username.data).first()\n if user:\n if check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n return redirect(url_for('dashboard'))\n\n return render_template('login.html', form=form, \\\n error='Invalid username or password')\n\n return render_template('login.html', form=form, error='')\n\n\n@app.route('/regenerate_email_token/', methods=['GET', 'POST'])\n# @login_required\ndef regen():\n\n form = RevalidateForm()\n\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n\n if user is not None:\n emailConfToken = s.dumps(user.email, salt='email-confirm')\n\n link = url_for(f'confirm_email',\n token=emailConfToken, _external=True)\n\n regenUrl = url_for('regen', _external=True)\n\n msg = Message('Confirm Email (resent)',\n sender='mail4jasonr@gmail.com', \\\n recipients=user.email.split())\n msg.body = f'Hello {user.username},\\n\\nWelcome to MediaFetch!\\n\\n' \\\n f'Your email confirmation link is {link} and it will expire in'\\\n ' 30 minutes.\\n\\n' \\\n f'Press this link to get a new URL:\\n{regenUrl}\\n\\nThanks for' \\\n 'signing up,\\n The MediaFetch Team'\n mail.send(msg)\n\n return render_template('sent.html', logged_in=not \\\n current_user.is_authenticated, correct=True)\n else:\n return render_template('sent.html', logged_in=not current_user\\\n .is_authenticated, correct=False)\n return render_template('invalidated.html', form=form)\n\n\n@app.route('/signup/', methods=['GET', 'POST'])\ndef signup():\n form = RegisterForm()\n\n if form.validate_on_submit():\n hashed_password = generate_password_hash(\n form.password.data, method='sha256')\n if User.query.filter_by(email=form.email.data).count() > 0 or \\\n User.query.filter_by(email=form.username.data).count() > 0:\n return \"username or email already in use\"\n new_user = User(username=form.username.data,\n email=form.email.data, password=hashed_password, \\\n admin=False, confirmed_email=False)\n db.session.add(new_user)\n db.session.commit()\n token = jwt.encode({'username': form.username.data,\n 'email': form.email.data}, app.config['SECRET_KEY'])\n token = str(token).replace(\"b'\", \"\")\n token = str(token).replace(\"'\", \"\")\n\n emailConfToken = s.dumps(form.email.data, salt='email-confirm')\n\n msg = Message('Confirm Email', sender='mail4jasonr@gmail.com',\n recipients=form.email.data.split())\n\n link = url_for(f'confirm_email', token=emailConfToken, _external=True)\n\n regenUrl = url_for('regen', _external=True)\n\n msg.body = f'Hello {form.username.data},\\n\\nWelcome to MediaFetch!\\n\\n'\\\n f'Your email confirmation link is {link} and it will expire '\\\n 'in 30 minutes.\\n\\n' \\\n f'Press this link to get a new URL:\\n{regenUrl}\\n\\nThanks for'\\\n ' signing up,\\n The MediaFetch Team'\n\n mail.send(msg)\n return f'

New user has been created. Your email confirmation key is: {emailConfToken}. Your API key is: {token}
    '\n # TODO: MAKE THIS A PAGE FOR NEW USER\n\n return render_template('signup.html', form=form)\n# email confirmation route\n\n\n@app.route('/confirm_email//')\ndef confirm_email(token):\n try:\n email = s.loads(token, salt='email-confirm', max_age=30 * 60)\n # user = User.query.filter_by(username=.email)\n current_user.confirmed_email = True\n db.session.commit()\n return 'the token works!'\n except SignatureExpired:\n return 'You waited too long and your confirm email expired'\n# rest of the app\n\n\n@app.route('/dashboard/')\n@login_required # very important for *all* screens behind a login\n@validated_required\ndef dashboard(current_user):\n global exampleQueries\n tokenUser = User.query.filter_by(username=current_user.username).first()\n token = jwt.encode({'username': tokenUser.username,\n 'email': tokenUser.email}, app.config['SECRET_KEY'])\n token = str(token).replace(\"b'\", \"\")\n token = str(token).replace(\"'\", \"\")\n return render_template('dashboard.html', name=current_user.username, \\\n token=token, query=choice(exampleQueries))\n\n@app.route('/api')\n@token_required\ndef api(current_user):\n query = request.args.get('query')\n timeAgo = request.args.get('time')\n excluding = request.args.get('exclude')\n requiring = request.args.get('require')\n if query is None:\n return {\"status\": \"error\",\n \"error\": \"no query made\",\n \"articlesFound\": 0,\n \"articles\": []}\n if timeAgo is None:\n timeAgo = ''\n if excluding is None:\n excluding = ''\n if requiring is None:\n requiring = ''\n token = request.args.get('token')\n time = datetime.datetime.utcnow()\n day = time.strftime('%Y-%m-%d')\n data = jwt.decode(token, app.config['SECRET_KEY'])\n call = Calls(user=data['username'], \\\n day=day, \\\n query=query, \\\n timeInfo=timeAgo, \\\n excluding=excluding, \\\n requiring=requiring, \\\n fullTime=time.ctime())\n db.session.add(call)\n db.session.commit()\n try:\n api = googleNewsScraper.searchGNews(query, timeAgo, excluding, requiring)\n return api\n except:\n data = {}\n data['status'] = 'error'\n data['error'] = 'error on the server. 
contact us to let us know'\n data['articlesFound'] = 0\n data['articles'] = []\n return data\n\n\n@app.route('/logout/')\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for('homePage'))\n\n\n@app.route('/documentation/',)\ndef documentation():\n logged_in = not current_user.is_authenticated\n if not logged_in:\n token = jwt.encode({'username': current_user.username,\n 'email': current_user.email}, \\\n app.config['SECRET_KEY'])\n token = str(token).replace(\"b'\", \"\")\n token = str(token).replace(\"'\", \"\")\n return render_template('documentation.html', token=token, \\\n logged_in=logged_in)\n else:\n return render_template('documentation.html', token='TOKEN.HERE', \\\n logged_in=logged_in)\n\n\n@app.route('/logo.png')\ndef logo():\n return send_from_directory(os.path.join(app.root_path, 'static'),\n 'images/MediaFetch.png', \\\n mimetype='image/png')\n\n@app.route('/favicon.ico')\ndef favicon():\n return send_from_directory(os.path.join(app.root_path, 'static'),\n 'images/MediaFetch.ico', \\\n mimetype='image/vnd.microsoft.icon')\n\n# ADMIN AND 404\n\n\nclass AdminModelView(ModelView):\n def is_accessible(self):\n\n if current_user.is_authenticated and not current_user.is_anonymous:\n user = User.query.filter_by(username=current_user.username).first()\n if user.admin:\n return True\n else:\n return False\n else:\n return False\n\n\n column_searchable_list = ['username', 'email']\n column_filters = ['admin', 'confirmed_email']\n page_size = 50\n\nclass CallsModelView(ModelView):\n def is_accessible(self):\n\n if current_user.is_authenticated and not current_user.is_anonymous:\n user = User.query.filter_by(username=current_user.username).first()\n if user.admin:\n return True\n else:\n return False\n else:\n return False\n\n\n column_filters = ['id']\n page_size = 50\n\n\n\nadmin.add_view(AdminModelView(User, db.session))\nadmin.add_view(CallsModelView(Calls, db.session))\n\n\n@app.errorhandler(404)\ndef not_found(e):\n return render_template(\"404.html\", logged_in=not \\\n current_user.is_authenticated)\n\n\ndef getApp():\n return app\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n","repo_name":"jso8910/MediaFetch","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":16670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72797612307","text":"'''Uma revendedora de carros usados paga a seus funcionários vendedores um salário fixo por mês, mais uma comissão também fixa para cada carro vendido e mais 5% do valor das vendas por ele efetuadas. Escrever um algoritmo que leia o número de carros por ele vendidos, o valor total de suas vendas, o salário fixo e o valor que ele recebe por carro vendido. 
Calcule e escreva o salário final do vendedor.'''\r\n\r\ndef main():\r\n #declaração de variáveis\r\n salario = int(0)\r\n comissao = int(0)\r\n \r\n #entrada de dados\r\n qntdvendas = int(input(\"Quantidade de vendas: \"))\r\n totalvendas = int(input(\"Valor total das vendas: \"))\r\n salario = int(input(\"Salário fixo: \"))\r\n comissaofixa = int(input(\"Comissão fixa: \"))\r\n \r\n #processamento\r\n vendasefetuadas = totalvendas * (5 /100)\r\n comissaovendas = qntdvendas * comissaofixa\r\n salariototal = vendasefetuadas + comissaovendas + salario\r\n \r\n #saída de dados\r\n print(f'Salário total: {salariototal}')\r\n return 0\r\n \r\nif __name__ == \"__main__\":\r\n main()","repo_name":"saraivagustavo/LVPs","sub_path":"LVPs-Introdução/lvp9.py","file_name":"lvp9.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35627407655","text":"# This sample tests a case where a __new__ method in a generic class\n# returns an instance of the class but with different type arguments\n# than expected. This is arguably an error case, but pyright needs\n# to handle it gracefully.\n\nfrom __future__ import annotations\nfrom typing import Generic, TypeVar\n\n\nT = TypeVar(\"T\", contravariant=True)\nS = TypeVar(\"S\", contravariant=True)\n\n\nclass ClassA(Generic[T]):\n ...\n\n\nclass ClassB(Generic[S, T], ClassA[T]):\n ...\n\n\nclass ClassC(ClassB[S, T]):\n def __new__(cls, subcon: ClassA[S]) -> ClassC[S, list[S]]:\n ...\n\n\nclass ClassD(ClassB[S, T]):\n def __new__(cls, subcon: ClassA[S]) -> ClassD[S, list[S]]:\n ...\n\n\nc = ClassA[int]()\n\nintermediate = ClassC(c)\nv1 = ClassD(intermediate)\nreveal_type(v1, expected_text=\"ClassD[list[int], list[list[int]]]\")\n\nv2 = ClassD(ClassC(c))\nreveal_type(v2, expected_text=\"ClassD[list[int], list[list[int]]]\")\n","repo_name":"microsoft/pyright","sub_path":"packages/pyright-internal/src/tests/samples/constructor27.py","file_name":"constructor27.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":11208,"dataset":"github-code","pt":"48"} +{"seq_id":"21115672642","text":"import os\nimport re\n\nIN_FILE = \"../1_raw/EnUs_cmudict/cmudict.dict\"\nOUT_FILE_1 = \"../2_preprocessed/EnUs_cmudict/EnUs_dict.txt\"\nOUT_FILE_2 = \"../3_train_and_eval_data/EnUs/EnUs_dict_exclude_polyphone.txt\"\n\nmonophone_dict, polyphone_dict = {}, {}\nwith open(IN_FILE, encoding=\"utf-8\", mode=\"r\") as f:\n lines = f.readlines()\n for i, line in enumerate(lines):\n # INPUT line format: word(x) phone1 phone2 phone3 ...\n word_iden, phones = line.strip().split()[0], line.strip().split()[1:]\n if \"(\" in word_iden:\n assert word_iden.count(\"(\") == 1 and word_iden.endswith(\")\"), i\n word, iden = word_iden.split(\"(\")[0], word_iden.split(\"(\")[1].split(\")\")[0]\n try:\n iden_int = int(iden)\n except:\n raise Exception(i)\n if not re.fullmatch(\"^[a-z\\'\\-]+$\", word):\n print(\"Ignoring:\", word)\n continue\n if word in monophone_dict:\n # import pdb; pdb.set_trace()\n # print()\n polyphone_dict[word] = {}\n polyphone_dict[word][\"1\"] = monophone_dict[word]\n monophone_dict.pop(word)\n polyphone_dict[word][iden] = \" \".join(phones).lower()\n else:\n word = word_iden\n if not re.fullmatch(\"^[a-z\\'\\-]+$\", word):\n print(\"Ignoring:\", word)\n continue\n monophone_dict[word] = \" \".join(phones).lower()\n\nwith open(OUT_FILE_1, encoding=\"utf-8\", mode=\"w\") as f:\n # OUTPUT_1 line format: word, pos, identifier, 
phonemes, morphemes, frequency\n for word, phones in monophone_dict.items():\n f.write(\",\".join([word, \"\", \"\", phones, \"\", \"\"]) + \"\\r\\n\")\n for word, iden_phones in polyphone_dict.items():\n for iden, phones in iden_phones.items():\n f.write(\",\".join([word, \"\", iden, phones, \"\", \"\"]) + \"\\r\\n\")\n\nwith open(OUT_FILE_2, encoding=\"utf-8\", mode=\"w\") as f:\n # OUTPUT_2 line format: word phonemes\n for word, phones in monophone_dict.items():\n f.write(\" \".join([\"EnUs\", word, phones]) + \"\\r\\n\")\n","repo_name":"jzmzhong/TTS_Front_End","sub_path":"200_Grpaheme_to_Phoneme/202_G2P_Model/datasets/scripts/preprocess_EnUs_cmudict.py","file_name":"preprocess_EnUs_cmudict.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"6259149591","text":"import pandas as pd\nimport numpy as np\nimport geopy as gy\nfrom geopy.geocoders import Nominatim\n\n\ndef read_data():\n \"\"\"\n Read data from Github repository. The file is small,\n so we upload it in the Github.\n \"\"\"\n # Read data from github\n natural = pd.read_csv('../data/Natural.csv')\n human = pd.read_csv('../data/human_related.csv')\n economy = pd.read_csv('../data/economy.csv')\n tertiary = pd.read_csv(\"../data/tertiary.csv\")\n\n return natural, human, economy, tertiary\n\n\ndef data_rank(natural, human, economy, tertiary):\n \"\"\"\n Get data ranks for four different categories related to city choices.\n\n natural, human, economy, tertiary: all in DataFrame format.\n \"\"\"\n\n natural['Air_Rank'] = natural['Air'].rank(ascending=1)\n natural['Water_Rank'] = natural['Water_quality'].rank(ascending=1)\n natural['Toxics_Rank'] = natural['Toxics'].rank(ascending=1)\n natural['Hazardous_Rank'] = natural['Hazardous'].rank(ascending=1)\n natural['Green_score_rank'] = natural['Green_score'].rank(ascending=1)\n natural['Green_score_rank'].fillna(\n natural['Green_score_rank'].max() + 1, inplace=True)\n natural['Natural_total_score'] = (\n natural['Air_Rank'] + natural['Water_Rank'] +\n natural['Toxics_Rank'] + natural['Hazardous_Rank'] +\n natural['Green_score_rank'])\n natural['Natural_total_rank'] =\\\n natural['Natural_total_score'].rank(ascending=1)\n human['total crime'] = human['Violent'] +\\\n human['Rape'] + human['Robbery']\n human['crime_rank'] = human['total crime'].rank(ascending=1)\n human['crime_rank'].fillna(\n human['crime_rank'].max() + 1, inplace=True)\n human['hospital_rank'] = human['NumHospital'].rank(ascending=0)\n human['hospital_rank'].fillna(\n human['hospital_rank'].max() + 1, inplace=True)\n human['early_education_rank'] =\\\n human['AvgSATScore'].rank(ascending=0)\n human['early_education_rank'].fillna(\n human['early_education_rank'].max() + 1, inplace=True)\n human['Population'] = pd.to_numeric(\n human['Population'], errors='coerce')\n a = np.multiply(\n human['Percent_graduate_degree'], human['Population'])\n a = a.rank(ascending=0)\n b = human['Colleges'].rank(ascending=0)\n human['advanced_education_rank'] = (a + b).rank(ascending=1)\n human['advanced_education_rank'].fillna(\n human['advanced_education_rank'].max() + 1, inplace=True)\n human['Human_related_rank'] = (\n human['crime_rank'] + human['hospital_rank'] +\n human['early_education_rank'] +\n human['advanced_education_rank']).rank(ascending=1)\n economy['Rank_Unemployment'] =\\\n economy['Percent unemployment'].rank(ascending=1)\n economy['Rank_Sales'] = economy['Local tax rate'].rank(ascending=1)\n economy['Rank_Income'] = 
economy['Median Income'].rank(ascending=0)\n economy['Rank_Tuition'] = economy['AvgTuition'].rank(ascending=1)\n economy['Rank_Unemployment'].fillna(\n economy['Rank_Unemployment'].max() + 1, inplace=True)\n economy['Rank_Sales'].fillna(\n economy['Rank_Sales'].max() + 1, inplace=True)\n economy['Rank_Income'].fillna(\n economy['Rank_Income'].max() + 1, inplace=True)\n economy['Rank_Tuition'].fillna(\n economy['Rank_Tuition'].max() + 1, inplace=True)\n economy['Sum'] = (economy['Rank_Unemployment'] + economy['Rank_Sales'] +\n economy['Rank_Income'] + economy['Rank_Tuition'])\n economy['Economy_rank'] = economy['Sum'].rank(ascending=1)\n tertiary['Bar_Rank'] = tertiary['Bars'].rank(ascending=0)\n tertiary['Restaurant_Rank'] = tertiary['Restaurant'].rank(ascending=0)\n tertiary['Museums_Rank'] = tertiary['Museums'].rank(ascending=0)\n tertiary['Libraries_Rank'] = tertiary['Libraries'].rank(ascending=0)\n tertiary['Park_Rank'] =\\\n tertiary['Park_acres_per_1000_residents'].rank(ascending=0)\n tertiary['TopRes_Rank'] = tertiary['NumTop200Restau'].rank(ascending=0)\n tertiary['Bar_Rank'].fillna(tertiary['Bar_Rank'].max() + 1, inplace=True)\n tertiary['Restaurant_Rank'].fillna(\n tertiary['Restaurant_Rank'].max() + 1, inplace=True)\n tertiary['Museums_Rank'].fillna(\n tertiary['Museums_Rank'].max() + 1, inplace=True)\n tertiary['Libraries_Rank'].fillna(\n tertiary['Libraries_Rank'].max()+1, inplace=True)\n tertiary['Park_Rank'].fillna(\n tertiary['Park_Rank'].max() + 1, inplace=True)\n tertiary['TopRes_Rank'].fillna(\n tertiary['TopRes_Rank'].max() + 1, inplace=True)\n tertiary['Total_Rank'] = tertiary['Bar_Rank'] +\\\n tertiary['Restaurant_Rank'] + tertiary['Museums_Rank'] +\\\n tertiary['Libraries_Rank'] + tertiary['Park_Rank'] +\\\n tertiary['TopRes_Rank']\n tertiary['Tertiary_Rank'] = tertiary['Total_Rank'].rank(ascending=1)\n\n return natural, human, economy, tertiary\n\n\ndef create_rank(natural, human, economy, tertiary, Lat, Lon):\n \"\"\"\n make all rank into one Dataframe and save as csv file.\n\n All inputs are in DataFrame format.\n \"\"\"\n\n rank = pd.DataFrame()\n rank['Air'] = natural['Air_Rank']\n rank['Water'] = natural['Water_Rank']\n rank['Toxics'] = natural['Toxics_Rank']\n rank['Hazardous'] = natural['Hazardous_Rank']\n rank['Green_score'] = natural['Green_score_rank']\n rank['Natural_total_rank'] = natural['Natural_total_rank']\n rank['City'] = human['City']\n rank['State'] = human['State']\n rank['Population'] = human['Population']\n rank['Crime_rank'] = human['crime_rank']\n rank['Hospital_rank'] = human['hospital_rank']\n rank['Early_education_rank'] = human['early_education_rank']\n rank['University_education_rank'] = human['advanced_education_rank']\n rank['Human_related_rank'] = human['Human_related_rank']\n rank['Rank_unemployment'] = economy['Rank_Unemployment']\n rank['Rank_sale_rate'] = economy['Rank_Sales']\n rank['Rank_Income'] = economy['Rank_Income']\n rank['Rank_Tuition'] = economy['Rank_Tuition']\n rank['Economy_rank'] = economy['Economy_rank']\n rank['Bar_Rank'] = tertiary['Bar_Rank']\n rank['Restaurant_Rank'] = tertiary['Restaurant_Rank']\n rank['Museums_Rank'] = tertiary['Museums_Rank']\n rank['Libraries_Rank'] = tertiary['Libraries_Rank']\n rank['Park_Rank'] = tertiary['Park_Rank']\n rank['TopRes_Rank'] = tertiary['TopRes_Rank']\n rank['Tertiary_Rank'] = tertiary['Tertiary_Rank']\n rank['Latitude'] = Lat\n rank['Longitude'] = Lon\n\n rank.to_csv(\"../data/rank_file.csv\")\n\n return rank\n\n\ndef find_loc(dataframe):\n \"\"\"\n Find latitude and 
longitude using the geopy package.\n    Return latitude and longitude.\n    \"\"\"\n\n    geolocator = Nominatim()\n    lat = []\n    lon = []\n    for index, row in dataframe.iterrows():\n        loc = geolocator.geocode(\n            row['City'] + ' ' + row['State'] + ' United States')\n        lat.append(loc.latitude)\n        lon.append(loc.longitude)\n    return lat, lon\n","repo_name":"UWSEDS-aut17/uwseds-group-city-fynders","sub_path":"cityfynders/data_processing.py","file_name":"data_processing.py","file_ext":"py","file_size_in_byte":6924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32596514710","text":"from __future__ import absolute_import, division, print_function\n\nimport multiprocessing as multi\nimport time\nfrom copy import deepcopy\nfrom functools import partial\nfrom itertools import product\n\nfrom scipy.stats import randint as sp_randint, uniform\n\nimport numpy as np\nfrom sklearn.metrics import classification_report, precision_recall_fscore_support, accuracy_score\nfrom sklearn.model_selection import train_test_split, KFold, RandomizedSearchCV\nfrom sklearn.pipeline import Pipeline\n\nfrom sklearn.naive_bayes import MultinomialNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier, Lasso, ElasticNet\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.svm import SVC, LinearSVC\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neural_network import MLPClassifier\n\nfrom semisuper import transformers\nfrom semisuper.helpers import densify\n\nPARALLEL = True\nRAND_INT_MAX = 1000\nRANDOM_SEED = 134242\n\n\n# ----------------------------------------------------------------\n# Estimators and parameters to evaluate\n# ----------------------------------------------------------------\n\ndef estimator_list():\n    l = [\n        {\"name\" : \"LinearSVC\",\n         \"model\" : LinearSVC(),\n         \"params\": {'C' : uniform(0.5, 0.5),\n                    'loss': ['hinge', 'squared_hinge']\n                    }\n         },\n        {\"name\" : \"LogisticRegression\",\n         \"model\" : LogisticRegression(),\n         \"params\": {'C' : sp_randint(1, RAND_INT_MAX),\n                    'solver' : ['lbfgs'],  # ['newton-cg', 'lbfgs', 'liblinear'],  # 'sag', 'saga'\n                    'class_weight': ['balanced']\n                    }\n         },\n        {\"name\" : \"SGDClassifier\",\n         \"model\" : SGDClassifier(),\n         \"params\": {'loss' : ['hinge', 'log', 'modified_huber', 'squared_hinge'],\n                    'class_weight' : ['balanced'],\n                    'penalty' : ['l2', 'l1', 'elasticnet'],\n                    'learning_rate': ['optimal', 'invscaling'],\n                    'max_iter' : [1000],  # for sklearn >= 0.19, not 0.18\n                    'tol' : [1e-3],  # for sklearn >= 0.19, not 0.18\n                    'eta0' : uniform(0.01, 0.00001)\n                    }\n         },\n\n        ## SVC: slow!\n        # {\"name\" : \"SVM_SVC\",\n        #  \"model\" : SVC(),\n        #  \"params\": {'C' : sp_randint(1, RAND_INT_MAX),\n        #             'kernel' : ['poly', 'rbf', 'sigmoid'],\n        #             'class_weight': ['balanced'],\n        #             'probability' : [False]\n        #             }\n        #  },\n\n        ## MNB: bad performance (< 80% avg, < 70% recall)\n        # {\"name\" : \"MultinomialNB\",\n        #  \"model\" : MultinomialNB(),\n        #  \"params\": {'alpha' : uniform(0, 1),\n        #             'fit_prior': [True],\n        #             }\n        #  },\n\n        ## Lasso, ElasticNet: mix of continuous and discrete labels\n        # {\"name\" : \"Lasso\",\n        #  \"model\" : Lasso(),\n        #  \"params\": {'alpha' : uniform(0, 1),\n        #             'fit_intercept': [True],\n        #             'normalize' : [True, False],\n        #             'max_iter' : sp_randint(1, RAND_INT_MAX)\n        #             }\n        #  },\n        # {\"name\" : \"ElasticNet\",\n        #  \"model\" : ElasticNet(),\n        #  \"params\": {'alpha' : uniform(0, 1),\n        #             'l1_ratio': uniform(0, 1)\n        #             }\n        #  },\n        ## DecisionTree: bad 
performance\n # {\"name\" : \"DecisionTreeClassifier\",\n # \"model\" : DecisionTreeClassifier(),\n # \"params\": {\"criterion\" : [\"gini\", \"entropy\"],\n # \"splitter\" : [\"best\", \"random\"],\n # 'max_depth' : sp_randint(1, 1000),\n # 'class_weight': ['balanced']\n # }\n # },\n ## {\"name\" : \"RandomForestClassifier\",\n # \"model\" : RandomForestClassifier(),\n # \"params\": {'n_estimators': sp_randint(1, RAND_INT_MAX),\n # \"criterion\" : [\"gini\", \"entropy\"],\n # 'max_depth' : sp_randint(1, RAND_INT_MAX),\n # 'class_weight': ['balanced']\n # }\n # },\n ## {\"name\" : \"KNeighbors\",\n # \"model\" : KNeighborsClassifier(),\n # \"params\": {'n_neighbors' : sp_randint(1, 40),\n # 'weights' : ['uniform', 'distance'],\n # 'algorithm' : ['auto'],\n # 'leaf_size' : sp_randint(1, RAND_INT_MAX)\n # }\n # },\n ## MLP: crashes\n # {\"name\" : \"MLPClassifier\",\n # \"model\" : MLPClassifier(),\n # \"params\": {'activation' : ['identity', 'logistic', 'tanh', 'relu'],\n # 'solver' : ['lbfgs', 'sgd', 'adam'],\n # 'learning_rate': ['constant', 'invscaling', 'adaptive'],\n # 'max_iter' : [1000],\n # }\n # },\n ]\n\n return l[:1]\n\n\ndef preproc_param_dict():\n d = {\n 'df_min' : [0.001],\n 'df_max' : [1.0],\n 'rules' : [True], # [True, False],\n 'wordgram_range': [(1, 4)], # [(1, 3), (1, 4)], # [None, (1, 2), (1, 3), (1, 4)],\n 'chargram_range': [(2, 6)], # [(2, 5), (2, 6)], # [None, (2, 4), (2, 5), (2, 6)],\n 'feature_select': [\n # transformers.IdentitySelector,\n # partial(transformers.percentile_selector, 'chi2', 30),\n partial(transformers.percentile_selector, 'chi2', 25),\n # partial(transformers.percentile_selector, 'chi2', 20),\n # partial(transformers.percentile_selector, 'f', 30),\n # partial(transformers.percentile_selector, 'f', 25),\n # partial(transformers.percentile_selector, 'f', 20),\n # partial(transformers.percentile_selector, 'mutual_info', 30), # mutual information: worse than rest\n # partial(transformers.percentile_selector, 'mutual_info', 25),\n # partial(transformers.percentile_selector, 'mutual_info', 20),\n # partial(transformers.factorization, 'LatentDirichletAllocation', 100),\n # partial(transformers.factorization, 'TruncatedSVD', 100),\n # partial(transformers.factorization, 'TruncatedSVD', 1000),\n # partial(transformers.factorization, 'TruncatedSVD', 2000), # 10% worse than chi2, slow, SVM iter >100\n # partial(transformers.factorization, 'TruncatedSVD', 3000),\n # partial(transformers.select_from_l1_svc, 1.0, 1e-3),\n # partial(transformers.select_from_l1_svc, 0.5, 1e-3),\n # partial(transformers.select_from_l1_svc, 0.1, 1e-3),\n ]\n }\n return d\n\n\n# ----------------------------------------------------------------\n# Cross validation\n# ----------------------------------------------------------------\n\ndef best_model_cross_val(X, y, fold=10):\n \"\"\"determine best model, cross validate and return pipeline trained on all data\"\"\"\n\n print(\"\\nFinding best model\\n\")\n\n best = get_best_model(X, y)\n\n print(\"\\nCross-validation\\n\")\n\n kf = KFold(n_splits=fold, shuffle=True)\n splits = kf.split(X, y)\n\n # TODO: parallel fix\n # if PARALLEL:\n # with multi.Pool(fold) as p:\n # stats = list(p.map(partial(eval_fold, best, X, y), enumerate(splits), chunksize=1))\n # else:\n # stats = list(map(partial(eval_fold, best, X, y), enumerate(splits)))\n\n stats = list(map(partial(eval_fold, best, X, y), enumerate(splits)))\n\n mean_stats = np.mean(stats, 0)\n print(\"Cross-validation average: p {}, r {}, f1 {}, acc {}\".format(\n mean_stats[0], 
mean_stats[1], mean_stats[2], mean_stats[3]))\n\n print(\"Retraining model on full data\")\n\n best.fit(X, y)\n\n print(\"Returning final model\")\n\n return best\n\n\n# helper\ndef eval_fold(model, X, y, i_splits):\n \"\"\"helper function for running cross validation in parallel\"\"\"\n\n i, split = i_splits\n X_train, X_test = X[split[0]], X[split[1]]\n y_train, y_test = y[split[0]], y[split[1]]\n\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n\n pr, r, f1, _ = precision_recall_fscore_support(y_test, y_pred)\n acc = accuracy_score(y_test, y_pred)\n\n print(\"Fold no.\", i, \"acc\", acc, \"classification report:\\n\", classification_report(y_test, y_pred))\n return [pr, r, f1, acc]\n\n\n# ----------------------------------------------------------------\n# Model selection\n# ----------------------------------------------------------------\n\ndef get_best_model(X_train, y_train, X_test=None, y_test=None):\n \"\"\"Evaluate parameter combinations, save results and return object with stats of all models\"\"\"\n\n print(\"\\nEvaluating parameter ranges for preprocessor and classifiers\")\n\n if X_test is None or y_test is None:\n X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.2, random_state=RANDOM_SEED)\n\n results = {'best': {'f1': -1, 'acc': -1}, 'all': []}\n\n preproc_params = preproc_param_dict()\n estimators = estimator_list()\n\n for wordgram, chargram in product(preproc_params['wordgram_range'], preproc_params['chargram_range']):\n for r in preproc_params['rules']:\n for df_min, df_max in product(preproc_params['df_min'], preproc_params['df_max']):\n for fs in preproc_params['feature_select']:\n\n if wordgram is None and chargram is None:\n break\n\n print(\"\\n----------------------------------------------------------------\",\n \"\\nwords:\", wordgram, \"chars:\", chargram, \"feature selection:\", fs,\n \"df_min, df_max:\", df_min, df_max, \"rules:\", r,\n \"\\n----------------------------------------------------------------\\n\")\n\n start_time = time.time()\n\n X_train_, X_test_, vectorizer, selector = prepare_train_test(trainData=X_train, testData=X_test,\n trainLabels=y_train, rules=r,\n wordgram_range=wordgram,\n feature_select=fs,\n chargram_range=chargram,\n min_df_char=df_min, min_df_word=df_min,\n max_df=df_max)\n\n # fit models\n with multi.Pool(multi.cpu_count()) as p:\n iter_stats = list(p.map(partial(model_eval_record, X_train_, y_train, X_test_, y_test),\n estimators))\n\n # finalize records: remove model, add n-gram stats, update best\n for m in iter_stats:\n m['n-grams'] = {'word': wordgram, 'char': chargram},\n m['rules'] = r,\n m['df_min, df_max'] = (df_min, df_max)\n m['fs'] = fs()\n if m['acc'] > results['best']['acc']:\n results['best'] = deepcopy(m)\n results['best']['vectorizer'] = vectorizer\n results['best']['selector'] = selector\n m.pop('model', None)\n\n results['all'].append(iter_stats)\n\n print(\"Evaluated words:\", wordgram, \"chars:\", chargram,\n \"rules:\", r,\n \"feature selection:\", fs, \"min_df:\", df_min,\n \"in %s seconds\\n\" % (time.time() - start_time))\n\n # print_results(results)\n\n return Pipeline([('vectorizer', results['best']['vectorizer']),\n ('selector', results['best']['selector']),\n ('clf', results['best']['model'])])\n\n\ndef model_eval_record(X_train, y_train, X_test, y_test, model_params, cv=10):\n \"\"\"helper function for finding best model in parallel: evaluate model and return stat object. 
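    Runs a RandomizedSearchCV (n_iter=20, f1 scoring, cv folds) over the
    model's parameter distributions, then reports precision/recall/F1/accuracy
    of the best estimator on the held-out test split.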
\"\"\"\n\n random_search = RandomizedSearchCV(model_params['model'],\n param_distributions=model_params['params'],\n n_iter=20,\n n_jobs=-1,\n pre_dispatch='n_jobs',\n cv=cv,\n scoring='f1',\n verbose=0)\n\n random_search.fit(X_train, y_train)\n model = random_search.best_estimator_\n params = random_search.best_params_\n y_pred = model.predict(X_test)\n\n name = model_params['name']\n p, r, f1, _ = precision_recall_fscore_support(y_test, y_pred, average='macro')\n acc = accuracy_score(y_test, y_pred)\n clsr = classification_report(y_test, y_pred)\n\n print(\"\\n{} with params{}:\\nacc: {}, classification report:\\n{}\".format(name, params, acc, clsr))\n return {'name' : name, 'p': p, 'r': r, 'f1': f1, 'acc': acc, 'clsr': clsr,\n 'model': model, 'params': params}\n\n\ndef test_best(results, X_eval, y_eval):\n \"\"\"helper function to evaluate best model on held-out set. returns pipeline with best parameters\"\"\"\n\n best_model = results['best']['model']\n name = results['best']['name']\n\n selector = results['best']['selector']\n vectorizer = results['best']['vectorizer']\n\n if selector:\n transformedTestData = selector.transform(vectorizer.transform(X_eval))\n else:\n transformedTestData = vectorizer.transform(X_eval)\n\n y_pred = best_model.predict(transformedTestData)\n\n p, r, f1, _ = precision_recall_fscore_support(y_eval, y_pred, average='macro')\n acc = accuracy_score(y_eval, y_pred)\n clsr = classification_report(y_eval, y_pred)\n\n print(\"Testing best model on held-out test set:\\n\", name,\n results['best']['n-grams'], results['best']['fs'], \"\\n\",\n 'p={}\\tr={}\\tf1={}\\tacc={}'.format(p, r, f1, acc))\n\n print(\"Classification report:\\n{}\".format(clsr))\n\n return Pipeline([('vectorizer', vectorizer), ('selector', selector), ('clf', best_model)])\n\n\ndef prepare_train_test(trainData, testData, trainLabels, rules=True, wordgram_range=None, feature_select=None,\n chargram_range=None, min_df_char=0.001, min_df_word=0.001, max_df=1.0):\n \"\"\"prepare training and test vectors, vectorizer and selector for validating classifiers\"\"\"\n\n print(\"Fitting vectorizer, preparing training and test data\")\n\n vectorizer = transformers.vectorizer_dx(chargrams=chargram_range, min_df_char=min_df_char, wordgrams=wordgram_range,\n min_df_word=min_df_word, rules=rules, max_df=max_df)\n\n transformedTrainData = vectorizer.fit_transform(trainData)\n transformedTestData = vectorizer.transform(testData)\n\n print(\"No. of features:\", transformedTrainData.shape[1])\n\n selector = None\n if feature_select is not None:\n selector = feature_select()\n selector.fit(transformedTrainData, trainLabels)\n transformedTrainData = selector.transform(transformedTrainData)\n transformedTestData = selector.transform(transformedTestData)\n\n print(\"No. 
of features after reduction:\", transformedTrainData.shape[1], \"\\n\")\n    print()\n    return transformedTrainData, transformedTestData, vectorizer, selector\n","repo_name":"nachne/semisuper","sub_path":"semisuper/super_model_selection.py","file_name":"super_model_selection.py","file_ext":"py","file_size_in_byte":15714,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"38466097322","text":"from bs4 import BeautifulSoup\nimport urllib.request\n# -*- coding: utf-8 -*-\nimport xlwt\n\n# workbook encoding\nbook = xlwt.Workbook(encoding=\"utf-8\")\n\n\nowoce = book.add_sheet(\"sól i cukier\")\nwarzywa = book.add_sheet(\"mąka\")\nsalatki = book.add_sheet(\"makaron, ryż i kasza\")\nziola = book.add_sheet(\"płatki sniadaniowe i musli\")\ngrzyby = book.add_sheet(\"olej, oliwa i ocet\")\nprzyprawy = book.add_sheet(\"przyprawy\")\nsosy = book.add_sheet(\"sosy\")\nkonserwy = book.add_sheet(\"konserwy\")\ndania_gotowe_i_zupy = book.add_sheet(\"dania_gotowe_i_zupy\")\nprzetwory_owocowe_i_miod = book.add_sheet(\"przetwory_owocowe_i_miod\")\nprodukty_do_pieczenia = book.add_sheet(\"produkty do pieczenia\")\nslodycze = book.add_sheet(\"slodycze\")\nsłone_przekąski = book.add_sheet(\"słone_przekąski\")\nkuchnie_swiata = book.add_sheet(\"kuchnie_swiata\")\nzdrowa_zywnosc = book.add_sheet(\"zdrowa_zywnosc\")\n\n\n\n# one sheet per category URL in adres below, in the same order\narkusz = [owoce,warzywa,salatki,ziola, grzyby,przyprawy, sosy, konserwy,dania_gotowe_i_zupy,\n          przetwory_owocowe_i_miod,produkty_do_pieczenia,slodycze,słone_przekąski,kuchnie_swiata,zdrowa_zywnosc]\nlista = []\ncena_laczna = []\nilosc =[]\nwaga = []\nlista_zmiennych =[]\nzakres = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23]\n\nadres = ['https://ezakupy.tesco.pl/groceries/pl-PL/shop/art.-spozywcze/sol-i-cukier/all?page=',\n         'https://ezakupy.tesco.pl/groceries/pl-PL/shop/art.-spozywcze/maka/all?page=',\n         'https://ezakupy.tesco.pl/groceries/pl-PL/shop/art.-spozywcze/makaron-ryz-i-kasza/all?page=',\n         'https://ezakupy.tesco.pl/groceries/pl-PL/shop/art.-spozywcze/platki-sniadaniowe-i-musli/all?page=',\n         'https://ezakupy.tesco.pl/groceries/pl-PL/shop/art.-spozywcze/olej-oliwa-i-ocet/all?page=',\n         'https://ezakupy.tesco.pl/groceries/pl-PL/shop/art.-spozywcze/przyprawy/all?page=',\n         'https://ezakupy.tesco.pl/groceries/pl-PL/shop/art.-spozywcze/sosy/all?page=',\n         'https://ezakupy.tesco.pl/groceries/pl-PL/shop/art.-spozywcze/konserwy/all?page=',\n         'https://ezakupy.tesco.pl/groceries/pl-PL/shop/art.-spozywcze/dania-gotowe-i-zupy/all?page=',\n         'https://ezakupy.tesco.pl/groceries/pl-PL/shop/art.-spozywcze/przetwory-owocowe-miod/all?page=',\n         'https://ezakupy.tesco.pl/groceries/pl-PL/shop/art.-spozywcze/produkty-do-pieczenia/all?page=',\n         'https://ezakupy.tesco.pl/groceries/pl-PL/shop/art.-spozywcze/slodycze/all?page=',\n         'https://ezakupy.tesco.pl/groceries/pl-PL/shop/art.-spozywcze/slone-przekaski/all?page=',\n         'https://ezakupy.tesco.pl/groceries/pl-PL/shop/art.-spozywcze/kuchnie-swiata/all?page=',\n         'https://ezakupy.tesco.pl/groceries/pl-PL/shop/art.-spozywcze/zdrowa-zywnosc/all?page=']\n\nfor c in range(len(adres)):\n    try:\n        for a in range(len(zakres)):\n            url = str(adres[c])+str(zakres[a])\n            page = urllib.request.urlopen(url)\n            soup = BeautifulSoup(page, 'html.parser')\n            table = soup.find_all('a',{'class': 'product-tile--title product-tile--browsable'})\n            table2 = soup.find_all('span',{'class':'value'})\n            table3 = soup.find_all('span', {'class': 'weight'})\n\n            for i in range(len(table)):\n\n                k = str(table[i])\n\n                if 'a class=\"product-tile--title product-tile--browsable\"' in k:\n                    produkt = k.split()\n\n                    k1 = k[102:]\n                    k2 = 
k1.replace(\"\",'')\n\n lista.append(k2)\n\n for e in range(len(table2)):\n f = str(table2[e])\n\n if 'span class=\"value\" data-auto=\"price-value\"' in f:\n cena = f.split()\n\n f1 = f[44:]\n f1 = f1.replace('','')\n cena_laczna.append(f1)\n\n for gh in range(len(table3)):\n gj = str(table3[gh])\n if '' in gj:\n zmienna = gj.split()\n zmienna = zmienna[1][16:18]\n lista_zmiennych.append(zmienna)\n\n\n except (urllib.error.HTTPError):\n pass\n\n waga = cena_laczna[1::2]\n ilosc = cena_laczna[::2]\n k = arkusz[c]\n pozycja =len(lista)\n for j in range(len(lista)):\n x = j + 1\n k.write(j + 1, 0, lista[j])\n k.write(j+1,1,waga[j])\n if (\"sz\" in lista_zmiennych[j]):\n k.write(j + 1, 2, 'szt')\n elif (\"kg\" in lista_zmiennych[j]):\n k.write(j + 1, 2, 'kg')\n elif (\"l\" in lista_zmiennych[j]):\n k.write(j + 1, 2, 'l')\n elif (\"m\" in lista_zmiennych[j]):\n k.write(j + 1, 2, 'm')\n\n for d in range(len(lista)):\n lista.pop()\n ilosc.pop()\n waga.pop()\n print(len(lista_zmiennych))\n print(len(cena_laczna))\n for g in range(len(cena_laczna)):\n cena_laczna.pop()\n for t in range(len(lista_zmiennych)):\n lista_zmiennych.pop()\n print(\"usuniete \"+str(c))\n k.write(0, 0, \"nazwa\")\n k.write(0, 1, \"cena\")\n k.write(0, 2, \"jednostka\")\n\nbook.save(\"Artykuly_Spozywcze.xls\")\n\n","repo_name":"Jaba033/klasyfikacja_new","sub_path":"Art_spożywcze.py","file_name":"Art_spożywcze.py","file_ext":"py","file_size_in_byte":4970,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39656906456","text":"\r\n\r\ndef isprime(x):\r\n '''Returns True if x is prime and false if x is composite'''\r\n \r\n if x == 1:\r\n return False\r\n \r\n elif x == 2:\r\n return True\r\n \r\n elif x % 2 == 0:\r\n return False\r\n \r\n else:\r\n for y in range(3,int(x**(1/2))+1,2): \r\n if x % y == 0: \r\n return False\r\n return True\r\n\r\n\r\n\r\ndef order(x):\r\n '''Fermats little theorem'''\r\n k = 2\r\n while pow(10,k,x) != 1:\r\n k += 1\r\n if k == x-1:\r\n return True\r\n else:\r\n return False\r\n\r\ndef cycle_finder(x):\r\n \r\n prime_list = []\r\n \r\n for num in range(1,x):\r\n if isprime(num):\r\n prime_list.append(num)\r\n \r\n for prime in prime_list[::-1]:\r\n if order(prime):\r\n return(prime)\r\n\r\n\r\ncycle_finder(1000)","repo_name":"justinmyersdata/ProjectEuler","sub_path":"26_Project_Euler.py","file_name":"26_Project_Euler.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2743550998","text":"def solve(A, B, C, D, E, F):\n n = len(E)\n visited = [[False]*(B + 1) for i in range(A + 1)]\n print(visited)\n queue = []\n for i in range(n):\n queue.append(((E[i], F[i]), (E[i], F[i])))\n dx = [-1, -1, -1, 0, 0, 1, 1, 1]\n dy = [-1, 0, 1, -1, 1, -1, 0, 1]\n\n def isSafe(x, y):\n if x >= 0 and x < A and y >= 0 and y < B:\n return True\n return False\n\n def dist(x, y, a, b):\n return (a - x) ** 2 + (b - y) ** 2\n\n while queue:\n temp, node = queue.pop(0)\n print(temp)\n if visited[temp[0]][temp[1]]:\n continue\n visited[temp[0]][temp[1]] = True\n for i in range(8):\n nx = temp[0] + dx[i]\n ny = temp[1] + dy[i]\n if isSafe(nx, ny) and visited[nx][ny] and dist(nx, ny, node[0], node[1]) <= D * D:\n queue.append(((nx, ny), node))\n if visited[A][B] == True:\n return \"NO\"\n q = [(0, 0)]\n while q:\n temp = q.pop(0)\n if visited[temp[0]][temp[1]]:\n continue\n x = temp[0]\n y = temp[1]\n if x == A and y == B:\n return \"YES\"\n visited[x][y] = True\n 
for i in range(8):\n            nx = x + dx[i]\n            ny = y + dy[i]\n            if isSafe(nx, ny) and not visited[nx][ny]:\n                q.append((nx, ny))\n    if visited[A][B]:\n        return \"YES\"\n    else:\n        return \"NO\"\n\nA = 2\nB = 3\nC = 1\nD = 1\nE = [2]\nF = [3]\nprint(solve(A,B,C,D,E,F))\n\n\n\n\n\n\n\n","repo_name":"nikhil3991/Problem_Solving","sub_path":"Graphs/Valid Path.py","file_name":"Valid Path.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70364917265","text":"#!/usr/bin/python\n# jk 2020\n\nfrom __future__ import print_function\n\nimport ROOT\n\nfrom Tools import *\n\nfrom math import sqrt, pow, log\n\nk2B0S = '2B0S'\nk1B1S = '1B1S'\nk0B2S = '0B2S'\nkAnySel = 'AnySel'\nkNoCuts = 'NoCuts'\n\n# power of 2 to scale the signal in addition:\naddSignalSFsPower = {#'2B0S' : 0.0625, '1B1S' : 0.125, '0B2S' : 1.,\n                     # to be read from list config files!\n                     k2B0S : 0., k1B1S : 0., k0B2S : 0,\n                     # zero means no scaling, not read from file:\n                     kAnySel : 0., kNoCuts : 0.}\n    \n##########################################\ndef ReplaceInStringToEmpty(ss, tags):\n    rs = ss + ''\n    for tag in tags:\n        rs = rs.replace(tag, '')\n    return rs\n\n##########################################\ndef ComputeChi2AndKS(hdata, htot, x=0.13, y=0.73):\n    # compute chi2 between data and MC:\n    chi2 = ROOT.Double(0.)\n    # ndf = hdata.GetNbinsX()\n    # chi2 = hdata.Chi2Test(htot, \"UU\")\n    ndf,chi2 = GetChi2(hdata, htot) # Tools\n    ks = hdata.KolmogorovTest(htot)\n    #if normalize == 'normalize':\n    #    ndf = ndf - 1\n    ctex = ROOT.TLatex(x, y, '')\n    if ndf > 0:\n        chi2ndf = chi2 / ndf\n        ctex.SetText(x, y, '#chi^{2}/ndf=' + '{:2.2f}'.format(chi2ndf) + ' KS={:1.2f}'.format(ks))\n    ctex.SetTextSize(0.05) # 0.055\n    ctex.SetNDC()\n    return ndf,chi2,ctex\n\n##########################################\ndef DrawNice2DRatio(htot, hdata, gzratioMin, gzratioMax, stuff, iplot = 0, opt = 'box', ratioTag = 'Pseudo-data / Prediction'):\n    \n    ratio = hdata.Clone(hdata.GetName() + '_ratio2d{}'.format(iplot))\n    ratio.Divide(htot)\n    # Print2DHisto(ratio)\n\n    ratio.GetZaxis().SetRangeUser(gzratioMin, gzratioMax)\n    ratio.Draw(opt)\n    return ratio\n\n#########################################################\ndef CopyStyle(h1, h2, copyFillAtts = True):\n    h2.SetLineColor(h1.GetLineColor())\n    h2.SetLineStyle(h1.GetLineStyle())\n    h2.SetMarkerColor(h1.GetMarkerColor())\n    h2.SetMarkerSize(h1.GetMarkerSize())\n    h2.SetMarkerStyle(h1.GetMarkerStyle())\n    if copyFillAtts:\n        h2.SetFillStyle(h1.GetFillStyle())\n        h2.SetFillColor(h1.GetFillColor())\n\n\n\n#########################################################\ndef MakeRatioHisto( data, prediction, newname = 'clone' ):\n    ratio = data.Clone(newname)\n    ratio.Reset()\n    CopyStyle(data, ratio, False)\n    nbins = data.GetNbinsX()\n    #print 'nbins: %i' % (nbins,)\n    i = 0\n    for n in range( nbins ):\n        #x_mc = ROOT.Double()\n        y_mc = ROOT.Double()\n        #x_data = ROOT.Double()\n        y_data = ROOT.Double()\n        #x_mc = prediction.GetBinCenter( n+1 )\n        y_mc = prediction.GetBinContent( n+1 ) \n        #print x_mc, y_mc\n        if y_mc == 0.:\n            continue\n        x_data = data.GetBinCenter( n+1 )\n        y_data = data.GetBinContent( n+1 )\n        y_err = data.GetBinError( n+1 )\n        ratio.SetBinContent( n+1, y_data/y_mc )\n        ratio.SetBinError( n+1, y_err / y_mc)\n        \n        i += 1\n    ratio.Scale(1.)\n    return ratio\n\n\n    \n##########################################\n\ndef DrawNiceRatioWithBand(htot, hdata, hxmin, hxmax, gyratioMin, gyratioMax, stuff, iplot = 0, ratioTag = 'Pseudo-data / Prediction'):\n    \n    band = 
MakeOneWithErrors(htot)\n band.SetFillColor(ROOT.kYellow)\n #print('band:')\n #PrintBinContent(band)\n\n # double counting of visual errors on data and perdiction band!!!\n #ratio = hdata.Clone(hdata.GetName() + '_ratio{}'.format(iplot))\n #ratio.Divide(htot)\n ratio = MakeRatioHisto(hdata, htot, hdata.GetName() + '_ratio{}'.format(iplot))\n \n #print('ratio:')\n #PrintBinContent(ratio)\n ratioScaleHisto = ROOT.TH2D(ratio.GetName() + '_tmp', ratio.GetName() + '_tmp' + ';;' + ratioTag,\n ratio.GetNbinsX(), ratio.GetXaxis().GetXmin(), ratio.GetXaxis().GetXmax(),\n 100, gyratioMin, gyratioMax)\n ratioScaleHisto.SetStats(0)\n ratioScaleHisto.GetYaxis().SetTitleOffset(0.45)\n ratioScaleHisto.GetXaxis().SetLabelSize(0.085)\n ratioScaleHisto.GetYaxis().SetLabelSize(0.085)\n ratioScaleHisto.GetXaxis().SetTitle(MakePrettyTitle(ratio.GetXaxis().GetTitle()))\n ratioScaleHisto.GetXaxis().SetTitleSize(0.095)\n ratioScaleHisto.GetYaxis().SetTitleSize(0.095)\n ratioScaleHisto.Draw()\n ratioScaleHisto.GetXaxis().SetRangeUser(hxmin, hxmax)\n\n band.Draw('same e2')\n line = ROOT.TLine(hxmin, 1., hxmax, 1.)\n line.SetLineColor(ROOT.kRed)\n line.Draw()\n ratio.Draw('e1 X0 same')\n arrows = DrawArrowForPointsOutsideYAxisRange(ratio, ratioScaleHisto, hxmin, hxmax)\n stuff.append(arrows)\n\n stuff.append(line)\n stuff.append(ratioScaleHisto)\n \n \n return ratio, band, ratioScaleHisto\n\n\n##########################################\n\ndef DivideByErrorBars(h, diviser):\n for i in xrange(1,h.GetXaxis().GetNbins()+1):\n hval = h.GetBinContent(i)\n herr = h.GetBinError(i)\n bgerr = diviser.GetBinError(i)\n if bgerr > 0:\n err = sqrt( pow(herr,2) + pow(bgerr,2) )\n h.SetBinContent(i, hval/err)\n h.SetBinError(i, 0.)\n else:\n h.SetBinContent(i, 0.)\n h.SetBinError(i, 0.) \n print('ERROR: in DivideByErrorBars of histo {} hbg error is {}!'.format(h.GetName(), bgerr))\n h.Scale(1.)\n\n########################################## \n\ndef DrawSignificance(hbg, hdata, hxmin, hxmax, yMin, yMax, stuff, iplot = 0, ratioTag = 'Signal signif.'):\n \n signifh = hdata.Clone(hdata.GetName() + '_signif{}'.format(iplot))\n signifh.Add(hbg, -1.)\n DivideByErrorBars(signifh, hbg)\n #print('signifh:')\n #PrintBinContent(signifh)\n ratioScaleHisto = ROOT.TH2D(signifh.GetName() + '_tmp', signifh.GetName() + '_tmp' + ';;' + ratioTag,\n signifh.GetNbinsX(), signifh.GetXaxis().GetXmin(), signifh.GetXaxis().GetXmax(),\n 100, yMin, yMax)\n ratioScaleHisto.SetStats(0)\n ratioScaleHisto.GetYaxis().SetTitleOffset(0.45)\n ratioScaleHisto.GetXaxis().SetLabelSize(0.085)\n ratioScaleHisto.GetYaxis().SetLabelSize(0.085)\n ratioScaleHisto.GetXaxis().SetTitle(MakePrettyTitle(signifh.GetXaxis().GetTitle()))\n ratioScaleHisto.GetXaxis().SetTitleSize(0.15)\n ratioScaleHisto.GetYaxis().SetTitleSize(0.095)\n ratioScaleHisto.Draw()\n ratioScaleHisto.GetXaxis().SetRangeUser(hxmin, hxmax)\n\n line0 = ROOT.TLine(hxmin, 0., hxmax, 0.)\n line0.SetLineColor(ROOT.kRed)\n line0.Draw()\n line1p = ROOT.TLine(hxmin, 1., hxmax, 1.)\n line1p.SetLineColor(ROOT.kRed)\n line1p.SetLineStyle(2)\n line1p.Draw()\n line1n = ROOT.TLine(hxmin, -1., hxmax, -1.)\n line1n.SetLineColor(ROOT.kRed)\n line1n.SetLineStyle(2)\n line1n.Draw()\n\n signifh.SetFillColor(ROOT.kRed)\n signifh.SetFillStyle(1111)\n signifh.Draw('hist same X0')\n \n stuff.append([line0, line1p, line1n])\n stuff.append(ratioScaleHisto)\n \n return signifh, ratioScaleHisto\n\n \n##########################################\ndef ScaleHistAndRebin(hist, hname, w, rebin = True):\n if rebin and IsUniformlyBinned(hist):\n if 
not 'N' in hname:\n if 'DiTopM' in hname or 'Tau' in hname:\n hist.Rebin(5)\n else:\n if not ('Delta' in hname or 'CosTheta' in hname or 'Yboost' in hname or 'Chittbar' in hname):\n #hist.Rebin(4) ## default was 2!\n #else:\n if 'Pt' in hname:\n #if 'LJet' in hname and 'Mass' in hname:\n hist.Rebin(10)\n elif 'Mass' in hname or 'Pout' in hname:\n #if 'LJet' in hname and 'Mass' in hname:\n hist.Rebin(5)\n else:\n hist.Rebin(20)\n \n #norm = ROOT.Double(hist.Integral())\n #if norm > 0.:\n #hist.Scale(w / norm)\n hist.SetStats(0)\n hist.Scale(w)\n","repo_name":"jirikvita/Semiboosted_ttbar","sub_path":"python/xSectTools.py","file_name":"xSectTools.py","file_ext":"py","file_size_in_byte":8301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36722245641","text":"import numpy as np\nimport matplotlib.image\nimport time\n\n\nIMAGESIZE = 512\n\n\ndef interpolate_color(x1, x2, x, C1: list, C2: list):\n \"\"\"\n :param x1: horizontal coordinate of an intersection point between one side of the triangle and the line of the gouraud\n :param x2: the second horizontal coordinate of an intersection point\n :param x: the horizontal coordinate of the point between x1 and x2, which interpolate-color is required\n :param C1: a list that contains the R,G,B colors of the first given point\n :param C2: a list that contains the R,G,B colors of the second given point\n :return: the value is a list that contains interpolate-colors\n\n The algorithm of Gouraud shading is being used in order to fill the triangle.\n According to Gouraud shading algorithm each point's color is calculated through the following type:\n C = m * C1 + (1 - m) * C2, m = (x2 - x)/(x2 - x1)\n \"\"\"\n m = (x2 - x) / (x2 - x1)\n value = m * np.array(C1) + (1 - m) * np.array(C2)\n # print(value)\n\n return value\n\n\ndef bresenham(vertex1: list, vertex2: list):\n x0 = vertex1[0]\n y0 = vertex1[1]\n x1 = vertex2[0]\n y1 = vertex2[1]\n\n inverted = False\n x, y = x0, y0\n dx = abs(x1 - x0)\n dy = abs(y1 - y0)\n if dy > dx:\n dx, dy = dy, dx\n x, y = y, x\n x0, y0 = y0, x0\n x1, y1 = y1, x1\n inverted = True\n\n f = 2 * dy - dx\n\n coordinates = []\n\n if inverted:\n coordinates.append([y, x])\n else:\n coordinates.append([x, y])\n\n for k in range(dx):\n if f > 0:\n if y < y1:\n y = y + 1\n else:\n y = y - 1\n f = f + 2 * (dy - dx)\n else:\n f = f + 2 * dy\n\n if x < x1:\n x = x + 1\n else:\n x = x - 1\n\n if inverted:\n coordinates.append([y, x])\n else:\n coordinates.append([x, y])\n\n return coordinates\n\n\ndef shade_triangle(img: list, verts2d: list, vcolors: list, shade_t: str):\n \"\"\"\n :param img: the image that contains M x N pixels each one of it has 3 colors. 
It probably contains\n pre-existing triangles.\n :param verts2d: a 3 x 2 list.Each row contains the 2-d coordinates of the triangle's vertices\n :param vcolors: a 3 x 3 list.Each row contains the colors RGB for one of the vertices of the triangle.\n :param shade_t: a string that defines which filling algorithm will be implemented\n :return: Y, a list M x N x 3 that contains for all the triangle's points, their calculated colors(Ri, Gi, Bi)\n \"\"\"\n\n # Call 3 times the bresenham algorithm to shape the triangle\n side1 = bresenham(verts2d[0], verts2d[1])\n side2 = bresenham(verts2d[1], verts2d[2])\n side3 = bresenham(verts2d[2], verts2d[0])\n\n # Bresenham returns a list of points [row, col] for all the points that shape one side\n # Sort each side according to row, from the lower value to the biggest\n # The sort is done in order to know the start and the end of the scanline\n side1 = sorted(side1, key=lambda row: row[0])\n side2 = sorted(side2, key=lambda row: row[0])\n side3 = sorted(side3, key=lambda row: row[0])\n\n # The average color in case flat shading needs to be done\n average_color = [0, 0, 0]\n # If the given shade_t argument is \"flat\" each pixel in the triangle will obtain a unique color,\n # which will be the average color of the triangle's vertices.\n if shade_t == \"flat\":\n # Find average R of the vertices\n average_color[0] = (vcolors[0][0] + vcolors[1][0] + vcolors[2][0]) / 3\n # Find average G of the vertices\n average_color[1] = (vcolors[0][1] + vcolors[1][1] + vcolors[2][1]) / 3\n # Find average B of the vertices\n average_color[2] = (vcolors[0][2] + vcolors[1][2] + vcolors[2][2]) / 3\n\n # Color each point on the triangle's sides\n for pixel in side1:\n img[pixel[1]][pixel[0]] = list(average_color)\n for pixel in side2:\n img[pixel[1]][pixel[0]] = list(average_color)\n for pixel in side3:\n img[pixel[1]][pixel[0]] = list(average_color)\n\n elif shade_t == \"gouraud\":\n # for each point in side 1, calculate its color\n for pixel in side1:\n # If the point is not a vertex, calculate its color by interpolate function\n if pixel not in verts2d:\n # If gradient > 1 reverse rows with cols\n if abs(verts2d[0][0] - verts2d[1][0]) > abs(verts2d[0][1] - verts2d[1][1]):\n img[pixel[1]][pixel[0]] = interpolate_color(verts2d[0][0], verts2d[1][0], pixel[0], vcolors[0],\n vcolors[1])\n # Else don't change them\n else:\n img[pixel[1]][pixel[0]] = interpolate_color(verts2d[0][1], verts2d[1][1], pixel[1], vcolors[0],\n vcolors[1])\n # Else if the point is a vertex its color is known from vcolors list\n else:\n # Find in verts2d array the index that contains the same coordinates as pixel\n index = verts2d.index(pixel)\n # The vcolors[index] contains the color of the vertex\n img[pixel[1]][pixel[0]] = vcolors[index]\n\n # Repeat the same work for the side2 as well\n for pixel in side2:\n if pixel not in verts2d:\n if abs(verts2d[1][0] - verts2d[2][0]) > abs(verts2d[1][1] - verts2d[2][1]):\n # If gradient > 1\n img[pixel[1]][pixel[0]] = interpolate_color(verts2d[1][0], verts2d[2][0], pixel[0], vcolors[1],\n vcolors[2])\n else:\n img[pixel[1]][pixel[0]] = interpolate_color(verts2d[1][1], verts2d[2][1], pixel[1], vcolors[1],\n vcolors[2])\n else:\n index = verts2d.index(pixel)\n img[pixel[1]][pixel[0]] = vcolors[index]\n\n # Repeat the same work for side3 as well\n for pixel in side3:\n if pixel not in verts2d:\n if abs(verts2d[2][0] - verts2d[0][0]) > abs(verts2d[2][1] - verts2d[0][1]):\n # If gradient > 1\n img[pixel[1]][pixel[0]] = interpolate_color(verts2d[2][0], verts2d[0][0], 
pixel[0], vcolors[2],\n                                                             vcolors[0])\n                else:\n                    img[pixel[1]][pixel[0]] = interpolate_color(verts2d[2][1], verts2d[0][1], pixel[1], vcolors[2],\n                                                                vcolors[0])\n            else:\n                index = verts2d.index(pixel)\n                img[pixel[1]][pixel[0]] = vcolors[index]\n    else:\n        print(\"I don't know this shading algorithm\\n\")\n\n    # The smallest value of sides' cols is the start of scanline\n    scan_start = min(side1[0][0], side2[0][0], side3[0][0])\n    # The last elements(index=-1) have the biggest row values and the maximum of them is needed\n    # to find where the scanline ends\n    scan_end = max(side1[-1][0], side2[-1][0], side3[-1][0])\n\n    # Find the active points (the colored ones)\n    for scan in range(scan_start, scan_end + 1):\n        # In each iteration this list will be initialized again and hold the active points per scanline\n        active_points = []\n        for pixel in side1:\n            if pixel[0] == scan:\n                active_points.append(pixel)\n        for pixel in side2:\n            if pixel[0] == scan:\n                active_points.append(pixel)\n        for pixel in side3:\n            if pixel[0] == scan:\n                active_points.append(pixel)\n\n        # If a vertex is found continue to the next loop\n        if len(active_points) == 1:\n            continue\n        else:\n            # Find the minimum value of column in active points list\n            min_col_active = np.array(active_points).min(axis=0)[1]\n\n            # Find the maximum value of column in active points list\n            max_col_active = np.array(active_points).max(axis=0)[1]\n\n            # For the points between the minimum and the maximum column: skip the ones already colored, shade the rest\n            for i in range(min_col_active + 1, max_col_active):\n                if [i, scan] in active_points:\n                    continue\n                else:\n                    if shade_t == \"flat\":\n                        img[i][scan] = list(average_color)\n                    elif shade_t == \"gouraud\":\n                        img[i][scan] = interpolate_color(min_col_active, max_col_active, i,\n                                                         img[min_col_active][scan], img[max_col_active][scan])\n\n\ndef render(verts2d: list, faces: list, vcolors: list, depth: list, shade_t: str):\n    \"\"\"\n    :param verts2d: The list with the triangles' vertices\n    :param faces: The list with the K-colored triangles' vertices\n    :param vcolors: The L x 3 list with the vertices' colors\n    :param depth: The L x 1 list with each vertex's depth\n    :param shade_t: string that defines the shading method\n    :return: the IMAGESIZE x IMAGESIZE x 3 image with every triangle shaded\n    \"\"\"\n\n    # Initialize the image\n    image = [[[1.0 for i in range(3)] for j in range(IMAGESIZE)] for k in range(IMAGESIZE)]\n\n    triangle_colors = []\n    depths_of_triangles = []\n    for triangle in faces:\n        # Search in faces which index has the triangle\n        index = faces.index(triangle)\n        # Store in faces the coordinates of each triangle's vertices\n        faces[index] = [verts2d[triangle[0]], verts2d[triangle[1]], verts2d[triangle[2]]]\n\n        # Calculate the depth of the triangle that is shaped by the current vertices\n        new_depth = (depth[triangle[0]] + depth[triangle[1]] + depth[triangle[2]]) / 3\n        triangle_colors.append([vcolors[triangle[0]], vcolors[triangle[1]], vcolors[triangle[2]]])\n        depths_of_triangles.append(new_depth)\n\n    # Sort the triangles by depth, from the farthest to the nearest (painter's algorithm)\n    depths_of_triangles, faces, triangle_colors = zip(\n        *sorted(zip(depths_of_triangles, faces, triangle_colors), key=lambda x: -x[0]))\n\n    for triangle in faces:\n        shade_triangle(image, triangle, triangle_colors[faces.index(triangle)], shade_t)\n\n    return image\n","repo_name":"ErikaKoro/Graphics-1st-assignment","sub_path":"pyCharm/image_manipulation.py","file_name":"image_manipulation.py","file_ext":"py","file_size_in_byte":10366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"24864889667","text":"from tkinter import *\r\nfrom tkinter import font\r\n\r\nwindow = Tk()\r\nwindow.geometry(\"1000x600\")\r\nwindow.title(\"Bike-sharing system\")\r\n\r\n# Define fonts\r\nl1_font = font.Font(family=\"Helvetica\", size=32, weight='bold')\r\nl2_font = font.Font(family=\"Helvetica\", size=16)\r\nb_font = font.Font(family=\"Helvetica\", size=16)\r\n\r\n# Label 1 - Welcome Customer\r\nlabel1 = Label(text=\"Charge Vehicle\", font=l1_font, fg=\"black\")\r\nlabel1.place(x=300, y=50)\r\n\r\nlabel2 = Label(text=\"Threshold(0-1)\", font=l2_font)\r\nlabel2.place(x=280, y=185)\r\n\r\ntextbox1 = Entry(font=l2_font)\r\ntextbox1.place(x=450, y=185, width=220, height=30)\r\n\r\nlabel3 = Label(text=\"Note: battery less than or equal to threshold value will be charged\", font = l2_font)\r\nlabel3.place(x = 150, y = 300)\r\n\r\n# Sign Out Button\r\nbutton1 = Button(text=\"Charge\", font=b_font, bg='red', fg='white')\r\nbutton1.place(x=220, y = 440, width=120, height=40)\r\n\r\n# Rent Vehicle Button\r\nbutton2 = Button(text=\"Back\", font=b_font, bg='green', fg='white')\r\nbutton2.place(x=550, y = 440, width=120, height=40)\r\n\r\n\r\nwindow.mainloop()\r\n","repo_name":"20191844323/ProgSD_TeamProject","sub_path":"GUI/Charge Vehicle.py","file_name":"Charge Vehicle.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"4549759498","text":"import unittest\nimport os\nimport shutil\nimport drvschema\nimport subprocess\n\n\nTEST_PROJECT_PATH = os.path.join(os.path.dirname(__file__), 'drv')\nTEST_PROJECT_MODEL_PATH = os.path.join(TEST_PROJECT_PATH, 'drv', 'models.py')\n\n\ndef cleanTestProject():\n \"\"\"\n Removes migrations and sqlite db for the drv test Django project\n \"\"\"\n if not os.path.exists(TEST_PROJECT_PATH):\n raise Exception('Test project %s does not exist!' 
% TEST_PROJECT_PATH)\n\n try:\n os.unlink(os.path.join(TEST_PROJECT_PATH, 'drv', 'migrations', '0001_initial.py'))\n os.unlink(os.path.join(TEST_PROJECT_PATH, 'db.sqlite3'))\n os.unlink(TEST_PROJECT_MODEL_PATH)\n except Exception as e:\n # sys.stderr.print(\"Failure to remove migrations and sqlitedb: %s\" % str(e))\n pass\n\n\nclass TestDjangoModels(unittest.TestCase):\n\n def setUp(self):\n cleanTestProject()\n\n def tearDown(self):\n cleanTestProject()\n\n def testCharFieldKwargs(self):\n \"\"\"\n Ensure that Django model CharField kwargs are correctly created\n \"\"\"\n appschema = drvschema.DrvSchema({\n 'User': {\n 'first_name': {\n 'required': True,\n 'maxlength': 200,\n 'help': 'User first name',\n },\n }\n })\n\n kwargs = appschema.to('DjangoModelCharFieldKwargs', 'User.first_name')\n self.assertTrue(kwargs['max_length'] == 200)\n self.assertTrue(kwargs['help_text'] == 'User first name')\n self.assertTrue(kwargs['default'] is None)\n self.assertFalse(kwargs['null'])\n self.assertTrue('required' not in kwargs)\n\n def testDateTimeFieldKwargs(self):\n \"\"\"\n Ensure that Django model DateTimeField kwargs are correctly created\n \"\"\"\n appschema = drvschema.DrvSchema({\n 'User': {\n 'created': {\n 'readonly': True,\n 'maxlength': 200,\n 'help': 'Date created',\n },\n }\n })\n\n kwargs = appschema.to('DjangoModelDateTimeFieldKwargs', 'User.created')\n self.assertTrue('maxlength' not in kwargs)\n self.assertTrue(kwargs['help_text'] == 'Date created')\n self.assertFalse(kwargs['editable'])\n self.assertTrue('readonly' not in kwargs)\n\n def testBooleanFieldKwargs(self):\n \"\"\"\n Ensure that Django model DateTimeField kwargs are correctly created\n \"\"\"\n appschema = drvschema.DrvSchema({\n 'User': {\n 'is_enabled': {\n 'default': False,\n 'help': 'Is the user enabled?',\n },\n }\n })\n\n kwargs = appschema.to('DjangoModelBooleanFieldKwargs', 'User.is_enabled')\n self.assertFalse(kwargs['default'])\n self.assertTrue(kwargs['help_text'] == 'Is the user enabled?')\n\n def testMigration(self):\n \"\"\"\n Do an actual migration with a Django model (drv project)\n \"\"\"\n modelstext = r'''\nfrom django.db import models\nfrom drvschema import DrvSchema\n\nAPPSCHEMA = DrvSchema({\n 'Test': {\n 'name': {\n 'required': True,\n 'maxlength': 100,\n 'empty': False,\n 'default': None,\n },\n 'description': {\n 'required': False,\n 'maxlength': 255,\n 'empty': True\n }\n }\n})\n\nclass Test(models.Model):\n name = models.CharField(**APPSCHEMA.to('DjangoModelCharFieldKwargs', 'Test.name'))\n description = models.CharField(**APPSCHEMA.to('DjangoModelCharFieldKwargs', 'Test.description'))\n '''\n with open(TEST_PROJECT_MODEL_PATH, 'w') as f:\n f.write(modelstext)\n\n cmd = './manage.py makemigrations drv && ./manage.py migrate'\n proc = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True, cwd=TEST_PROJECT_PATH)\n stdoutstr, stderrstr = proc.communicate()\n self.assertTrue(proc.returncode == 0, \"Migration failed: \\n%s\\n%s\" % (stdoutstr, stderrstr))\n","repo_name":"harvardinformatics/drvschema","sub_path":"drvschema/test/testDjangoModels.py","file_name":"testDjangoModels.py","file_ext":"py","file_size_in_byte":4105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38173611012","text":"# coding=utf-8\nfrom tastypie.cache import SimpleCache\n\nfrom main.api import ColibriResource\nfrom member.models import Member\n\n\nclass MemberResource(ColibriResource):\n class Meta:\n queryset = Member.objects.all()\n 
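# read-only resource: only HTTP GET is exposed\n        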
allowed_methods = ['get']\n resource_name = \"member\"\n filtering = {\n \"name\": ('exact', 'startswith', 'iexact', 'istartswith',),\n \"second_name\": ('exact', 'startswith', 'iexact', 'istartswith',),\n \"id\": ('exact',),\n }\n limit = 0\n cache = SimpleCache(timeout=1440)\n","repo_name":"openkratio/proyecto-colibri","sub_path":"member/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"48"} +{"seq_id":"23797495040","text":"\"\"\"\nThis code is used to evaluate our trained model against a number of datasets.\n\"\"\"\nimport functools\nimport argparse\nimport os\nimport itertools\n \nimport tensorflow as tf\n\nfrom model import input_fn, get_model_fn\nfrom dataset_utils import read_label_file\n\nimport scipy.misc\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\n\ndef evaluate(model_dir, dataset_dir):\n \"\"\"\n Begins evaluating the entire architecture.\n \"\"\"\n # Session configuration.\n sess_config = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=False,\n intra_op_parallelism_threads=0, # Autocompute how many threads to run\n gpu_options=tf.GPUOptions(force_gpu_compatible=True))\n\n config = tf.contrib.learn.RunConfig(\n session_config=sess_config, model_dir=model_dir)\n\n eval_input_fn = functools.partial(\n input_fn,\n dataset_dir=dataset_dir,\n split_name='validation',\n is_training=False)\n\n # Get the number of classes from the label file\n labels_to_class_names, num_classes = read_label_file(dataset_dir)\n\n classifier = tf.estimator.Estimator(\n model_fn=get_model_fn(num_classes),\n config=config)\n\n # .predict() returns an iterator of dicts;\n y = classifier.predict(input_fn=eval_input_fn)\n\n num_food_image = {}\n\n for pred in y:\n predicted_class = labels_to_class_names[int(pred['classes'])]\n food_dir = '../Validations/%s/%s' % (os.path.basename(\n model_dir), predicted_class)\n\n if not os.path.exists(food_dir):\n os.makedirs(food_dir)\n\n file_name = os.path.join(food_dir, '%s.png' % num_food_image.get(predicted_class, 1))\n\n num_food_image[predicted_class] = num_food_image.get(predicted_class, 1) + 1\n\n scipy.misc.imsave(file_name, pred['features'])\n\n\nif __name__ == '__main__':\n PARSER = argparse.ArgumentParser(\n description='Evaluate a model against a dataset.')\n\n PARSER.add_argument('--model',\n required=True,\n help='The name of the pre-trained model\\'s folder.')\n\n PARSER.add_argument('--dataset',\n required=True,\n help='The folder corresponding to this model\\'s dataset.')\n\n if not os.path.exists(PARSER.parse_args().model):\n raise Exception(\"Path %s doesn't exist.\" % PARSER.parse_args().model)\n\n if not os.path.exists(PARSER.parse_args().dataset):\n raise Exception(\"Path %s doesn't exist.\" % PARSER.parse_args().dataset)\n\n # A (supposed) 5% percent boost in certain GPUs by using faster convolution operations\n os.environ['TF_SYNC_ON_FINISH'] = '0'\n os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'\n\n evaluate(PARSER.parse_args().model, PARSER.parse_args().dataset)\n","repo_name":"WhiteXiezx/Food-Volume-Estimation","sub_path":"Food Detection/src/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":2773,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"48"} +{"seq_id":"5329515180","text":"from kapteyn import maputils\nfrom matplotlib import pylab as plt\n\nheader = {\n'NAXIS': 2 ,\n'NAXIS1': 100 ,\n'NAXIS2': 100 ,\n'CDELT1': -7.165998823000E-03 
,\n'CRPIX1': 5.100000000000E+01 ,\n'CRVAL1': -5.128208479590E+01 ,\n'CTYPE1': 'RA---NCP ' ,\n'CUNIT1': 'DEGREE ' ,\n'CDELT2': 7.165998823000E-03 ,\n'CRPIX2': 5.100000000000E+01 ,\n'CRVAL2': 6.015388802060E+01 ,\n'CTYPE2': 'DEC--NCP ' ,\n'CUNIT2': 'DEGREE ' ,\n}\n\nfig = plt.figure(figsize=(6,5.2))\nframe = fig.add_axes([0.15,0.15,0.8,0.8])\nf = maputils.FITSimage(externalheader=header)\nannim = f.Annotatedimage(frame)\ngrat = annim.Graticule()\ngrat.setp_axislabel(fontstyle='italic') # Apply to all\ngrat.setp_axislabel(\"top\", visible=True, xpos=0.0, ypos=1.0, rotation=180)\ngrat.setp_axislabel(\"left\", \n    backgroundcolor='y', \n    color='b', \n    style='oblique',\n    weight='bold', \n    ypos=0.3)\ngrat.setp_axislabel(\"bottom\", # Label in LaTeX\n    label=r\"$\\mathrm{Right\\ Ascension\\ (2000)}$\", \n    fontsize=14)\nannim.plot()\nplt.show()\n","repo_name":"kapteyn-astro/kapteyn","sub_path":"doc/source/EXAMPLES/mu_labeldemo.py","file_name":"mu_labeldemo.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"24056326776","text":"#comment\r\nimport random\r\n\r\ndef insertion(a):\r\n    for i in range(1, len(a)):\r\n        if a[i] < a[i-1]:\r\n            for j in range(i):\r\n                if a[i] < a[j]:\r\n                    a[i], a[j] = a[j], a[i]\r\n\r\na = [i for i in range(10)]\r\nrandom.shuffle(a)\r\n\r\nprint(a)\r\ninsertion(a)\r\nprint(a)\r\n\r\n\"\"\"\r\nmy comment at bottom\r\ncal line 1\r\ncal line 2\r\n\"\"\"\r\n","repo_name":"ux0/algo","sub_path":"insertion.py","file_name":"insertion.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4836128829","text":"import subprocess\nimport re\nimport shutil\nimport os\nfrom aip import AipOcr\nimport json\nimport time\n\n\nPATH = lambda path: os.path.abspath(\n    os.path.join(\n        os.path.dirname(__file__),\n        path\n    )\n)\n\n\ndef swipe_function():\n    res = subprocess.run(\"adb shell dumpsys window displays\", shell=True, check=True, stdout=subprocess.PIPE)\n    output = str(res.stdout, encoding=\"utf-8\")\n    search_obj = re.search(r'(?<=app=)\\d+x\\d+', output, re.M | re.I)\n\n    if search_obj:\n        pixel_str = search_obj.group()\n        print(\"The phone pixel is: \" + pixel_str)\n    else:\n        raise Exception(\"Cannot get the phone pixel\")\n\n    phone_width = int(pixel_str.split(\"x\")[0])\n    print(\"The phone width: \" + str(phone_width))\n\n    phone_height = int(pixel_str.split(\"x\")[1])\n    print(\"The phone height: \" + str(phone_height))\n\n    print(\"Complete\")\n\n    start_x = phone_width / 2\n    start_y = phone_height * 3 / 4\n\n    end_x = phone_width / 2\n    end_y = phone_height * 1 / 5\n\n    os_command = \"adb shell input swipe {} {} {} {}\".format(start_x, start_y, end_x, end_y)\n    print(\"Command is: \" + os_command)\n\n    scroll_count = 0\n    folder_path = get_screen_folder_path_and_clear()\n    content = get_conf()\n\n    start_time = time.time()\n    while scroll_count < content[\"Scroll_Count\"]:\n        # swipe the screen upward\n        subprocess.run(os_command, shell=True, check=True, stdout=subprocess.PIPE)\n        scroll_count = scroll_count + 1\n        if scroll_count >= content[\"Start_Count\"]:\n            get_screen_capture(folder_path, scroll_count)\n\n    to_find_target_screenshot(start_time)\n\n\ndef parse_xml_contains_str(target_str):\n\n    with open(PATH(\"window_dump.xml\"), encoding=\"utf-8\") as f:\n        content = f.read()\n\n    if content.find(target_str) > -1:\n        return True\n    else:\n        return False\n\n\ndef pull_file_to_current_folder():\n    subprocess.run(\"adb shell 
uiautomator dump --compressed /sdcard/window_dump.xml\",\n                   shell=True, check=True, stdout=subprocess.PIPE)\n    current_file = PATH(\"window_dump.xml\")\n    subprocess.run(\"adb pull /sdcard/window_dump.xml \" + current_file, shell=True, check=True, stdout=subprocess.PIPE)\n\n\ndef get_screen_folder_path_and_clear():\n    folder_name = \"screenshot\"\n\n    folder_path = PATH(folder_name)\n\n    if os.path.exists(folder_path):\n        shutil.rmtree(folder_path)\n\n    os.makedirs(folder_path)\n\n    return folder_path\n\n\ndef get_screen_folder_path():\n    folder_name = \"screenshot\"\n\n    folder_path = PATH(folder_name)\n\n    return folder_path\n\n\ndef get_screen_capture(folder_path, count):\n\n    current_file = os.path.join(folder_path, \"screenshot_{}.png\".format(str(count)))\n\n    os_command = \"adb shell /system/bin/screencap -p /sdcard/screenshot.png&adb pull /sdcard/screenshot.png \" + current_file\n    subprocess.run(os_command, shell=True, check=True, stdout=subprocess.PIPE)\n\n\ndef to_find_target_screenshot(start_time):\n    print(\"Looking for the target screenshot, please wait since it will take some time\")\n    content = get_conf()\n\n    APP_ID = content[\"APP_ID\"]\n    API_KEY = content[\"API_KEY\"]\n    SECRET_KEY = content[\"SECRET_KEY\"]\n    Target_Str = content[\"Target_Str\"]\n\n    aipOcr = AipOcr(APP_ID, API_KEY, SECRET_KEY)\n\n    folder_path = get_screen_folder_path()\n    all_screenshot_name_list = os.listdir(folder_path)\n\n    is_find = False\n\n    for one_file in all_screenshot_name_list:\n        file_name = os.path.join(folder_path, one_file)\n        screen_shot_content = get_file_content(file_name)\n        result = aipOcr.basicGeneral(screen_shot_content)\n        if str(result[\"words_result\"]).find(Target_Str) > -1:\n            time_dif = os.path.getctime(file_name) - start_time\n            print(\"Found the target screenshot: \" + file_name)\n            print(\"The start time is: \" + time.ctime(start_time))\n            print(\"The end time is: \" + time.ctime(os.path.getctime(file_name)))\n            print(\"The elapsed time is: \" + str(time_dif))\n            is_find = True\n            break\n\n    if not is_find:\n        raise Exception(\"Failed to find the target after \" + str(content[\"Scroll_Count\"]) + \" screen scrolls\")\n\n\ndef get_conf():\n    with open(PATH(\"baidu_api.json\"), encoding=\"utf-8\") as f:\n        content = json.load(f)\n\n    return content\n\n\ndef get_file_content(screen_shot_file):\n    with open(screen_shot_file, 'rb') as fp:\n        return fp.read()\n\n\nif __name__ == '__main__':\n    swipe_function()\n","repo_name":"sayidkongtao/scripts","sub_path":"swiptest_auto_by_compare_screenshot.py","file_name":"swiptest_auto_by_compare_screenshot.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11662794753","text":"import string\n\n\"\"\"\n# The transition states for our Finite State Machine placed in numerical order\n\"\"\"\n\nREJECT = 0\nINTEGER = 1\nDECIMAL = 2\nNEGATIVE = 3\nOPERATOR = 4\nSTRING = 5\nUNKNOWN = 6\nSPACE = 7\n\n\"\"\"\nThe Finite State Machine transition state table. The first row (index 0) \nrepresents a place holder, so the rows in the array start on row 1 instead \nof 0.\n\nThis table can be edited to make the states accept or reject different inputs, thus \nchanging its behavior. 
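Rows correspond to the current state and columns to the class of the next input character; the next state is read as stateTable[state][col]. 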
More states can be added to this table.\n\"\"\"\n\nstateTable=[\n [0, INTEGER, DECIMAL, NEGATIVE, OPERATOR, STRING, UNKNOWN, SPACE],\n [INTEGER, INTEGER, DECIMAL, UNKNOWN, REJECT, REJECT, REJECT, REJECT], # STATE 1\n [DECIMAL, DECIMAL, UNKNOWN, UNKNOWN, REJECT, REJECT, REJECT, REJECT], # STATE 2\n [NEGATIVE, NEGATIVE, NEGATIVE, UNKNOWN, REJECT, REJECT, REJECT, REJECT], # STATE 3\n [OPERATOR, REJECT, REJECT, UNKNOWN, REJECT, STRING, REJECT, REJECT], # STATE 4\n [STRING, STRING, REJECT, REJECT, STRING, STRING, REJECT, REJECT], # STATE 5\n [UNKNOWN, UNKNOWN, UNKNOWN, UNKNOWN, UNKNOWN, UNKNOWN, UNKNOWN, REJECT], # STATE 6\n [SPACE, REJECT, REJECT, REJECT, REJECT, REJECT, REJECT, REJECT] # STATE 7\n ] \n\ndef Lexer(expression):\n #TokenType access\n #access = TokenType(\"\", 0, \"\")\n access = []\n #tokens = TokenType(\"\", 0, \"\")\n tokens = []\n\n currentChar = ' '\n col = REJECT\n currentState = REJECT\n prevState = REJECT\n currentToken = \"\"\n\n #print(expression)\n for x in range(0, len(expression)):\n currentChar = expression[x]\n\n # Get the column number for the current character\n col = Get_FSM_Col(currentChar)\n \n # print(type(col))\n # print(type(currentState))\n\n # Get the current state of the expression\n currentState = stateTable[currentState][col]\n\n if(currentState == REJECT):\n if(prevState != SPACE): # Whitespace is not considered\n token = currentToken\n lexeme = prevState\n lexemeName = GetLexemeName(lexeme)\n access = [token, lexeme, lexemeName]\n\n #print(\"The current access:\", access)\n\n # Appending to the list of tokens\n tokens.append(access)\n \n # Resetting current token\n currentToken = \"\"\n \n else: # Moving on to the next character\n currentToken += currentChar\n x = x+1\n prevState = currentState\n #print(\"The token in this iteration is: \", tokens)\n\n if(currentState != SPACE and currentToken != \"\"):\n # Whitespace is not considered\n token = currentToken\n lexeme = currentState\n lexemeName = GetLexemeName(lexeme)\n access = [token, lexeme, lexemeName]\n\n # Appending to the list of tokens\n tokens.append(access)\n\n return tokens\n\ndef Get_FSM_Col(currentChar):\n \n if(currentChar.isspace()):\n return SPACE\n\n # Check for integer numbers\n elif(currentChar.isdigit()):\n return INTEGER\n\n # Check for real numbers\n elif(currentChar == '.'):\n return DECIMAL\n \n # Check if the number is negative\n elif(currentChar == '-'):\n return NEGATIVE\n\n # Check for characters\n elif(currentChar.isalpha()):\n return STRING\n\n # Check for operators\n elif(ispunct(currentChar)):\n return OPERATOR\n \n # Return UNKOWN if not identified\n return UNKNOWN\n\ndef ispunct(ch):\n # Function to check if punctuation\n return ch in string.punctuation\n\ndef GetLexemeName(lexeme):\n if lexeme == INTEGER:\n return \"INTEGER\"\n elif lexeme == DECIMAL:\n return \"DECIMAL\"\n elif lexeme == NEGATIVE:\n return \"NEGATIVE\"\n elif lexeme == OPERATOR:\n return \"OPERATOR\"\n elif lexeme == STRING:\n return \"STRING\"\n elif lexeme == UNKNOWN:\n return \"UNKNOWN\"\n elif lexeme == SPACE:\n return \"SPACE\"\n else:\n return \"ERROR\"\n\ndef main():\n \n #print(\"\\nPlease enter the name of the file: \")\n fileName = str(input(\"\\nPlease enter the name of the file: \"))\n #f = open(fileName, \"r\")\n\n # Opening the file\n with open(fileName, 'r', encoding='utf-8') as infile:\n # Reading each line from the file\n for line in infile:\n #print(line)\n tokens = Lexer(line)\n for i in tokens:\n print(i[2]+ \" \" + i[0])\n #print(i)\n print(\"\\n\")\n \n #print(tokens)\n\nif 
__name__ == \"__main__\":\n main()","repo_name":"ashwinn-v/NLP-FSA","sub_path":"1_Tokenizer.py","file_name":"1_Tokenizer.py","file_ext":"py","file_size_in_byte":4727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16613731764","text":"import torch\nimport torchvision\nfrom torch.utils.data import DataLoader,random_split\n\n\n\ndef create_train_validation_dataset(path: str,batch_size: int,validation_rate: float):\n\n # Download training data from the torchvision repo\n train_data = torchvision.datasets.MNIST(\n root = path,\n train = True,\n download = True,\n transform = torchvision.transforms.ToTensor()\n )\n\n total_dataset_length = len(train_data)\n\n class_names = train_data.classes\n # Split the downloaded dataset into a training and a validation folds\n train_data,validation_data = random_split(train_data,[int(total_dataset_length * (1 - validation_rate)),int(total_dataset_length * validation_rate)])\n\n # Create Train and validation dataloaders\n train_loader = DataLoader(\n dataset = train_data,\n batch_size = batch_size,\n shuffle = True\n )\n\n validation_loader = DataLoader(\n dataset = validation_data,\n batch_size = batch_size,\n shuffle = True\n )\n\n print(f'{len(train_data)} training samples were reserved successfully')\n print(f'{len(validation_data)} validation samples were reserved successfully')\n\n return train_loader,validation_loader,class_names\n\n\ndef create_test_dataset(path: str,batch_size: int):\n\n # Download test data from the torchvision repo\n test_data = torchvision.datasets.MNIST(\n root = path,\n train = False,\n download = True,\n transform = torchvision.transforms.ToTensor()\n )\n\n class_names = test_data.classes\n # Create test dataloaders\n test_dataloader = DataLoader(\n dataset = test_data,\n batch_size = batch_size,\n shuffle = False\n )\n\n return test_dataloader,class_names\n\n","repo_name":"azizderbel/autoencoder","sub_path":"download_dataset.py","file_name":"download_dataset.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17828062175","text":"import math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom .base_network import BaseNetwork\n\nclass CustomNorm(nn.Module):\n def __init__(self, dim, norm_type):\n super().__init__()\n if norm_type == \"in\":\n self.norm_layer = nn.InstanceNorm2d(dim, affine=True)\n elif norm_type == \"none\":\n self.norm_layer = nn.Identity()\n def forward(self, x):\n return self.norm_layer(x)\nclass CustomAct(nn.Module):\n def __init__(self, act_type):\n super().__init__()\n if act_type == \"leaky\":\n self.act_layer = nn.LeakyReLU(0.2)\n elif act_type == \"relu\":\n self.act_layer = nn.ReLU()\n def forward(self, x):\n return self.act_layer(x)\nclass ResBlk(nn.Module):\n def __init__(self, in_ch, out_ch, norm_type=\"in\", act_type=\"leaky\", downsample=False, normalize=False):\n super().__init__()\n self.downsample = downsample\n self.normalize = normalize\n self.act_layer = CustomAct(act_type)\n self.conv1 = nn.Conv2d(in_ch, in_ch, 3, 1, 1)\n self.conv2 = nn.Conv2d(in_ch, out_ch, 3, 1, 1)\n if self.normalize:\n self.norm1 = CustomNorm(in_ch, norm_type)\n self.norm2 = CustomNorm(in_ch, norm_type)\n self.shortcut = in_ch != out_ch\n if self.shortcut:\n self.conv_shortcut = nn.Conv2d(in_ch, out_ch, 1, 1, 0, bias=False) \n def _shortcut(self, x):\n if self.shortcut:\n x = self.conv_shortcut(x)\n if self.downsample:\n x = 
F.avg_pool2d(x, 2)\n        return x\n    def _residual(self, x):\n        if self.normalize:\n            x = self.norm1(x)\n        x = self.act_layer(x)\n        x = self.conv1(x)\n        if self.downsample:\n            x = F.avg_pool2d(x, 2)\n        if self.normalize:\n            x = self.norm2(x)\n        x = self.act_layer(x)\n        x = self.conv2(x)\n        return x\n    def forward(self, x):\n        x = self._shortcut(x) + self._residual(x)\n        return x / math.sqrt(2)\nclass AdaIN(nn.Module):\n    def __init__(self, dim, style_dim):\n        super().__init__()\n        self.norm_layer = nn.InstanceNorm2d(dim, affine=False)\n        self.fc = nn.Linear(style_dim, dim*2)\n    def forward(self, x, style):\n        style = self.fc(style).unsqueeze(-1).unsqueeze(-1)\n        gamma, beta = torch.chunk(style, chunks=2, dim=1)\n        return (1+gamma) * self.norm_layer(x) + beta\nclass AdaINResBlk(nn.Module):\n    def __init__(self, in_ch, out_ch, style_dim=64, w_hpf=0, upsample=False, act_type=\"leaky\"):\n        super().__init__()\n        self.w_hpf = w_hpf\n        self.upsample = upsample\n        self.shortcut = in_ch != out_ch\n        self.conv1 = nn.Conv2d(in_ch, out_ch, 3, 1, 1)\n        self.conv2 = nn.Conv2d(out_ch, out_ch, 3, 1, 1)\n        self.norm1 = AdaIN(in_ch, style_dim)\n        self.norm2 = AdaIN(out_ch, style_dim)\n        self.act_layer = CustomAct(act_type)\n        if self.shortcut:\n            self.conv_shortcut = nn.Conv2d(in_ch, out_ch, 1, 1, 0, bias=False)\n    def _shortcut(self, x):\n        if self.upsample:\n            x = F.interpolate(x, scale_factor=2, mode=\"nearest\")\n        if self.shortcut:\n            x = self.conv_shortcut(x)\n        return x\n    def _residual(self, x, style):\n        x = self.norm1(x, style)\n        x = self.act_layer(x)\n        if self.upsample:\n            x = F.interpolate(x, scale_factor=2, mode=\"nearest\")\n        x = self.conv1(x)\n        x = self.norm2(x, style)\n        x = self.act_layer(x)\n        x = self.conv2(x)\n        return x\n    def forward(self, x, style):\n        out = self._residual(x, style)\n        if self.w_hpf == 0: # no skip connection when the high-pass filter is applied\n            out = (out + self._shortcut(x)) / math.sqrt(2)\n        return out\nclass Highpass(nn.Module):\n    def __init__(self, w_hpf):\n        super().__init__()\n        self.register_buffer(\n            \"filter\", torch.tensor([[-1,-1,-1],\n                                    [-1,8.0,-1], \n                                    [-1,-1,-1]]) / w_hpf)\n    def forward(self, x):\n        filter = self.filter.unsqueeze(0).unsqueeze(1).repeat(x.shape[1], 1, 1, 1)\n        return F.conv2d(x, filter, padding=1, groups=x.shape[1]) # depthwise convolution\nclass Generator(BaseNetwork):\n    def __init__(self, in_ch, out_ch, img_size=256, style_dim=64, max_ngf=512, w_hpf=0):\n        super().__init__()\n        self.img_size = img_size\n        ngf = (2**14) // img_size # 64\n        self.from_rgb = nn.Conv2d(in_ch, ngf, 3, 1, 1)\n        self.to_rgb = nn.Sequential(\n            nn.InstanceNorm2d(ngf, affine=True),\n            nn.LeakyReLU(0.2),\n            nn.Conv2d(ngf, out_ch, 1, 1, 0)\n        )\n        n_downsample = int(math.log2(img_size)) - 4 # the minimum resolution is 16x16\n\n        self.encoder = nn.ModuleList()\n        self.decoder = nn.ModuleList()\n        #### downsample & upsample ####\n        prev_ngf = ngf\n        for i in range(n_downsample):\n            ngf = min(prev_ngf*2, max_ngf)\n            self.encoder.append(ResBlk(prev_ngf, ngf, norm_type=\"in\", downsample=True, normalize=True))\n            self.decoder.insert(0, AdaINResBlk(ngf, prev_ngf, style_dim=style_dim, w_hpf=w_hpf, upsample=True))\n            prev_ngf = ngf\n\n        #### bottleneck ####\n        for i in range(2):\n            self.encoder.append(ResBlk(ngf, ngf, norm_type=\"in\", downsample=False, normalize=True))\n            self.decoder.insert(0, AdaINResBlk(ngf, ngf, style_dim=style_dim, w_hpf=w_hpf, upsample=False))\n        \n        if w_hpf > 0:\n            self.hpf = Highpass(w_hpf)\n    def forward(self, x, style, masks=None):\n        x = self.from_rgb(x)\n        cache = {}\n        for blk in self.encoder:\n            if (masks is not None) and (x.shape[2] in [32,64,128]):\n                
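# cache the encoder feature map at this resolution; the decoder adds it back through the high-pass filter\n                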
cache[x.shape[2]] = x\n            x = blk(x)\n        for blk in self.decoder:\n            x = blk(x, style)\n            if (masks is not None) and (x.shape[2] in [32,64,128]):\n                mask = masks[0] if x.shape[2] in [32] else masks[1]\n                mask = F.interpolate(mask, size=x.shape[2], mode=\"bilinear\")\n                x = x + self.hpf(mask * cache[x.shape[2]])\n        return self.to_rgb(x)\nclass Discriminator(BaseNetwork):\n    def __init__(self, in_ch, img_size=256, n_domains=2, max_ndf=512):\n        super().__init__()\n        ndf = 2**14 // img_size # 64\n        n_downsample = int(math.log2(img_size)) - 2 # always reduce the resolution down to 4\n        blk = []\n        blk.append(nn.Conv2d(in_ch, ndf, 3, 1, 1))\n        prev_ndf = ndf\n        for _ in range(n_downsample):\n            ndf = min(prev_ndf*2, max_ndf)\n            blk.append(ResBlk(prev_ndf, ndf, downsample=True))\n            prev_ndf = ndf\n        blk.append(nn.LeakyReLU(0.2))\n        blk.append(nn.Conv2d(ndf, ndf, 4, 1, 0)) # [BS x ndf x 1 x 1]\n        blk.append(nn.LeakyReLU(0.2))\n        self.blk = nn.Sequential(*blk)\n        self.fc = nn.Linear(ndf, n_domains)\n    def forward(self, x, y):\n        BS = x.shape[0]\n        out = self.blk(x) # [BS x ndf x 1 x 1]\n        out = out.flatten(1)\n        out = self.fc(out) # [BS x n_domains]\n        idx = torch.arange(BS).to(y.device)\n        out = out[idx, y]\n        return out\nclass MappingNetwork(BaseNetwork):\n    def __init__(self, latent_dim=16, style_dim=64, n_domains=2):\n        super().__init__()\n        shared_layer = []\n        shared_layer.append(nn.Linear(latent_dim, 512))\n        shared_layer.append(nn.ReLU())\n        for _ in range(3):\n            shared_layer.append(nn.Linear(512, 512))\n            shared_layer.append(nn.ReLU())\n        self.shared_layer = nn.Sequential(*shared_layer)\n        \n        self.unshared_layers = nn.ModuleList()\n        for _ in range(n_domains):\n            unshared_layer = nn.Sequential(\n                nn.Linear(512,512),\n                nn.ReLU(),\n                nn.Linear(512, 512),\n                nn.ReLU(),\n                nn.Linear(512, 512),\n                nn.ReLU(),\n                nn.Linear(512, style_dim)\n            )\n            self.unshared_layers.append(unshared_layer)\n    def forward(self, z, y):\n        style = self.shared_layer(z)\n        styles = []\n        for layer in self.unshared_layers:\n            styles.append(layer(style))\n        styles = torch.stack(styles, dim=1) # [BS x n_domains x style_dim]\n        BS = styles.shape[0]\n        idx = torch.arange(BS).to(y.device)\n        return styles[idx, y]\nclass StyleEncoder(BaseNetwork):\n    def __init__(self, in_ch, img_size=256, style_dim=64, n_domains=2, max_nef=512):\n        super().__init__()\n        nef = 2**14 // img_size\n        n_downsample = int(math.log2(img_size)) - 2 # always reduce the resolution down to 4\n        blk = []\n        blk.append(nn.Conv2d(in_ch, nef, 3, 1, 1))\n        prev_nef = nef\n        for _ in range(n_downsample):\n            nef = min(prev_nef*2, max_nef)\n            blk.append(ResBlk(prev_nef, nef, downsample=True))\n            prev_nef = nef\n        blk.append(nn.LeakyReLU(0.2))\n        blk.append(nn.Conv2d(nef, nef, 4, 1, 0)) # [BS x nef x 1 x 1]\n        blk.append(nn.LeakyReLU(0.2))\n        self.blk = nn.Sequential(*blk)\n\n        self.unshared_layers = nn.ModuleList()\n        for _ in range(n_domains):\n            self.unshared_layers.append(nn.Linear(nef, style_dim))\n    def forward(self, x, y):\n        x = self.blk(x)\n        x = x.flatten(1)\n        styles = []\n        for layer in self.unshared_layers:\n            styles.append(layer(x))\n        styles = torch.stack(styles, dim=1) # [BS x n_domains x style_dim]\n        BS = styles.shape[0]\n        idx = torch.arange(BS).to(y.device)\n        return styles[idx, y]\n","repo_name":"rlawjdghek/Generative_Models","sub_path":"GANs/StarGANv2/models/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":9796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30728411851","text":"from flask import Flask, render_template\nfrom 
flask_socketio import SocketIO, emit\nfrom funcionArduino import *\napp = Flask(__name__)\napplication = app\napp.config['SECRET_KEY'] = 'secret_key'\nsocketio = SocketIO(app, cors_allowed_origins='*')\n\n\n@socketio.on('connect')\ndef connect():\n print('Cliente conectado')\n\n\n@socketio.on('disconnect')\ndef disconnect():\n print('Cliente desconectado')\n\n\n@socketio.on('send_message')\ndef send_message(data):\n ubicacionesLuz = data['ubicacionesLuz']\n ubicacionesVentana = data['ubicacionesVentana']\n ubicacionesPuerta = data['ubicacionesPuerta']\n enviarArduino(ubicaionesPuerta=ubicacionesPuerta, ubicacionesVentana=ubicacionesVentana, ubicacionesLuz=ubicacionesLuz)\n emit('send_message', {'message': {'ubicacionesLuz': ubicacionesLuz, 'ubicacionesPuerta': ubicacionesPuerta, 'ubicacionesVentana': ubicacionesVentana}}, broadcast=True)\n\n@socketio.on('plano')\ndef plano(data):\n print(data)\n emit('plano', {'message': data['message']}, broadcast=True)\n\n\n\nif __name__ == \"__main__\":\n socketio.run(app, debug=True, host='0.0.0.0', port=8000)\n","repo_name":"Arturo-daza/Proyecto-Domotica-Inclusiva","sub_path":"socketIO.py","file_name":"socketIO.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25028788678","text":"#Emilia Santini\r\n#211001035\r\nimport os\r\nos.system('cls')\r\n\r\njum=0\r\nn=int(input(\"Masukkan banyak data: \"))\r\nfor i in range(n):\r\n nilai=int(input(f\"Masukkan nilai ke {i+1} : \"))\r\n jum=jum+nilai\r\n\r\nrata=jum/n\r\nprint(f\"Total Nilai : {jum}\")\r\nprint(f\"Rata-rata : {rata}\")\r\n","repo_name":"EmiliaSumbawa/AI-INF","sub_path":"Minggu 3/Tugas 3/6.6 Praktikum 3.py","file_name":"6.6 Praktikum 3.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20205995339","text":"import miepy\nimport numpy as np\nfrom .particle_base import particle\nimport uuid\n\nclass sphere_cluster_particle(particle):\n def __init__(self, position, radius, material, lmax, orientation=None):\n \"\"\"A 'particle' that is a rigid collection of non-overlaping spheres\n\n Arguments:\n position[N,3] x,y,z position of the spheres\n radius[N] radii of the spheres\n material[N] sphere materials\n lmax[N] sphere lmax values\n orientation orientation of the collection (relative to as created)\n \"\"\"\n self.Nparticles = len(position)\n self.p_position = np.asarray(position)\n if self.p_position.ndim != 2 or self.Nparticles == 1:\n raise ValueError('a sphere_cluster_particle must have at least 2 particles')\n self.com = np.average(self.p_position, axis=0)\n\n self.p_radii = np.empty(self.Nparticles, dtype=float)\n self.p_radii[...] = radius\n\n self.p_material = np.empty(self.Nparticles, dtype=object)\n self.p_material[...] = material\n\n self.p_lmax = np.empty(self.Nparticles, dtype=int)\n self.p_lmax[...] 
= lmax\n        self.lmax_cluster = np.max(self.p_lmax)\n\n        self.id = uuid.uuid4()\n\n        super().__init__(self.com, orientation, self.p_material[0])\n\n    def __repr__(self):\n        return f'''{self.__class__.__name__}:\n    Nparticles = {self.Nparticles}\n    position = {self.position}\n    orientation = {self.orientation}\n    '''\n\n    def compute_tmatrix(self, lmax, wavelength, eps_m, **kwargs):\n        eps = np.empty(self.Nparticles, dtype=complex)\n        for i in range(self.Nparticles):\n            eps[i] = self.p_material[i].eps(wavelength)\n        \n        self.tmatrix_fixed = miepy.tmatrix.tmatrix_sphere_cluster(self.p_position, self.p_radii, self.p_lmax,\n                              self.lmax_cluster, wavelength, eps, eps_m, extended_precision=False)\n\n        self.tmatrix_fixed = miepy.tmatrix.tmatrix_reduce_lmax(self.tmatrix_fixed, lmax)\n\n        self._rotate_fixed_tmatrix()\n        return self.tmatrix\n\n    def enclosed_radius(self):\n        return np.max(np.linalg.norm(self.p_position - self.position[np.newaxis], axis=1)) \\\n               + np.max(self.p_radii)\n\n    def _dict_key(self, wavelength):\n        return self.id\n","repo_name":"johnaparker/miepy","sub_path":"miepy/particles/sphere_cluster_particle.py","file_name":"sphere_cluster_particle.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"48"} +{"seq_id":"26372457192","text":"# find the maximum occurring character in a given string\ntxt_str = input(\"Enter your string: \")\nres = {}\nfor i in txt_str:\n    if i in res:\n        res[i] = res[i]+1\n    else:\n        res[i] = 1\nfinal = max(res,key=res.get)\nprint(final)\n","repo_name":"Rakeshkumarlenka/test_functions","sub_path":"DECEMBER/30-12-2021/String/_Q57_Max_Occurance.py","file_name":"_Q57_Max_Occurance.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43948107507","text":"\"\"\"\nYou are given a 0-indexed array nums comprising of n non-negative integers.\n\nIn one operation, you must:\n\nChoose an integer i such that 1 <= i < n and nums[i] > 0.\nDecrease nums[i] by 1.\nIncrease nums[i - 1] by 1.\nReturn the minimum possible value of the maximum integer of nums after performing any number of operations.\n\"\"\"\ndef minimizeArrayValue(self, nums: List[int]) -> int:\n    \"\"\"\n    Time: No \n    Solve: Medium\n    Difficulty: Medium (personally I think it is hard) \n    Comments: \n    I could tell it is a DP-style problem, but I would never have seen the math behind it on my own. It is a pretty clean solution once you think about the step\n    of evening out all the values in the array. 
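For example, with nums = [3,7,1,6] the prefix sums are 3, 10, 11, 17, so the rounded-up running averages are 3, 5, 4, 5 and the answer is 5. 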
You need to round up, and it seems like Python 2 has some problems with the ceiling function and truncation\n    \n    \"\"\"\n    res = total = nums[0]\n    for i in range(1,len(nums)):\n        total += nums[i]\n        res = max(math.ceil(total/(i+1)),res)\n    return res","repo_name":"Hengsheng-Liu/Leetcodes","sub_path":"2439-Minimize Maximum of Array.py","file_name":"2439-Minimize Maximum of Array.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37599137153","text":"import pickle\nimport collections\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport os\nimport pathlib\nimport shutil\nimport math\nfrom scipy.stats import mode\nfrom sklearn import feature_selection\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.decomposition import PCA\nfrom joblib import dump, load \nfrom sklearn.utils import shuffle\nfrom sklearn.preprocessing import StandardScaler\n\n# add \n# Tabulate Demographic characteristics of participants\n# \n\nLABEL_FREQUENCY = 700\nDATASET_PATH = \"../WESAD/\"\n\nclass ReadSubjectData:\n    def __init__(self, file_path):\n        \"\"\"Read subject data from a pkl file\"\"\"\n        \n        with open(file_path, 'rb') as file:\n            data = pickle.load(file, encoding='latin1')\n\n        self.data = data\n        self.subject_no = self.data.get('subject',None)\n        # self.labels = self.data['label']\n        \n    #['label', 'subject', 'signal']\n    def get_data_keys(self):\n        return self.data.keys()\n\n    #['wrist', 'chest']\n    def get_signal_types(self):\n        return self.data.get('signal',{}).keys()\n\n    @staticmethod\n    def get_wrist_frequencies():\n        wrist_freq_dict = {}\n        wrist_freq_dict['acc_wrist_freq'] = 32\n        wrist_freq_dict['bvp_wrist_freq'] = 64\n        wrist_freq_dict['eda_wrist_freq'] = 4\n        wrist_freq_dict['temp_wrist_freq'] = 4\n\n        return wrist_freq_dict\n\n    # ['ACC', 'BVP', 'EDA', 'TEMP']\n    def get_wrist_data(self):\n        wrist_data = self.data.get('signal',{}).get('wrist',[])\n        return wrist_data\n    \n    # ['ACC', 'ECG', 'EDA', 'EMG', 'Resp', 'Temp']\n    def get_chest_data(self):\n        chest_data = self.data.get('signal',{}).get('chest',[])\n        return chest_data\n    \n    def get_labels(self):\n        return self.data.get('label',None)\n    \n    def get_net_accel(self, data):\n        return (data['ACC_x'] ** 2 + data['ACC_y'] ** 2 + data['ACC_z'] ** 2).apply(lambda x: np.sqrt(x))\n    \n    def aggregate_features(self, feature, frequency=700):\n        mean_features = np.zeros(math.ceil(len(feature)/frequency))\n        std_features = np.zeros(math.ceil(len(feature)/frequency))\n        max_features = np.zeros(math.ceil(len(feature)/frequency))\n        min_features = np.zeros(math.ceil(len(feature)/frequency))\n        # sum_features = np.zeros(math.ceil(len(feature)/frequency))\n\n        idx = 0\n\n        for i in range(0, len(feature),frequency):\n            mean_features[idx] = np.mean(feature[i:i+frequency])\n            std_features[idx] = np.std(feature[i:i+frequency])\n            max_features[idx] = np.amax(feature[i:i+frequency])\n            min_features[idx] = np.amin(feature[i:i+frequency])\n            # sum_features[idx] = np.sum(feature[i:i+frequency])\n            idx += 1\n        \n        return np.column_stack((max_features, min_features, std_features, mean_features))\n    \n    def extract_chest_features(self, chest_data):\n\n        # chest_data = chest_data[indices]\n        agg_data = np.apply_along_axis(self.aggregate_features, 0, chest_data)\n        chest_data = agg_data.reshape(agg_data.shape[0],-1) # shape of (nrows, 4*n_columns)\n        return chest_data\n\n    def extract_wrist_features(self, wrist_data_dict):\n        wrist_freq_dict = ReadSubjectData.get_wrist_frequencies()\n        reshape = 
\n        reshape = lambda x: x.reshape(x.shape[0], -1)\n        agg_acc_wrist = reshape(np.apply_along_axis(self.aggregate_features, 0, wrist_data_dict['ACC'], wrist_freq_dict['acc_wrist_freq']))\n        agg_bvp_wrist = reshape(np.apply_along_axis(self.aggregate_features, 0, wrist_data_dict['BVP'], wrist_freq_dict['bvp_wrist_freq']))\n        agg_eda_wrist = reshape(np.apply_along_axis(self.aggregate_features, 0, wrist_data_dict['EDA'], wrist_freq_dict['eda_wrist_freq']))\n        agg_temp_wrist = reshape(np.apply_along_axis(self.aggregate_features, 0, wrist_data_dict['TEMP'], wrist_freq_dict['temp_wrist_freq']))\n\n        # print(agg_acc_wrist.shape, agg_bvp_wrist.shape, agg_eda_wrist.shape, agg_temp_wrist.shape)\n        agg_wrist_data = np.concatenate([agg_acc_wrist, agg_bvp_wrist, agg_eda_wrist, agg_temp_wrist], axis=1)\n        return agg_wrist_data\n\n    def get_aggregate_labels(self):\n        idx = 0\n        labels = self.get_labels()\n        # use ceil so the last partial window also gets a slot, matching the loop below\n        new_labels = np.empty(math.ceil(labels.shape[0] / LABEL_FREQUENCY),)\n\n        for i in range(0, len(labels), LABEL_FREQUENCY):\n            new_labels[idx] = np.rint(np.mean(labels[i:i + LABEL_FREQUENCY]))\n            idx += 1\n\n        return new_labels\n\n    def pca_dimension_reduction(self, features):\n        pca = PCA(n_components=8)\n        features = pca.fit_transform(features)\n        return features\n\n    def standardize_data(self, features, train=True):\n\n        if train is True:\n            scaler = StandardScaler()\n            features = scaler.fit_transform(features)\n            # persist the fitted scaler so the test split reuses the training statistics\n            dump(scaler, 'scaler.gz')\n        else:\n            scaler = load('scaler.gz')\n            features = scaler.transform(features)\n\n        return features\n\n\ndef get_pickle_files(dataset_path):\n    \"\"\"Map each subject number to the path of its pickle file.\"\"\"\n    pickle_files = {}\n    for root, dirnames, filenames in os.walk(dataset_path, topdown=True):\n        for file_name in filenames:\n            if file_name.endswith('.pkl'):\n                subject_no = file_name.split('.')[0]\n                pickle_files[subject_no] = os.path.join(root, file_name)\n\n    return pickle_files\n\ndef create_train_test_data(features, labels):\n    # note: relies on the module-level `obj` created in the __main__ loop below\n    dataset = {}\n    dataset['features'] = features\n    dataset['labels'] = labels\n\n    dataset['features'], dataset['labels'] = shuffle(dataset['features'], dataset['labels'])\n\n    train_dataset, test_dataset = collections.OrderedDict(), collections.OrderedDict()\n\n    train_dataset['features'], test_dataset['features'], train_dataset['labels'], test_dataset['labels'] = train_test_split(dataset['features'], dataset['labels'], test_size=0.3, random_state=1, stratify=dataset['labels'])\n\n    # train_dataset['features'] = obj.pca_dimension_reduction(train_dataset['features'])\n    train_dataset['features'] = obj.standardize_data(train_dataset['features'], train=True)\n\n    # test_dataset['features'] = obj.pca_dimension_reduction(test_dataset['features'])\n    test_dataset['features'] = obj.standardize_data(test_dataset['features'], train=False)\n\n    return train_dataset, test_dataset\n\ndef create_aggregate_features(DATASET_PATH):\n    pass\n\nif __name__ == \"__main__\":\n\n    DATASET_PATH = \"../WESAD/\"\n    pickle_files = get_pickle_files(dataset_path=DATASET_PATH)\n    chest_data = {}\n    chest_labels = {}\n    # mean_data = 0\n\n    for subject_no, file_path in pickle_files.items():\n        if os.path.exists(f'{DATASET_PATH}/{subject_no}_train_data.npy'):\n            continue\n\n        # create a ReadSubjectData object for this subject's pickle file\n        obj = ReadSubjectData(\n            file_path=file_path\n        )\n\n        # get chest data from the subject object\n        chest_data_dict = obj.get_chest_data()\n\n        # get wrist data from the subject object\n        wrist_data_dict = obj.get_wrist_data()\n\n        # get labels (subject's activity) from the subject object\n        labels = obj.get_labels()
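\n        # the six chest-sensor channels recorded at 700 Hz by the chest (RespiBAN) device in WESAD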
\n        keys = ['ACC','ECG','EMG','EDA','Temp','Resp']\n\n        # extract features from the chest data dict and aggregate them per frequency window\n        chest_data = np.concatenate([chest_data_dict[key] for key in keys], axis=1)\n\n        # Dimensions of the different sensors used for chest data measurement:\n        # 'ACC': 3, 'ECG': 1, 'EDA': 1, 'EMG': 1, 'Resp': 1, 'Temp': 1 ===> total dimensions: 8\n\n        # Labels --> 0 = not defined / transient, 1 = baseline, 2 = stress, 3 = amusement,\n        # 4 = meditation, 5/6/7 = should be ignored in this dataset\n\n        # Keep only labels 1 (baseline), 2 (stress) and 3 (amusement); ignore the other labels in this experiment\n\n        chest_features = obj.extract_chest_features(chest_data=chest_data)\n\n        wrist_features = obj.extract_wrist_features(wrist_data_dict)\n\n        labels = obj.get_aggregate_labels()\n\n        chest_features = chest_features[np.where(np.logical_and(labels > 0, labels < 4))[0]]\n\n        wrist_features = wrist_features[np.where(np.logical_and(labels > 0, labels < 4))[0]]\n\n        features = np.concatenate([chest_features, wrist_features], axis=1)\n        labels = labels[np.where(np.logical_and(labels > 0, labels < 4))[0]]\n\n        train_dataset, test_dataset = create_train_test_data(features=features, labels=labels)\n\n        with open(f'{DATASET_PATH}/{subject_no}_train_data.npy', 'wb') as f:\n            np.save(f, train_dataset)\n\n        with open(f'{DATASET_PATH}/{subject_no}_test_data.npy', 'wb') as f:\n            np.save(f, test_dataset)","repo_name":"harinadh12/Federated_Learning_WESAD","sub_path":"data_extraction.py","file_name":"data_extraction.py","file_ext":"py","file_size_in_byte":8779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"2404210119","text":"# Problem Set 4A\n# Name: Trevor KM\n# Collaborators: NONE\n# Time Spent: 1:00\n\ndef get_permutations(sequence):\n    '''\n    Enumerate all permutations of a given string\n\n    sequence (string): an arbitrary string to permute. Assume that it is a\n    non-empty string.\n\n    You MUST use recursion for this part. Non-recursive solutions will not be\n    accepted.\n\n    Returns: a list of all permutations of sequence\n\n    Example:\n    >>> get_permutations('abc')\n    ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']\n\n    Note: depending on your implementation, you may return the permutations in\n    a different order than what is listed here.\n    '''\n\n    # list of permutations to return\n    perm_list = []\n\n    # base case: a single character has exactly one permutation\n    if len(sequence) == 1:\n        perm_list.append(sequence)\n        return perm_list  # singleton list\n\n    for i, char in enumerate(sequence):\n        # characters before the chosen leading character\n        current_char = sequence[:i]\n        # remaining characters after it\n        remain_chars = sequence[i + 1:]\n        # loop through recursive permutation outputs\n        for perm in get_permutations(current_char + remain_chars):\n            # add leading char to remaining permutation\n            new_perm = char + perm\n            # append to resulting perm_list if not a dupe\n            if new_perm not in perm_list:\n                perm_list.append(new_perm)\n    return perm_list\n\n\nif __name__ == '__main__':\n\n    # EXAMPLE\n    example_input = 'abc'\n    print('Input:', example_input)\n    print('Expected Output:', ['abc', 'acb', 'bac', 'bca', 'cab', 'cba'])\n    print('Actual Output:', get_permutations(example_input))\n\n    # Put three example test cases here (for your sanity, limit your inputs\n    # to be three characters or fewer as you will have n! permutations for a
\n    # sequence of length n)\n\n    # MY TEST CASES\n    example_ii = '_3'\n    print('Input:', example_ii)\n    print('Expected Output:', ['_3', '3_'])\n    print('Actual Output:', get_permutations(example_ii))\n\n    example_iii = '%3%'\n    print('Input:', example_iii)\n    print('Expected Output:', ['%3%', '%%3',\n                               '3%%'])\n    print('Actual Output:', get_permutations(example_iii))\n","repo_name":"rovertm/CS-Curriculum","sub_path":"001-Intro-to-CS-and-Programming/ps4/ps4a.py","file_name":"ps4a.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"40930530145","text":"import pandas as pd\nimport argparse\n# import pandas_gbq\nfrom google.cloud import storage\nfrom google.oauth2 import service_account\n\n'''\n    Must export GOOGLE_APPLICATION_CREDENTIALS\n'''\n\ndef set_credentials(credentials_file):\n    # credentials required to query data\n    credentials = service_account.Credentials.from_service_account_file(\n        credentials_file,\n    )\n    return credentials\n\ndef get_mean_reviews_by_biz(credentials, bucket_name):\n    q_mean_reviews_by_biz = \"WITH BUSINESS_ATTR AS ( SELECT DISTINCT BUSINESS_ID, BUSINESS_NAME FROM `slalom-de.slalom.business_attributes`) \"\n    q_mean_reviews_by_biz += \"SELECT ba.BUSINESS_ID, ba.BUSINESS_NAME, ROUND(AVG(rev.REVIEW_STARS),2) AS REVIEW_RATING\"\n    q_mean_reviews_by_biz += \" FROM BUSINESS_ATTR ba, `slalom-de.slalom.reviews` rev\"\n    q_mean_reviews_by_biz += \" WHERE ba.BUSINESS_ID = rev.BUSINESSID\"\n    q_mean_reviews_by_biz += \" GROUP BY BUSINESS_ID, BUSINESS_NAME;\"\n\n    # read data from BigQuery and return a dataframe\n    df = pd.read_gbq(q_mean_reviews_by_biz, project_id=bucket_name, credentials=credentials)\n\n    return df\n\ndef load_file_gcs(bucket_name, file_name, data_frame):\n    client = storage.Client()\n    bucket = client.get_bucket(bucket_name)\n\n    # create the file in the bucket\n    bucket.blob(file_name).upload_from_string(data_frame.to_csv(sep=\"|\", index=False), 'text/csv')\n\ndef run():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--output_file',\n                        dest='output_file',\n                        required=True,\n                        help='Provide output file path')\n    parser.add_argument('--bucket_name',\n                        dest='bucket_name',\n                        required=True,\n                        help='bucket name for output file')\n    parser.add_argument('--credentials_file',\n                        dest='credentials_file',\n                        required=True,\n                        help='Service Account Credentials file path')\n    args = parser.parse_args()\n\n    bucket_name = args.bucket_name\n    output_file = args.output_file\n    credentials_file = args.credentials_file\n\n    # set credentials using the json file\n    credentials = set_credentials(credentials_file)\n\n    # execute the query to extract data from BigQuery\n    df_mean_reviews_by_biz = get_mean_reviews_by_biz(credentials, bucket_name)\n\n    # create the file in GCS\n    load_file_gcs(bucket_name, output_file, df_mean_reviews_by_biz)\n\nif __name__ == '__main__':\n    run()\n","repo_name":"vijayrgopu/slalom-de-gcp","sub_path":"load_mean_rev_by_biz.py","file_name":"load_mean_rev_by_biz.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"21352193585","text":"from flask import Flask\n\napp = Flask(__name__)\n\n@app.route('/')\ndef hello_world():\n    \"\"\"Return the greeting for the index page.\"\"\"\n    return 'hello world, Bravilor Bonamat!! first dayyyyyyyyyyy'\n\n@app.route('/tri')\ndef ri_tech():\n    \"\"\"Return the greeting for the second page.\"\"\"\n    return 'Bingoooooooooooo. 
woowwwwwww 2nd page of Bravilor Bonamat!!!!!'\n\n# @app.route('/batch')\n# def batch():\n# \"\"\"Print 'Hello, world!' as the response body.\"\"\"\n# return 'feb-batch'\n\n\nif __name__ == '__main__':\n # run() method of Flask class runs the application\n # on the local development server.\n app.run(host=\"0.0.0.0\")\n","repo_name":"ygprakash/helloworld","sub_path":"simple_server.py","file_name":"simple_server.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2854505505","text":"import tensorflow as tf\r\nimport numpy as np\r\n\r\nbatch_size = 64\r\npenaltyLambda = 10\r\nimg_size=28\r\n\r\ndiscriminator_optimizer = tf.keras.optimizers.Adam(lr=1e-4, beta_1 = 0.5, beta_2=0.9)\r\ngenerator_optimizer = tf.keras.optimizers.Adam(lr=1e-4, beta_1 = 0.5, beta_2=0.9)\r\n\r\ndef gradient_penalty(discriminator, real_img, fake_img):\r\n epsilon = tf.random.uniform((batch_size, 1, 1, 1))\r\n interpolated_img = real_img*epsilon + fake_img*(1-epsilon)\r\n with tf.GradientTape() as gt:\r\n gt.watch(interpolated_img)\r\n mixed_score = discriminator(interpolated_img, training=False)\r\n gradients = gt.gradient(mixed_score, interpolated_img)\r\n gradients = tf.keras.backend.reshape(gradients, shape=(gradients.shape[0], -1))\r\n gradients = tf.norm(gradients, ord='euclidean', axis=1)\r\n gradient_pen = tf.reduce_mean((gradients-1) ** 2)\r\n return gradient_pen\r\n\r\ndef wass_loss(y_true, y_pred):\r\n loss = tf.keras.backend.mean(y_true - y_pred)\r\n return loss\r\n\r\n\r\n\"\"\" def wass_loss_cr(discriminator, generator, real_imgs, noise):\r\n gen_img = generator(noise, training=False)\r\n print(\"HELLO\")\r\n def loss(y_true, y_pred):\r\n penalty = penaltyLambda*gradient_penalty(discriminator, real_imgs, gen_img)\r\n print(\"did this work at all\")\r\n return (-tf.keras.backend.mean(y_true*y_pred)) + penalty\r\n return loss \"\"\"\r\n\r\n\r\n","repo_name":"mehmetcanakbay/paperimp_tensorflow2.0","sub_path":"wgan-gp/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3488054219","text":"# Define a merge_sort function\n\ndef merge_sort(arr):\n\n values = []\n if len(arr) > 1:\n\n mid = (0 + len(arr)) // 2\n\n left = arr[:mid]\n right = arr[mid:]\n\n left = merge_sort(left)\n right = merge_sort(right)\n\n while len(left) > 0 and len(right) > 0:\n if left[0] <= right[0]:\n values.append(left[0])\n left.pop(0)\n elif left[0] > right[0]:\n values.append(right[0])\n right.pop(0)\n\n if len(left) > 0:\n for i in left:\n values.append(i)\n else:\n for i in right:\n values.append(i)\n\n if len(arr) == 1:\n values = arr\n return values\n\n return values\n\n\ndef rearrange_digits(input_list):\n\n sorted_input_list = merge_sort(input_list)\n\n ans = []\n list_length = len(sorted_input_list)\n ans1 = \"\"\n ans2 = \"\"\n if list_length % 2 == 0:\n for i in range(list_length-1, 0, -2):\n ans1 += str(sorted_input_list[i])\n\n for i in range(list_length-2, -1, -2):\n ans2 += str(sorted_input_list[i])\n\n ans1 = int(ans1)\n ans2 = int(ans2)\n\n ans.append(ans1)\n ans.append(ans2)\n\n else:\n for i in range(list_length-1, -1, -2):\n ans1 += str(sorted_input_list[i])\n\n for i in range(list_length-2, 0, -2):\n ans2 += str(sorted_input_list[i])\n\n ans1 = int(ans1)\n ans2 = int(ans2)\n\n ans.append(ans1)\n ans.append(ans2)\n\n return ans\n\n\ndef test_function(test_case):\n output = 
rearrange_digits(test_case[0])\n solution = test_case[1]\n\n if sum(output) == sum(solution):\n print(\"Pass\")\n else:\n print(\"Fail\")\n\n\ntest_function([[1, 2, 3, 4, 5], [542, 31]])\ntest_case = [[4, 6, 2, 5, 9, 8], [964, 852]]\n","repo_name":"BachVu-3010/DataStructures_Algorithms_Udacity","sub_path":"Project3/rearrange_digits.py","file_name":"rearrange_digits.py","file_ext":"py","file_size_in_byte":1836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19509933569","text":"import logging\n\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import Http404\nfrom django.http import HttpRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect\nfrom django.shortcuts import render\nfrom django.views.generic import DetailView\nfrom django.views.generic import UpdateView\nfrom django.views.generic import View\nfrom django.views.generic.detail import SingleObjectMixin\n\nfrom .models import ShopTicket\nfrom utils.models import CampReadOnlyModeError\n\n\nlogger = logging.getLogger(\"bornhack.%s\" % __name__)\n\n\n@login_required\ndef shop_ticket_list_view(request: HttpRequest) -> HttpResponse:\n \"\"\"List all tickets for the logged-in user.\"\"\"\n base_queryset = (\n ShopTicket.objects.select_related(\n \"ticket_type\",\n \"ticket_type__camp\",\n \"product\",\n \"product__ticket_type__camp\",\n \"bundle_product\",\n )\n .filter(opr__order__user=request.user)\n .order_by(\"ticket_type__camp\", \"ticket_group\")\n )\n\n context = {\n \"tickets\": (base_queryset.filter(ticket_group__isnull=True)),\n \"tickets_in_groups\": (\n base_queryset.filter(ticket_group__isnull=False).order_by(\"ticket_group\")\n ),\n }\n\n return render(request, \"tickets/ticket_list.html\", context)\n\n\nclass ShopTicketDownloadView(LoginRequiredMixin, SingleObjectMixin, View):\n model = ShopTicket\n\n def dispatch(self, request, *args, **kwargs):\n if not request.user == self.get_object().opr.order.user:\n raise Http404(\"Ticket not found\")\n\n return super().dispatch(request, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n response = HttpResponse(content_type=\"application/pdf\")\n response[\n \"Content-Disposition\"\n ] = 'attachment; filename=\"{type}_ticket_{pk}.pdf\"'.format(\n type=self.get_object().shortname,\n pk=self.get_object().pk,\n )\n response.write(self.get_object().generate_pdf().getvalue())\n return response\n\n\nclass ShopTicketDetailView(LoginRequiredMixin, UpdateView, DetailView):\n model = ShopTicket\n template_name = \"tickets/ticket_detail.html\"\n context_object_name = \"ticket\"\n fields = [\"name\", \"email\"]\n\n def form_valid(self, form):\n return super().form_valid(form)\n\n def dispatch(self, request, *args, **kwargs):\n ticket = self.get_object()\n if ticket.opr.order.user != request.user:\n raise Http404(\"Ticket not found\")\n return super().dispatch(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n try:\n response = super().post(request, *args, **kwargs)\n except CampReadOnlyModeError:\n messages.error(\n self.request,\n \"The camp is over. 
You can't update the ticket.\",\n            )\n            return redirect(self.get_object().get_absolute_url())\n        else:\n            messages.info(self.request, \"Ticket updated!\")\n            return response\n","repo_name":"bornhack/bornhack-website","sub_path":"src/tickets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3149,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"}
{"seq_id":"37124718067","text":"#coding:utf-8\r\n\r\n# glottal voice source as input of Two Tubes Model of vocal tract\r\n# Glottal Volume Velocity\r\n# based on A.E.Rosenberg's formula for Glottal Volume Velocity\r\n\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\n# Check version\r\n#  Python 3.6.4 on win32 (Windows 10)\r\n#  numpy 1.14.0\r\n#  matplotlib 2.1.1\r\n\r\n\r\nclass Class_Glottal(object):\r\n\tdef __init__(self, tclosed=5.0, trise=6.0, tfall=2.0, sampling_rate=48000):\r\n\t\t# initialize\r\n\t\tself.tclosed = tclosed  # duration of closed state [mSec]\r\n\t\tself.trise = trise      # duration of opening [mSec]\r\n\t\tself.tfall = tfall      # duration of closing [mSec]\r\n\t\tself.sr = sampling_rate\r\n\t\tself.yg = self.make_one_plus()\r\n\r\n\tdef make_one_plus(self,):\r\n\t\t# output yg\r\n\t\tself.N1 = int((self.tclosed / 1000.) * self.sr)\r\n\t\tself.N2 = int((self.trise / 1000.) * self.sr)\r\n\t\tself.N3 = int((self.tfall / 1000.) * self.sr)\r\n\t\tself.LL = self.N1 + self.N2 + self.N3\r\n\t\tyg = np.zeros(self.LL)\r\n\t\t# print ('Length= ', self.LL)\r\n\t\tfor t0 in range(self.LL):\r\n\t\t\tif t0 < self.N1:\r\n\t\t\t\tpass\r\n\t\t\telif t0 <= (self.N2 + self.N1):\r\n\t\t\t\tyg[t0] = 0.5 * (1.0 - np.cos((np.pi / self.N2) * (t0 - self.N1)))\r\n\t\t\telse:\r\n\t\t\t\tyg[t0] = np.cos((np.pi / (2.0 * self.N3)) * (t0 - (self.N2 + self.N1)))\r\n\t\treturn yg\r\n\r\n\tdef make_N_repeat(self, repeat_num=3):\r\n\t\tyg_repeat = np.zeros(len(self.yg) * repeat_num)\r\n\t\tfor loop in range(repeat_num):\r\n\t\t\tyg_repeat[len(self.yg)*loop:len(self.yg)*(loop+1)] = self.yg\r\n\t\treturn yg_repeat\r\n\r\n\tdef fone(self, f):\r\n\t\t# calculate one point of the frequency response\r\n\t\txw = 2.0 * np.pi * f / self.sr\r\n\t\tyi = 0.0\r\n\t\tyb = 0.0\r\n\t\tfor v in range(0, (self.N2 + self.N3)):\r\n\t\t\tyi += self.yg[self.N1 + v] * np.exp(-1j * xw * v)\r\n\t\t\tyb += self.yg[self.N1 + v]\r\n\t\tval = yi / yb\r\n\t\treturn np.sqrt(val.real ** 2 + val.imag ** 2)\r\n\r\n\tdef H0(self, freq_low=100, freq_high=5000, Band_num=256):\r\n\t\t# get the log-scale frequency response, from freq_low to freq_high, Band_num points\r\n\t\tamp = []\r\n\t\tfreq = []\r\n\t\tbands = np.zeros(Band_num+1)\r\n\t\tfcl = freq_low * 1.0   # convert to float\r\n\t\tfch = freq_high * 1.0  # convert to float\r\n\t\tdelta1 = np.power(fch/fcl, 1.0 / (Band_num))  # log-scale step\r\n\t\tbands[0] = fcl\r\n\t\t# print (\"i,band = 0\", bands[0])\r\n\t\tfor i in range(1, Band_num+1):\r\n\t\t\tbands[i] = bands[i-1] * delta1\r\n\t\t\t# print (\"i,band =\", i, bands[i])\r\n\t\tfor f in bands:\r\n\t\t\tamp.append(self.fone(f))\r\n\t\treturn np.log10(amp) * 20, bands  # = amp value, freq list\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n\t# instance\r\n\tglo = Class_Glottal()\r\n\r\n\t# draw\r\n\tfig = plt.figure()\r\n\t# draw one waveform\r\n\tplt.subplot(3,1,1)\r\n\tplt.xlabel('mSec')\r\n\tplt.ylabel('level')\r\n\tplt.title('Glottal Waveform')\r\n\tplt.plot( (np.arange(len(glo.yg)) * 1000.0 / glo.sr) , glo.yg)\r\n\r\n\t# draw frequency response\r\n\tplt.subplot(3,1,2)\r\n\tplt.xlabel('Hz')\r\n\tplt.ylabel('dB')
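\r\n\t# H0 evaluates the response at Band_num log-spaced frequencies between freq_low and freq_high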
\r\n\tplt.title('Glottal frequency response')\r\n\tamp, freq = glo.H0(freq_high=5000, Band_num=256)\r\n\tplt.plot(freq, amp)\r\n\r\n\t# draw repeated waveform\r\n\tyg_repeat = glo.make_N_repeat(repeat_num=3)\r\n\tplt.subplot(3,1,3)\r\n\tplt.xlabel('mSec')\r\n\tplt.ylabel('level')\r\n\tplt.title('Glottal repeated Waveform')\r\n\tplt.plot( (np.arange(len(yg_repeat)) * 1000.0 / glo.sr) , yg_repeat)\r\n\r\n\t#\r\n\tfig.tight_layout()\r\n\tplt.show()\r\n\r\n# This file uses TAB indentation\r\n","repo_name":"shun60s/Vocal-Tube-Model","sub_path":"glottal.py","file_name":"glottal.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"48"}
{"seq_id":"42089005464","text":"\"\"\"\nFile: fire.py\nName:\n---------------------------------\nThis file contains a function called\nhighlight_fires which detects the\npixels that are recognized as fire\nand highlights them for better observation.\n\"\"\"\nfrom simpleimage import SimpleImage\n\n\nHURDLE_FACTOR = 1.05\n\n\ndef highlight_fires(filename):\n    \"\"\"\n    :param filename: str, the path of the image file.\n    :return: img, the image with fire pixels marked in red.\n    \"\"\"\n    img = SimpleImage(filename)\n    for pixel in img:\n        avg = (pixel.red + pixel.green + pixel.blue) / 3\n        if pixel.red < avg * HURDLE_FACTOR:\n            pixel.red = avg\n            pixel.green = avg\n            pixel.blue = avg\n        else:\n            pixel.red = 255\n            pixel.green = 0\n            pixel.blue = 0\n    return img\n\n\ndef main():\n    \"\"\"\n    Show the original picture, then a version where the fires are marked in red.\n    \"\"\"\n    original_fire = SimpleImage('images/greenland-fire.png')\n    original_fire.show()\n    highlighted_fire = highlight_fires('images/greenland-fire.png')\n    highlighted_fire.show()\n\n\n# DO NOT EDIT CODE BELOW THIS LINE #\n\nif __name__ == '__main__':\n    main()\n","repo_name":"cola30616/MyStanCodeProject","sub_path":"SC001/Assignment4/fire.py","file_name":"fire.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"35569392424","text":"from transformers.modeling_bert import BertForNextSentencePrediction\nfrom transformers.tokenization_bert import BertTokenizer\nfrom torch.utils.data import DataLoader, RandomSampler, TensorDataset\nfrom torch.nn.modules.loss import CrossEntropyLoss\nimport torch, os, sys, nltk, tqdm, time, math\nfrom transformers.optimization import AdamW\nfrom utils_logplot import LogPlot\nfrom collections import Counter\n\nSTOP_WORDS = set([\"'\", \".\", \"!\", \"?\", \",\", '\"', '-', 'we', 'our', 'you', 'he', 'him', 'she', 'her', 'it', \"it's\", 'its', 'they', 'their', 'this', 'that', 'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'have', 'has', 'had', 'do', 'does', 'did', 'a', 'an', 'the', 'and', 'or', 'as', 'of', 'at', 'by', 'to', 'not', 'so', \"'s\", \"in\", \"for\", \"with\", \"on\"])\n\nclass PatternPenalty:\n    # Penalizes summaries whose words or bigrams appear in a large fraction of the last X summaries\n    def __init__(self, history_length=30):\n        self.stop_words = STOP_WORDS\n        self.history_words = []\n        self.ngram_history = []\n        self.history_length = history_length\n\n    def score(self, summaries, bodies, bodies_tokenized=None, lengths=None, extra=None):\n        batch_words = []\n        batch_ngrams = []\n        for summary in summaries:\n            words = nltk.tokenize.word_tokenize(summary.lower())\n            gram = 2\n            n_grams = [tuple(words[i:(i+gram)]) for i in range(len(words)-gram+1)]\n\n            word_set = set(words) - self.stop_words\n            word_set = [w for w in word_set if len(w) > 1]
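\n            # record this summary's content words and bigrams in the rolling history shared across recent calls\n            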
self.history_words.append(word_set)\n self.ngram_history.append(n_grams)\n batch_words.append(word_set)\n batch_ngrams.append(n_grams)\n\n self.history_words = self.history_words[-self.history_length:] # Trim\n self.ngram_history = self.ngram_history[-self.history_length:] # Trim\n\n word_counter = Counter([w for words in self.history_words for w in words])\n ngram_counter = Counter([ng for ngrams in self.ngram_history for ng in ngrams])\n\n scores = []\n for words, ngrams in zip(batch_words, batch_ngrams):\n score = 0.0\n\n if any(word_counter[w] > 0.5*self.history_length for w in words):\n score = 1.0\n if any(ngram_counter[ng] > 0.5*self.history_length for ng in ngrams):\n score = 1.0\n # print(\">>>\",ngram_counter.most_common(8))\n scores.append(score)\n return scores, None\n\nclass LengthPenalty:\n # Depending on how many words are used a large fraction of the last X summaries\n def __init__(self, target_length):\n self.target_length = float(target_length)\n\n def score(self, summaries, bodies, bodies_tokenized=None, lengths=None, extra=None):\n # In lengths, the number of tokens. Is -1 if the summary did not produce an END token, which will be maximum penalty, by design.\n # scores = [1.0-L/self.target_length for L in lengths]\n scores = [1.0 if L > self.target_length else 1.0-L/self.target_length for L in lengths] # This lets it go beyond for free\n\n return scores, None\n\nclass RepeatPenalty:\n # Shouldn't use non-stop words several times in a summary. Fairly constraining.\n def __init__(self):\n self.stop_words = STOP_WORDS\n\n def score(self, summaries, bodies, bodies_tokenized=None, lengths=None, extra=None):\n scores = []\n for summary in summaries:\n words = nltk.tokenize.word_tokenize(summary.lower())\n L = len(words)\n N_1 = max(2, math.ceil(L / 10.0)) # You shouldn't use the same non-stop word more than 3 times.\n N_2 = math.ceil(L / 8.0)\n word_counts = Counter([w for w in words if w not in self.stop_words])\n all_word_counts = Counter([w for w in words if len(w) > 1])\n if len(word_counts) > 0 and len(all_word_counts) > 0 and (word_counts.most_common(1)[0][1] > N_1 or all_word_counts.most_common(1)[0][1] > N_2):\n # print(L, N_1, N_2)\n # print(\"Repeat penalty:\", word_counts.most_common(3), all_word_counts.most_common(3))\n scores.append(1.0)\n else:\n scores.append(0.0)\n return scores, None\n\n# if __name__ == \"__main__\":\n# import argparse\n\n# parser = argparse.ArgumentParser()\n# parser.add_argument(\"--gpu_nb\", type=int, default=3, help=\"Which GPU to use. 
For now single GPU.\")\n# parser.add_argument(\"--train_batch_size\", type=int, default=8, help=\"Training batch size.\")\n# parser.add_argument(\"--device\", type=str, default=\"cuda\", help=\"cuda or cpu\")\n# parser.add_argument(\"--do_train\", action='store_true', help=\"Whether to do some training.\")\n\n# args = parser.parse_args()\n# os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"+str(args.gpu_nb)\n\n# print(\"Loading model\")\n# fluency = FluencyCoLA(args.device, model_file=\"/home/phillab/models/news_gpt2_bs32.bin\")\n\n# if args.do_train:\n# dataloader = fluency.get_training_dataset(args.train_batch_size)\n# param_optimizer = list(fluency.model.named_parameters())\n# no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']\n# optimizer_grouped_parameters = [\n# {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},\n# {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}\n# ]\n\n# optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5)\n# scheduler = WarmupLinearSchedule(optimizer, warmup_steps=0, t_total=len(dataloader))\n# logplot = LogPlot(\"/home/phillab/logs/fluency/bert_cola_gpu.log\")\n\n# time_save = time.time()\n# optim_every = 4\n\n# for epi in range(1):\n# print(\"Epoch\", epi)\n# for ib, batch in tqdm.tqdm(enumerate(dataloader), total=len(dataloader)):\n# batch = tuple(t.to(args.device) for t in batch)\n# input_ids, masks, token_types, labels = batch\n# outputs, = fluency.get_output(input_ids, masks, token_types)\n\n# cross_ent = CrossEntropyLoss()\n# loss = cross_ent(outputs, labels)\n# acc = torch.argmax(outputs,dim=1).eq(labels).float().mean().item()\n\n# loss.backward()\n\n# if ib%optim_every == 0:\n# scheduler.step() # Update learning rate schedule\n# optimizer.step()\n# optimizer.zero_grad()\n\n# logplot.cache({\"loss\": loss.item(), \"accuracy\": acc}, prefix=\"T_\")\n# if time.time()-time_save > 60.0:\n# logplot.save(printing=True)\n# time_save = time.time()\n# fluency.save_model(\"/home/phillab/models/bert_fluency_cola_b.bin\")\n\nif __name__ == \"__main__\":\n\n summary = \"India's Telecom Commission is seeking clarity on 2G spectrum auction issues including the auction\"\n summary = \"The 39-year-old French star of the silent comedy The Artist scooped the Best Actor statue at the Academy Awards in\"\n summary = \"The two available units cost $574,000 and $649,900.\"\n\n reppen = RepeatPenalty()\n print(reppen.score([summary], [\"\"]))\n","repo_name":"CannyLab/summary_loop","sub_path":"model_guardrails.py","file_name":"model_guardrails.py","file_ext":"py","file_size_in_byte":7277,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"48"} +{"seq_id":"72730802067","text":"import pygame.event\nfrom pygame.mixer import Sound\n\nfrom menu import *\nfrom tiles import *\n\ndebug = False # debug\n\n# iniciando e criando fonte\npg.font.init()\nmyfont = pg.font.SysFont('Comic Sans MS', 10)\n\nWIN = pygame.USEREVENT + 1 # evento personalizado\n\natual = 1 # variavel de selecionar o mapa \n\n# dicionario com os mapas\nlevels = {\n 1:\"assets\\maps\\map1.csv\",\n 2:\"assets\\maps\\map2.csv\"\n}\n\n\n# Player\nclass Player:\n def __init__(self, game):\n self.game = game\n self.pos = [self.game.map.start_x, self.game.map.start_y]\n self.vel = [0, 0]\n self.speed = 1\n\n self.collect = {'coin': 0, 'potion1': False, 'potion2': False}\n\n # sprites para a animação (em string)\n self.sprites = {\n 'left': [\n 
'assets/player/player-left1.png',\n                'assets/player/player-left2.png',\n                'assets/player/player-left3.png',\n                'assets/player/player-left4.png',\n            ],\n            'right': [\n                'assets/player/player-right1.png',\n                'assets/player/player-right2.png',\n                'assets/player/player-right3.png',\n                'assets/player/player-right4.png',\n            ],\n            'up': [\n                'assets/player/player-up1.png',\n                'assets/player/player-up2.png',\n                'assets/player/player-up3.png',\n                'assets/player/player-up4.png',\n            ],\n            'down': [\n                'assets/player/player-down1.png',\n                'assets/player/player-down2.png',\n                'assets/player/player-down3.png',\n                'assets/player/player-down4.png',\n            ],\n        }\n        # animation state variables\n        self.loadSprites()\n        self.img = self.sprites['right'][0]\n        self.aniFrame = 0\n\n        # create the collision rectangle\n        self.rect = self.img.get_rect()\n        self.rect.x, self.rect.y = self.pos\n\n    # turn the string paths into loaded sprite surfaces\n    def loadSprites(self):\n        for cod, frames in self.sprites.items():\n            for i, sprite in enumerate(frames):\n                self.sprites[cod][i] = pg.image.load(sprite).convert_alpha()\n\n    # general collision handling\n    def checkcollision(self):\n        # regular tiles\n        for tile in self.game.map.tiles:\n            if self.rect.colliderect(tile.rect):\n                return True\n        # unique objects\n        for obj in self.game.map.objects:\n            if self.rect.colliderect(obj.rect):\n                # collectables\n                if obj.collectable:\n                    if obj.id == 'coin':\n                        coin_sound.play()\n                        self.collect['coin'] += 1\n                    else:\n                        self.collect[obj.id] = True\n                    self.game.map.objects.remove(obj)\n                # skip the block if it matches the current background colour\n                if obj.color == self.game.bgColor:\n                    continue\n                if obj.solid:\n                    return True\n\n    def animation(self):\n        # reset animation when stopping\n        if self.vel == (0, 0):\n            self.aniFrame = 0\n        # left\n        if self.vel[0] < 0:\n            self.img = self.sprites['left'][int(self.aniFrame)]\n        # right\n        if self.vel[0] > 0:\n            self.img = self.sprites['right'][int(self.aniFrame)]\n        # up\n        if self.vel[1] < 0:\n            self.img = self.sprites['up'][int(self.aniFrame)]\n        # down\n        if self.vel[1] > 0:\n            self.img = self.sprites['down'][int(self.aniFrame)]\n\n        self.aniFrame += 4 / 60\n        if self.aniFrame > 3:\n            self.aniFrame = 0\n\n    def move(self):\n        self.pos[0] += self.vel[0]\n        self.pos[1] += self.vel[1]\n\n        self.rect.x = self.pos[0]\n        self.rect.y = self.pos[1]\n\n        if self.checkcollision():\n            self.pos[0] -= self.vel[0]\n            self.pos[1] -= self.vel[1]\n\n        self.animation()\n\n# Game Loop\nclass Game:\n    def __init__(self):\n        pg.init()\n        pg.display.set_caption(\"Spectre\")\n        self.running, self.playing = True, False  # variables used by the menu system\n        self.debug = False\n\n        # keys used in the menu\n        self.UP_KEY, self.DOWN_KEY, self.START_KEY, self.BACK_KEY = False, False, False, False\n\n        # set up the screen\n        self.DISPLAY_W, self.DISPLAY_H = 900, 600\n        self.display = pg.Surface((self.DISPLAY_W, self.DISPLAY_H))\n        self.window = pg.display.set_mode((self.DISPLAY_W, self.DISPLAY_H))\n        self.font_name = pg.font.get_default_font()\n\n        # define the menus\n        self.main_menu = MainMenu(self)\n        self.options = OptionsMenu(self)\n        self.credits = CreditsMenu(self)\n        self.endmenu = EndMenu(self)\n        self.curr_menu = self.main_menu  # current menu\n\n        self.clock = pg.time.Clock()  # clock used to cap the fps\n        self.bgColor = colors.white  # background colour\n\n    # checks whether all the coins have been collected\n    def victory(self):\n        count = 0\n        for obj in self.map.objects:\n            if obj.id == \"coin\":\n                count += 1\n        if count == 0:\n            pygame.event.post(pygame.event.Event(WIN))  # fire the custom WIN event
\n\n    # graphical overlay (the colour menu in the top-left corner)\n    def colorOverlay(self, surf):\n        pg.draw.rect(surf, colors.black, (19, 19, 28, 10), 0)\n        pg.draw.rect(surf, colors.white, (20, 20, 8, 8), 0)\n        if self.player.collect['potion1']:\n            pg.draw.rect(surf, hue[1], (29, 20, 8, 8), 0)\n        if self.player.collect['potion2']:\n            pg.draw.rect(surf, hue[2], (38, 20, 8, 8), 0)\n\n    # Move player when key is held\n    def playerControl(self):\n        keys = pg.key.get_pressed()\n        self.player.vel = (0, 0)\n        # Move player, change sprite based on direction\n        if keys[pg.K_LEFT]:\n            self.player.vel = (-self.player.speed, 0)\n        if keys[pg.K_RIGHT]:\n            self.player.vel = (self.player.speed, 0)\n        if keys[pg.K_UP]:\n            self.player.vel = (0, -self.player.speed)\n        if keys[pg.K_DOWN]:\n            self.player.vel = (0, self.player.speed)\n\n    # checks specific keys (change colour, toggle debug, quit the game)\n    def keyPress(self, e):\n        if e.key == pg.K_1:\n            self.bgColor = colors.white\n        if e.key == pg.K_2 and self.player.collect['potion1']:\n            self.bgColor = hue[1]\n            self.player.collect['potion1'] = False\n        if e.key == pg.K_3 and self.player.collect['potion2']:\n            self.bgColor = hue[2]\n            self.player.collect['potion2'] = False\n        if e.key == pg.K_0:\n            self.debug = not self.debug\n        if e.key == pg.K_ESCAPE:\n            self.playing = False\n\n    def drawGame(self):\n        self.screen = pg.Surface(GAME_RESOLUTION)\n        self.screen.fill(self.bgColor)\n        # map\n        self.map.draw_map(self.screen)\n        # objects\n        for obj in self.map.objects:\n            obj.draw(self.screen)\n        # player\n        self.player.move()\n        self.screen.blit(self.player.img, self.player.rect)\n        # overlay\n        self.colorOverlay(self.screen)\n        # coin text\n        # textsurface = myfont.render(f'{self.player.collect[\"coin\"]} Coins', False, (0, 0, 0))\n        # self.screen.blit(textsurface, (20, 28))\n        # debug\n        if self.debug:\n            pg.draw.rect(self.screen, (255, 0, 0), self.player.rect, 1)\n            for obj in self.map.objects:\n                pg.draw.rect(self.screen, (0, 255, 0), obj.rect, 1)\n        # scale screen\n        self.screen = pg.transform.scale(self.screen, (900, 600))\n        self.window.blit(self.screen, (0, 0))\n\n    def inGame(self):\n        self.map = TileMap(levels[1])\n        self.player = Player(self)\n\n        while self.playing:\n            for event in pg.event.get():\n                if event.type == pg.QUIT:\n                    self.running, self.playing = False, False\n                if event.type == pg.KEYDOWN:\n                    self.keyPress(event)\n\n                # advance to the next level\n                if event.type == WIN:\n                    global atual\n                    atual += 1\n                    # go to the end screen after the last level\n                    if atual > 2:\n                        victory_music.play()\n                        self.bgColor = colors.white\n                        self.curr_menu = self.endmenu\n                        self.playing = False\n                    else:\n                        self.map = TileMap(levels[atual])\n                        self.player = Player(self)\n\n            self.drawGame()\n            self.playerControl()\n            pg.display.update()\n            self.clock.tick(60)\n            self.victory()\n\n    # check menu key presses\n    def check_events(self):\n        for event in pygame.event.get():\n            if event.type == pg.QUIT:\n                self.running, self.playing = False, False\n                self.curr_menu.run_display = False\n\n            if event.type == pg.KEYDOWN:\n                if event.key == pg.K_RETURN:\n                    self.START_KEY = True\n                if event.key == pg.K_BACKSPACE or event.key == pg.K_ESCAPE:\n                    self.BACK_KEY = True\n                if event.key == pg.K_DOWN or event.key == pg.K_s:\n                    self.DOWN_KEY = True\n                if event.key == pg.K_UP or event.key == pg.K_w:\n                    self.UP_KEY = True\n\n    def draw_text(self, text, size, x, y):\n        font = pygame.font.Font(self.font_name, size)\n        text_surface = font.render(text, True, colors.white)\n        text_rect = text_surface.get_rect()\n        text_rect.center = (x, y)\n        self.display.blit(text_surface, text_rect)\n\n    def 
reset_keys(self):\n self.UP_KEY, self.DOWN_KEY, self.START_KEY, self.BACK_KEY = False, False, False, False","repo_name":"jorge-junior/Spectre","sub_path":"game_module.py","file_name":"game_module.py","file_ext":"py","file_size_in_byte":10009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8800909067","text":"import os\nimport numpy as np\nfrom datetime import date\nfrom datetime import datetime\nfrom qdpy import jax_functions as jf\nfrom plotter import postplotter\nimport subprocess\nimport argparse\n\ncurrent_dir = os.path.dirname(os.path.realpath(__file__))\npackage_dir = os.path.dirname(current_dir)\nwith open(f\"{package_dir}/.config\", \"r\") as f:\n dirnames = f.read().splitlines()\nscratch_dir = dirnames[1]\n\ndpy_dir = f\"{scratch_dir}/dpy_jax\"\nqdpy_dir = f\"{scratch_dir}/qdpy_jax\"\n#------------------------------------------------------------------------# \nparser = argparse.ArgumentParser()\nparser.add_argument(\"--outdir\", help=\"output directory\",\n type=str, default=f\"{scratch_dir}/summaryfiles\")\nPARGS = parser.parse_args()\n#------------------------------------------------------------------------#\n\n\"\"\"\ndirnames = subprocess.check_output([\"ls\", f\"{PARGS.outdir}\"])\ndirnames = dirnames.decode().split('\\n')\nnumdir = len(dirnames)\nfor i in range(numdir):\n print(f\"[ {i:2d} ] {dirnames[i]}\")\nidx = int(input(f\"Select dataset to be plotted: \"))\noutdir = f\"{PARGS.outdir}/{dirnames[idx]}/summaryfiles\"\noutdir = f\"{PARGS.outdir}/{dirnames[idx]}/plots\"\nprint(f\"outdir = {outdir}\")\n\"\"\"\noutdir = f\"{PARGS.outdir}\"\nplotdir = f\"{PARGS.outdir}\"\n\n\ndef select_and_load():\n os.system(f\"ls {outdir}/summary* > {outdir}/fnames.txt\")\n with open(f\"{outdir}/fnames.txt\", \"r\") as f:\n fnames = f.read().splitlines()\n\n for i in range(len(fnames)):\n print(f\"{i:^5d} | {fnames[i]}\")\n\n select_modes = True\n summary_list = []\n count = 1\n\n while select_modes:\n selector = input(f\"File [{count}] | Enter the index for filename \" +\n f\"(enter x to exit) :\")\n if selector == 'x':\n select_modes = False\n break\n summary = jf.load_obj(f\"{fnames[int(selector)][:-4]}\")\n summary_list.append(summary)\n count += 1\n return summary_list\n\n\ndef plot_from_summary(summlist):\n fig, ax = None, None\n colors = ['red', 'blue', 'magenta', 'black', 'orange']\n count = 0\n for summary in summlist:\n GVARS = summary['params']['dpy']['GVARS']\n\n c_arr_fit = summary['c_arr_fit']\n true_params_flat = summary['true_params_flat']\n cind_arr = summary['cind_arr']\n sind_arr = summary['sind_arr']\n\n suffix = f\"{int(GVARS.knot_num)}s.{GVARS.eigtype}.{GVARS.tslen}d\"\n c_arr_fit_full = jf.c4fit_2_c4plot(GVARS, c_arr_fit*true_params_flat,\n sind_arr, cind_arr)\n fit_plot = postplotter.postplotter(GVARS, c_arr_fit_full,\n c_arr_fit_full*0.0, f'summary-{suffix}')\n fig, ax = fit_plot.plot_fit_wsr(fig=fig, ax=ax, pcolor=colors[count])\n count += 1\n return fig\n\n\nif __name__ == \"__main__\":\n summary_list = select_and_load()\n fig = plot_from_summary(summary_list)\n fig.savefig(f\"{plotdir}/compare-dpt-qdpt.pdf\")\n \n","repo_name":"srijaniiserprinceton/qdpy-numpyro","sub_path":"plotter/plot_summary_compare.py","file_name":"plot_summary_compare.py","file_ext":"py","file_size_in_byte":2959,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14800077126","text":"from tkinter import *\nfrom calculadora import Calculadora\n\nroot = Tk()\n# 
----------------------------------------\n# functions\nresultado = StringVar()\ncalc = Calculadora()\n\n\ndef somar():\n    numero1 = int(txt_numero1.get())\n    numero2 = int(txt_numero2.get())\n    resultado.set(calc.soma(numero1, numero2))\n\n\ndef subtracao():\n    numero1 = int(txt_numero1.get())\n    numero2 = int(txt_numero2.get())\n    resultado.set(calc.subtracao(numero1, numero2))\n\n\ndef multiplicacao():\n    numero1 = int(txt_numero1.get())\n    numero2 = int(txt_numero2.get())\n    resultado.set(calc.multiplicacao(numero1, numero2))\n\n\ndef divisao():\n    numero1 = int(txt_numero1.get())\n    numero2 = int(txt_numero2.get())\n    resultado.set(calc.divisao(numero1, numero2))\n\n\n# ----------------------------------------\n# widgets\nroot.resizable(0, 0)\nimg = PhotoImage(file='image/calculator-icon-96.png')\nimg_plus = PhotoImage(file='image/plus-icon-32.png')\nimg_minus = PhotoImage(file='image/minus-icon-32.png')\nimg_x = PhotoImage(file='image/x-icon-32.png')\nimg_divide = PhotoImage(file='image/divide-icon-32.png')\n\nframe1 = Frame(root)\nframe2 = Frame(root)\nlabel_img = Label(frame1, image=img)\nlabel1 = Label(frame1, text='Valor 1:')\nlabel2 = Label(frame1, text='Valor 2:')\n\ntxt_numero1 = Entry(frame1)\ntxt_numero2 = Entry(frame1)\ntxt_resultado = Entry(frame1, textvariable=resultado)\n\nlabel3 = Label(frame1, text='RESULTADO')\n\nbtn_adicao = Button(frame1, image=img_plus, command=somar)\nbtn_subtracao = Button(frame1, image=img_minus, command=subtracao)\nbtn_multiplicacao = Button(frame1, image=img_x, command=multiplicacao)\nbtn_divisao = Button(frame1, image=img_divide, command=divisao)\n\n\n# ----------------------------------------\n# layout\nlabel_img.grid(columnspan=2)\nframe1.grid()\n\nlabel1.grid(row=1, column=0)\ntxt_numero1.grid(row=1, column=1)\nlabel2.grid(row=2, column=0)\ntxt_numero2.grid(row=2, column=1)\n\nbtn_adicao.grid(row=3, column=1, stick='W')\nbtn_subtracao.grid(row=3, column=1, padx=7)\nbtn_multiplicacao.grid(row=3, column=1, stick='E')\nbtn_divisao.grid(row=3, column=0, stick='W')\nlabel3.grid(row=4, columnspan=2)\ntxt_resultado.grid(row=5, columnspan=2)\n\n\nroot.mainloop()\n","repo_name":"PhilipeGama/python-tkinter","sub_path":"projetinhos/CalculadoraView.py","file_name":"CalculadoraView.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"3037468395","text":"# --------------------------------------------------------------------------\n# ------------ Metody Systemowe i Decyzyjne w Informatyce ----------------\n# --------------------------------------------------------------------------\n#  Task 3: Logistic regression\n#  authors: A. Gonczarek, J. Kaczmar, S. Zareba
\n#  2017\n# --------------------------------------------------------------------------\n\nimport numpy as np\n\n\ndef sigmoid(x):\n    \"\"\"\n    :param x: vector of input values, Nx1\n    :return: vector of sigmoid function values for the input x, Nx1\n    \"\"\"\n    sig_x = 1 / (1 + np.exp(-x))\n    return sig_x\n\n\ndef logistic_cost_function(w, x_train, y_train):\n    \"\"\"\n    :param w: model parameters, Mx1\n    :param x_train: training set - inputs, NxM\n    :param y_train: training set - outputs, Nx1\n    :return: returns a tuple (val, grad), where val is the value of the logistic cost function and grad is its gradient with respect to w\n    \"\"\"\n\n    sigs = sigmoid(x_train @ w)\n    val = -np.mean(y_train * np.log(sigs) + (1 - y_train) * np.log(1 - sigs))\n    grad = x_train.T @ (sigs - y_train) / np.size(x_train, axis=0)\n    return val, grad\n\n\ndef gradient_descent(obj_fun, w0, epochs, eta):\n    \"\"\"\n    :param obj_fun: objective function to be optimized, called as val, grad = obj_fun(w)\n    :param w0: starting point, Mx1\n    :param epochs: number of epochs / iterations of the algorithm\n    :param eta: learning rate\n    :return: performs gradient-descent optimization of obj_fun. Returns a tuple (w, func_values),\n    where w is the optimal point found and func_values is a vector [epochs x 1] of the function values at every step of the algorithm\n    \"\"\"\n    w = w0\n    val, grad = obj_fun(w)\n    func_values = np.empty([epochs, 1])\n    ws = []\n    for k in range(epochs):\n        delta_w = -grad\n        w = w + eta * delta_w\n        ws.append(w)\n        val, grad = obj_fun(w)\n        func_values[k] = [val]\n    return ws[np.argmin(func_values)], func_values\n\ndef stochastic_gradient_descent(obj_fun, x_train, y_train, w0, epochs, eta, mini_batch):\n    \"\"\"\n    :param obj_fun: objective function to be optimized, called as val, grad = obj_fun(w, x, y), where x, y are the given\n    subsets of the training set (mini-batches)\n    :param x_train: training inputs, NxM\n    :param y_train: training outputs, Nx1\n    :param w0: starting point, Mx1\n    :param epochs: number of epochs\n    :param eta: learning rate\n    :param mini_batch: mini-batch size\n    :return: performs stochastic gradient-descent optimization of obj_fun. Returns a tuple (w, func_values),\n    where w is the optimal point found and func_values is a vector [epochs x 1] of the function values at every step of the algorithm. The values\n    written to func_values are computed on the whole training set!
\n    \"\"\"\n    w = w0\n    M = int(np.size(y_train) / mini_batch)\n    x_batches = np.vsplit(x_train, M)\n    y_batches = np.vsplit(y_train, M)\n    ws = []\n    func_values = np.empty([epochs, 1])\n    for k in range(epochs):\n        val = 0\n        for m in range(M):\n            val, grad = obj_fun(w, x_batches[m], y_batches[m])\n            delta_w = -grad\n            w = w + eta * delta_w\n        val, _ = logistic_cost_function(w, x_train, y_train)\n        ws.append(w)\n        func_values[k] = val\n    return ws[np.argmin(func_values)], func_values\n\ndef regularized_logistic_cost_function(w, x_train, y_train, regularization_lambda):\n    \"\"\"\n    :param w: model parameters, Mx1\n    :param x_train: training set - inputs, NxM\n    :param y_train: training set - outputs, Nx1\n    :param regularization_lambda: regularization parameter\n    :return: returns a tuple (val, grad), where val is the value of the logistic cost function with L2 regularization\n    and grad is its gradient with respect to w\n    \"\"\"\n    val, grad = logistic_cost_function(w, x_train, y_train)\n    w_0 = np.vstack((0, w[1:]))\n    val_lambda = val + regularization_lambda / 2 * np.linalg.norm(w_0) ** 2\n    grad_lambda = grad + regularization_lambda * w_0\n\n    return val_lambda, grad_lambda\n\ndef prediction(x, w, theta):\n    \"\"\"\n    :param x: observation matrix, NxM\n    :param w: model parameter vector, Mx1\n    :param theta: classification threshold from the interval [0,1]\n    :return: computes a vector y of size Nx1 containing labels from {0,1} for the observations in x,\n    based on the model with parameters w and the classification threshold theta\n    \"\"\"\n    y = sigmoid(x @ w) > theta\n\n    return y\n\ndef f_measure(y_true, y_pred):\n    \"\"\"\n    :param y_true: vector of true labels, Nx1\n    :param y_pred: vector of labels predicted by the model, Nx1\n    :return: computes the value of the F-measure\n    \"\"\"\n    tp = np.count_nonzero((y_true + y_pred) == 2)\n    falses = y_true - y_pred\n    # the label vectors are uint8, so a -1 difference wraps around to 255 (false positives)\n    fp = np.count_nonzero(falses == 255)\n    fn = np.count_nonzero(falses == 1)\n    return (2 * tp) / (2 * tp + fp + fn)\n\ndef model_selection(x_train, y_train, x_val, y_val, w0, epochs, eta, mini_batch, lambdas, thetas):\n    \"\"\"\n    :param x_train: training inputs, NxM\n    :param y_train: training outputs, Nx1\n    :param x_val: validation inputs, Nval x M\n    :param y_val: validation outputs, Nval x 1\n    :param w0: vector of initial parameter values\n    :param epochs: number of epochs for SGD\n    :param eta: learning rate\n    :param mini_batch: mini-batch size\n    :param lambdas: list of regularization parameter values lambda to be checked\n    :param thetas: list of classification threshold values theta to be checked\n    :return: performs model selection. Returns a tuple (regularization_lambda, theta, w, F), where regularization_lambda\n    is the best regularization parameter, theta is the best classification threshold, and w is the best model parameter vector.\n    The function also returns the matrix F, which contains the F-measure values for all (lambda, theta) pairs. For training, use the\n    SGD algorithm with the L2-regularized learning objective.
\n    \"\"\"\n    F = np.zeros(shape=(np.size(lambdas), np.size(thetas)))\n    ws = []\n    l_id = 0\n    for l in lambdas:\n        def obj_fun(w, x, y):\n            return regularized_logistic_cost_function(w, x, y, l)\n        w, func_values = stochastic_gradient_descent(obj_fun, x_train, y_train, w0, epochs, eta, mini_batch)\n        ws.append(w)\n        t_id = 0\n        for t in thetas:\n            y_pred = prediction(x_val, w, t)\n            F[l_id][t_id] = f_measure(y_val, y_pred)\n            t_id = t_id + 1\n        l_id = l_id + 1\n    lam_id, t_id = np.unravel_index(np.argmax(F, axis=None), F.shape)\n    return lambdas[lam_id], thetas[t_id], ws[lam_id], F","repo_name":"mpdev10/MSiD-lab3","sub_path":"content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":6671,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"33356054961","text":"# REFERENCE FILE ONLY.\n# This file is a copy of 'travel.py' in the 'try-cb-python' repo. It's not\n# intended to run. Please make any improvements or changes to the original\n# codebase and replicate the changes here afterwards. Docstrings have been\n# truncated or shortened for brevity.\n\nimport argparse\nimport math\nimport uuid\nimport jwt # from PyJWT\nfrom datetime import datetime\nfrom random import random\nfrom flasgger import Swagger, SwaggerView\nfrom flask import Flask, jsonify, make_response, request\nfrom flask.blueprints import Blueprint\nfrom flask_classy import FlaskView\nfrom flask_cors import CORS, cross_origin\n\n# Couchbase Imports\nimport couchbase.search as FT\nimport couchbase.subdocument as SD\nfrom couchbase.cluster import Cluster\nfrom couchbase.options import ClusterOptions, SearchOptions\nfrom couchbase.auth import PasswordAuthenticator\nfrom couchbase.exceptions import *\n\nJWT_SECRET = 'cbtravelsample'\n\n# tag::args[]\nparser = argparse.ArgumentParser()\nparser.add_argument('-c', '--cluster', help='Connection String i.e. localhost', default='db')\nparser.add_argument('-s', '--scheme', help='couchbase or couchbases', default='couchbase')\nparser.add_argument('-a', '--connectargs', help=\"?any_additional_args\", default=\"\")\nparser.add_argument('-u', '--user', help='User with access to bucket')\nparser.add_argument('-p', '--password', help='Password of user with access to bucket')\n\nargs = parser.parse_args()\n\nif not args.cluster:\n    raise ConnectionError(\"No value for CB_HOST set!\")\n\nif (\"couchbases://\" in args.cluster) or (\"couchbase://\" in args.cluster):\n    CONNSTR = f\"{args.cluster}{args.connectargs}\"\nelse:\n    CONNSTR = f\"{args.scheme}://{args.cluster}{args.connectargs}\"\n\nauthenticator = PasswordAuthenticator(args.user, args.password)\nprint(\"Connecting to: \" + CONNSTR)\n\n# ...\n# API endpoints\n# ...\n\n# end::args[]\n\n# tag::api[]\napp = Flask(__name__)\napp.config.from_object(__name__)\napp.config['SWAGGER'] = {\n    'openapi': '3.0.3',\n    'title': 'Travel Sample API',\n    'version': '1.0',\n    'description': 'A sample API for getting started with Couchbase Server and the SDK.',\n    'termsOfService': ''\n}\n\nswagger_template = {\n    \"components\": {\n        \"securitySchemes\": {\n            \"bearer\": {\n                ...\n            }\n        },\n        \"schemas\": {\n            ...\n        }\n    }\n}\n\napi = Blueprint(\"api\", __name__)\n\nCORS(app, headers=['Content-Type', 'Authorization'])\n# end::api[]\n\n# tag::route[]\n@app.route('/')\ndef index():\n    ...\n# end::route[]\n    \"\"\"Returns the index page\n    ...\n    \"\"\"\n\n    return \"\"\"\n    

    Python Travel Sample API

    \n A sample API for getting started with Couchbase Server and the Python SDK.\n \n \"\"\"\n\n\ndef lowercase(key):\n return key.lower()\n\n# tag::airport-class-def[]\nclass AirportView(SwaggerView):\n \"\"\"Airport class for airport objects in the database\"\"\"\n# end::airport-class-def[]\n# tag::airports-endpoint[]\n @api.route('/airports', methods=['GET', 'OPTIONS'])\n @cross_origin(supports_credentials=True)\n def airports():\n \"\"\"Returns list of matching airports and the source query\n ...\n \"\"\"\n\n queryType = \"SQL++ query - scoped to inventory: \"\n partialAirportName = request.args['search']\n\n queryPrep = \"SELECT airportname FROM `travel-sample`.inventory.airport WHERE \"\n sameCase = partialAirportName == partialAirportName.lower() or partialAirportName == partialAirportName.upper() #bool\n\n if sameCase and len(partialAirportName) == 3:\n queryPrep += \"faa=$1\"\n queryArgs = [partialAirportName.upper()]\n elif sameCase and len(partialAirportName) == 4:\n queryPrep += \"icao=$1\"\n queryArgs = [partialAirportName.upper()]\n else:\n queryPrep += \"POSITION(LOWER(airportname), $1) = 0\"\n queryArgs = [partialAirportName.lower()]\n\n results = cluster.query(queryPrep, *queryArgs)\n airports = [x for x in results]\n\n context = [queryType + queryPrep]\n\n response = make_response(jsonify({\"data\": airports, \"context\": context}))\n return response\n# end::airports-endpoint[]\n# tag::flights-class[]\nclass FlightPathsView(SwaggerView):\n \"\"\" FlightPath class for computed flights between two airports FAA codes\"\"\"\n\n @api.route('/flightPaths//', methods=['GET', 'OPTIONS'])\n @cross_origin(supports_credentials=True)\n def flightPaths(fromLoc, toLoc):\n \"\"\"\n Return flights information, cost and more for a given flight time and date\n ...\n \"\"\"\n# end::flights-class[]\n# tag::flights-first-query[]\n queryType = \"SQL++ query - scoped to inventory: \"\n context = []\n\n faaQueryPrep = \"SELECT faa as fromAirport FROM `travel-sample`.inventory.airport \\\n WHERE airportname = $1 \\\n UNION SELECT faa as toAirport FROM `travel-sample`.inventory.airport \\\n WHERE airportname = $2\"\n \n faaResults = cluster.query(faaQueryPrep, fromLoc, toLoc)\n\n flightPathDict = {}\n for result in faaResults:\n flightPathDict.update(result)\n\n queryFrom = flightPathDict['fromAirport']\n queryTo = flightPathDict['toAirport']\n\n context.append(queryType + faaQueryPrep)\n\n# end::flights-first-query[]\n# tag::flights-second-query[]\n routeQueryPrep = \"SELECT a.name, s.flight, s.utc, r.sourceairport, r.destinationairport, r.equipment \\\n FROM `travel-sample`.inventory.route AS r \\\n UNNEST r.schedule AS s \\\n JOIN `travel-sample`.inventory.airline AS a ON KEYS r.airlineid \\\n WHERE r.sourceairport = $fromfaa AND r.destinationairport = $tofaa AND s.day = $dayofweek \\\n ORDER BY a.name ASC;\"\n\n flightDay = convdate(request.args['leave'])\n routeResults = cluster.query(routeQueryPrep, \n fromfaa=queryFrom, \n tofaa=queryTo, \n dayofweek=flightDay)\n\n routesList = []\n for route in routeResults:\n route['price'] = math.ceil(random() * 500) + 250\n routesList.append(route)\n\n context.append(queryType + routeQueryPrep)\n\n response = make_response(jsonify({\"data\": routesList, \"context\": context}))\n return response\n\n# end::flights-second-query[]\n\n# tag::user-class-def[]\nclass TenantUserView(SwaggerView):\n \"\"\"Class for storing user related information for a given tenant\"\"\"\n# end::user-class-def[]\n# tag::login-def[]\n @api.route('/tenants//user/login', 
methods=['POST', 'OPTIONS'])\n @cross_origin(supports_credentials=True)\n def login(tenant):\n \"\"\"Login an existing user for a given tenant agent\n ...\n \"\"\"\n# end::login-def[]\n# tag::login-code[]\n requestBody = request.get_json()\n user = requestBody['user']\n providedPassword = requestBody['password']\n\n userDocumentKey = lowercase(user)\n\n agent = lowercase(tenant)\n scope = bucket.scope(agent)\n users = scope.collection('users')\n\n queryType = f\"KV get - scoped to {scope.name}.users: for password field in document \"\n\n try:\n documentPassword = users.lookup_in(userDocumentKey, (\n SD.get('password'),\n )).content_as[str](0)\n\n if documentPassword != providedPassword:\n return abortmsg(401, \"Password does not match\")\n\n except DocumentNotFoundException:\n print(f\"User {user} item does not exist\", flush=True)\n except AmbiguousTimeoutException or UnAmbiguousTimeoutException:\n print(\"Request timed out - has Couchbase stopped running?\", flush=True)\n else:\n return jsonify({'data': {'token': genToken(user)}, 'context': [queryType + user]})\n\n return abortmsg(401, \"Failed to get user data\")\n# end::login-code[]\n# tag::signup-def[]\n @api.route('/tenants//user/signup', methods=['POST', 'OPTIONS'])\n @cross_origin(supports_credentials=True)\n def signup(tenant):\n \"\"\"Signup a new user\n ...\n \"\"\"\n# end::signup-def[]\n# tag::signup-code[]\n requestBody = request.get_json()\n user = requestBody['user']\n password = requestBody['password']\n\n userDocumentKey = lowercase(user)\n\n agent = lowercase(tenant)\n scope = bucket.scope(agent)\n users = scope.collection('users')\n\n queryType = f\"KV insert - scoped to {scope.name}.users: document \"\n\n try:\n users.insert(userDocumentKey, {'username': user, 'password': password})\n responseJSON = jsonify(\n {'data': {'token': genToken(user)}, 'context': [queryType + user]})\n response = make_response(responseJSON)\n return response, 201\n\n except DocumentExistsException:\n print(f\"User {user} item already exists\", flush=True)\n return abortmsg(409, \"User already exists\")\n except Exception as e:\n print(e)\n return abortmsg(500, \"Failed to save user\", flush=True)\n# end::signup-code[]\n# tag::view-flight-get-keys[]\n @api.route('/tenants//user//flights', methods=['GET', 'OPTIONS'])\n @cross_origin(supports_credentials=True)\n def getflights(tenant, username):\n \"\"\"List the flights that have been reserved by a user\n ...\n \"\"\"\n agent = lowercase(tenant)\n\n scope = bucket.scope(agent)\n users = scope.collection('users')\n flights = scope.collection('bookings')\n\n # HTTP token authentication\n bearer = request.headers['Authorization']\n if not auth(bearer, username):\n return abortmsg(401, 'Username does not match token username: ' + username)\n \n try:\n userDocumentKey = lowercase(username)\n\n lookupResult = users.lookup_in(\n userDocumentKey,\n [\n SD.get('bookings'),\n SD.exists('bookings')\n ])\n \n bookedFlightKeys = []\n if lookupResult.exists(1):\n bookedFlightKeys = lookupResult.content_as[list](0)\n# end::view-flight-get-keys[]\n# tag::view-flight-get-details[]\n rows = []\n for key in bookedFlightKeys:\n rows.append(flights.get(key).content_as[dict])\n\n queryType = f\"KV get - scoped to {scope.name}.users: for {len(bookedFlightKeys)} bookings in document \"\n response = make_response(jsonify({\"data\": rows, \"context\": [queryType + userDocumentKey]}))\n return response\n \n except DocumentNotFoundException:\n return abortmsg(401, \"User does not exist\")\n# 
end::view-flight-get-details[]\n\n# tag::booking-doc[]\n @api.route('/tenants/<tenant>/user/<username>/flights', methods=['PUT', 'OPTIONS'])\n @cross_origin(supports_credentials=True)\n def updateflights(tenant, username):\n \"\"\"Book a new flight for a user\n ...\n \"\"\"\n agent = lowercase(tenant)\n user = lowercase(username)\n\n scope = bucket.scope(agent)\n users = scope.collection('users')\n bookings = scope.collection('bookings')\n\n queryType = f\"KV update - scoped to {scope.name}.users: for bookings field in document \"\n\n # HTTP token authentication\n bearer = request.headers['Authorization']\n if not auth(bearer, username):\n return abortmsg(401, 'Username does not match token username: ' + username)\n\n try:\n flightData = request.get_json()['flights'][0]\n flightID = str(uuid.uuid4())\n bookings.upsert(flightID, flightData)\n\n except Exception as e:\n print(e, flush=True)\n return abortmsg(500, \"Failed to add flight data\")\n# end::booking-doc[]\n# tag::update-user[]\n try:\n users.mutate_in(user, (SD.array_append('bookings', flightID, create_parents=True),))\n resultJSON = {'data': {'added': [flightData]},\n 'context': [queryType + user]}\n return make_response(jsonify(resultJSON))\n \n except DocumentNotFoundException:\n return abortmsg(401, \"User does not exist\")\n except Exception:\n return abortmsg(500, \"Couldn't update flights\")\n# end::update-user[]\n\nclass HotelView(SwaggerView):\n \"\"\"Class for storing Hotel search related information\"\"\"\n# tag::search-query[]\n @api.route('/hotels/<description>/<location>/', methods=['GET'])\n @cross_origin(supports_credentials=True)\n def hotels(description, location):\n # Requires FTS index called 'hotels-index'\n \"\"\"Find hotels using full text search\n ...\n \"\"\"\n queryPrep = FT.ConjunctionQuery()\n if location != '*' and location != \"\":\n queryPrep.conjuncts.append(\n FT.DisjunctionQuery(\n FT.MatchPhraseQuery(location, field='country'),\n FT.MatchPhraseQuery(location, field='city'),\n FT.MatchPhraseQuery(location, field='state'),\n FT.MatchPhraseQuery(location, field='address')\n ))\n\n if description != '*' and description != \"\":\n queryPrep.conjuncts.append(\n FT.DisjunctionQuery(\n FT.MatchPhraseQuery(description, field='description'),\n FT.MatchPhraseQuery(description, field='name')\n ))\n\n # Attempting to run a compound query with no sub-queries will result in\n # a 'NoChildrenException'.\n\n if len(queryPrep.conjuncts) == 0:\n queryType = \"FTS search rejected - no search terms were provided\"\n response = {'data': [], 'context': [queryType]}\n return jsonify(response)\n\n searchRows = cluster.search_query('hotels-index', \n queryPrep, \n SearchOptions(limit=100))\n# end::search-query[]\n# tag::search-subdoc[]\n allResults = []\n addressFields = ['address', 'city', 'state', 'country']\n dataFields = ['name', 'description']\n\n scope = bucket.scope('inventory')\n hotel_collection = scope.collection('hotel')\n\n for hotel in searchRows:\n\n hotelFields = hotel_collection.lookup_in(\n hotel.id, [SD.get(x) for x in [*addressFields, *dataFields]])\n\n # Concatenates the first 4 fields to form the address. 
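Sub-document lookups return\n # results in the order requested, so indices 0-3 below are the address parts and the\n # name/description fields are read at an offset of len(addressFields).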
\n hotelAddress = []\n for x in range(len(addressFields)):\n try:\n hotelAddress.append(hotelFields.content_as[str](x))\n except DocumentNotFoundException:\n pass\n hotelAddress = ', '.join(hotelAddress)\n\n hotelData = {}\n for x, field in enumerate(dataFields):\n try: \n hotelData[field] = hotelFields.content_as[str](x+len(addressFields))\n except DocumentNotFoundException:\n pass\n \n hotelData['address'] = hotelAddress\n allResults.append(hotelData)\n\n queryType = f\"FTS search - scoped to: {scope.name}.hotel within fields {','.join([*addressFields, *dataFields])}\"\n response = {'data': allResults, 'context': [queryType]}\n return jsonify(response)\n# end::search-subdoc[]\n\n\ndef abortmsg(code, message):\n response = jsonify({'message': message})\n response.status_code = code\n return response\n\n\ndef convdate(rawdate):\n \"\"\"Returns integer data from mm/dd/YYYY\"\"\"\n day = datetime.strptime(rawdate, '%m/%d/%Y')\n return day.weekday()\n\n\ndef genToken(username):\n return jwt.encode({'user': username}, JWT_SECRET, algorithm='HS256').decode(\"ascii\")\n\n\ndef auth(bearerHeader, username):\n bearer = bearerHeader.split(\" \")[1]\n return username == jwt.decode(bearer, JWT_SECRET)['user']\n\n# tag::connect[]\ndef connect_db():\n print(CONNSTR, authenticator)\n cluster = Cluster(CONNSTR, ClusterOptions(authenticator))\n bucket = cluster.bucket('travel-sample')\n return cluster, bucket\n\n# end::connect[]\n# tag::start-app[]\nif __name__ == \"__main__\":\n cluster, bucket = connect_db()\n app.register_blueprint(api, url_prefix=\"/api\")\n swagger = Swagger(app, template=swagger_template)\n app.run(debug=True, host='0.0.0.0', port=8080, threaded=False)\n# end::start-app[]","repo_name":"couchbase/docs-sdk-python","sub_path":"modules/hello-world/examples/sample-app.py","file_name":"sample-app.py","file_ext":"py","file_size_in_byte":16724,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"31308343510","text":"import os\nfrom mlagents.trainers.exception import UnityTrainerException\nfrom mlagents.trainers.settings import TrainerSettings\nfrom mlagents.trainers.model_saver.torch_model_saver import DEFAULT_CHECKPOINT_NAME\n\n\ndef validate_existing_directories(\n output_path: str, resume: bool, force: bool, init_path: str = None\n) -> None:\n \"\"\"\n Validates that if the run_id model exists, we do not overwrite it unless --force is specified.\n Throws an exception if resume isn't specified and run_id exists. Throws an exception\n if --resume is specified and run-id was not found.\n :param model_path: The model path specified.\n :param summary_path: The summary path to be used.\n :param resume: Whether or not the --resume flag was passed.\n :param force: Whether or not the --force flag was passed.\n :param init_path: Path to run-id dir to initialize from\n \"\"\"\n\n output_path_exists = os.path.isdir(output_path)\n\n if output_path_exists:\n if not resume and not force:\n raise UnityTrainerException(\n \"Previous data from this run ID was found. \"\n \"Either specify a new run ID, use --resume to resume this run, \"\n \"or use the --force parameter to overwrite existing data.\"\n )\n else:\n if resume:\n raise UnityTrainerException(\n \"Previous data from this run ID was not found. \"\n \"Train a new run by removing the --resume flag.\"\n )\n\n # Verify init path if specified.\n if init_path is not None:\n if not os.path.isdir(init_path):\n raise UnityTrainerException(\n \"Could not initialize from {}. 
\"\n \"Make sure models have already been saved with that run ID.\".format(\n init_path\n )\n )\n\n\ndef setup_init_path(\n behaviors: TrainerSettings.DefaultTrainerDict, init_dir: str\n) -> None:\n \"\"\"\n For each behavior, setup full init_path to checkpoint file to initialize policy from\n :param behaviors: mapping from behavior_name to TrainerSettings\n :param init_dir: Path to run-id dir to initialize from\n \"\"\"\n for behavior_name, ts in behaviors.items():\n if ts.init_path is None:\n # set default if None\n ts.init_path = os.path.join(\n init_dir, behavior_name, DEFAULT_CHECKPOINT_NAME\n )\n elif not os.path.dirname(ts.init_path):\n # update to full path if just the file name\n ts.init_path = os.path.join(init_dir, behavior_name, ts.init_path)\n _validate_init_full_path(ts.init_path)\n\n\ndef _validate_init_full_path(init_file: str) -> None:\n \"\"\"\n Validate initialization path to be a .pt file\n :param init_file: full path to initialization checkpoint file\n \"\"\"\n if not (os.path.isfile(init_file) and init_file.endswith(\".pt\")):\n raise UnityTrainerException(\n f\"Could not initialize from {init_file}. file does not exists or is not a `.pt` file\"\n )\n","repo_name":"Unity-Technologies/ml-agents","sub_path":"ml-agents/mlagents/trainers/directory_utils.py","file_name":"directory_utils.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":15647,"dataset":"github-code","pt":"48"} +{"seq_id":"13750460792","text":"import sys\n\nv = {}\n\ndef interpret_command(line):\n c = line.split()\n if c[0] == 'define':\n v[c[2]] = int(c[1])\n else:\n if c[1] not in v or c[3] not in v:\n print('undefined')\n return\n\n a = v[c[1]]\n b = v[c[3]]\n print('true' if (a == b and c[2] == '=') or (a < b and c[2] == '<') or (a > b and c[2] == '>') else 'false')\n\ndef main():\n for line in sys.stdin:\n interpret_command(line)\n\nmain()\n","repo_name":"kevinbc0/online-judge","sub_path":"kattis/metaprogramming.py","file_name":"metaprogramming.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"18283215700","text":"#<=============================== Imports ===================================>#\nimport sys\nimport queue\nimport operator\nimport numpy as np\nfrom os import system\nfrom time import time \nfrom datetime import datetime\n\n#<========================= Puzzle Class Definition =========================>#\nclass Puzzle:\n\n # One can pass in an explicit grid as a list of lists (or matrix) OR a grid size. Second option will randomly generate grid.\n def __init__(self, size_grid=None, grid=None, test_type='') -> None:\n \n self.file_name = 'nodePath_'+ test_type + '.txt'\n\n # If explicit grid is given... \n if grid is not None:\n # Cast grid as np.array and define size\n self.grid = np.asarray(grid, dtype=np.uint8)\n self.size_grid = self.grid.shape[0]\n else:\n # If a size is give, randomly generate a grid of that size\n self.size_grid = size_grid\n nums = list(range(0, size_grid**2))\n self.grid = np.zeros((size_grid, size_grid), dtype=np.uint8)\n for row in range(size_grid):\n for col in range(size_grid):\n num = np.random.choice(nums)\n nums.remove(num)\n self.grid[row, col] = num\n\n self.depth_counter = 0 # Counts depth of BFS\n self.orig_parent = Puzzle.grid2str(self.grid) # Define the original parent node as string\n self.state_dict = {self.orig_parent: {\"count\": self.depth_counter}} # Define dictionary of all states. 
All other keys\n # will contain parent node and depth\n self.current_set = queue.Queue() # Queue of states to inspect \n self.current_set.put_nowait(self.orig_parent)\n\n # Generate a string representation of the winning state\n winning_state = list(range(1,self.size_grid**2))\n winning_state.append(0)\n self.winning_state = Puzzle.list2str(winning_state, sep=\" \")\n\n # Static method for converting a list to its corresponding string representation\n @staticmethod\n def list2str(list_, sep=\"\"):\n str_ = \"\"\n add_sep = False\n for val in list_:\n if not add_sep:\n str_ += str(val)\n add_sep = True\n else:\n str_ += sep + str(val)\n return str_\n\n # Static method for converting grid into its corresponding string reprentation\n @staticmethod\n def grid2str(grid_):\n return Puzzle.list2str(grid_.flatten(), sep=\" \")\n\n # Method for converting string representation back into grid\n def str2grid(self, str_):\n nums = np.asarray([int(s) for s in str_.split(' ')], dtype=np.uint8)\n return nums.reshape((self.size_grid, self.size_grid))\n\n # Write information to file\n def write_str_to_file(self, string, start='', end='\\n', overwrite=False):\n write_type = None\n if overwrite:\n write_type = 'w'\n else:\n write_type = 'a'\n with open(self.file_name, write_type) as writer:\n writer.write(start + str(string) + end)\n\n # Utility method for printing out grid\n def print(self, grid_str=None, to_file=True):\n grid = []\n if grid_str == None:\n grid = self.grid\n else:\n grid = self.str2grid(grid_str)\n\n # Print grid to file\n if to_file:\n for row in grid:\n for col in row:\n end = None\n if len(str(col)) > 1:\n start = ''\n else:\n start = ' '\n self.write_str_to_file(col, start=start, end=' ')\n self.write_str_to_file(\"\")\n self.write_str_to_file(\"\")\n \n # Print grid to terminal\n else:\n for row in grid:\n for col in row:\n print(col, end=\" \")\n print(\"\")\n\n # Switches string elements to create new state\n def switch_elements_in_str(self, str_, pos1, pos2):\n arr = str_.split(' ')\n str1_index = self.size_grid*pos1[0] + pos1[1]\n str2_index = self.size_grid*pos2[0] + pos2[1]\n temp = arr[str1_index]\n arr[str1_index] = arr[str2_index]\n arr[str2_index] = temp\n return Puzzle.list2str(arr, sep=\" \")\n\n # Gets next branch in the given direction\n def get_next_branch(self, parent, zero_pos, from_dir):\n # Get the new position using tuple addition\n pos = tuple(map(operator.add, zero_pos, (from_dir)))\n\n # Check if move is legal and generate that new state\n if (pos[0] < self.size_grid and pos[1] < self.size_grid and pos[0] > -1 and pos[1] > -1):\n mat_str = self.switch_elements_in_str(parent, pos, zero_pos)\n \n # If that corresponds to the winning state, return True\n if mat_str == self.winning_state:\n self.state_dict[mat_str] = {\"parent\": parent, \"count\": self.depth_counter}\n return True\n \n # Check if the current state is defined.\n if self.state_dict.get(mat_str) == None:\n # If not, add it to the dictionary along with its parent, count info then add it to queue\n self.state_dict[mat_str] = {\"parent\": parent, \"count\": self.depth_counter}\n self.current_set.put_nowait(mat_str)\n return False\n\n # Check all directions for a given 0 position\n def fill_void(self, parent):\n # Calculate 0 pos\n zero_pos_str = parent.split(\" \").index('0')\n zero_pos = (zero_pos_str//self.size_grid, zero_pos_str%self.size_grid)\n\n # Check all directions\n down = self.get_next_branch(parent, zero_pos, (1, 0))\n up = self.get_next_branch(parent, zero_pos, (-1, 0))\n right = 
self.get_next_branch(parent, zero_pos, (0, 1))\n left = self.get_next_branch(parent, zero_pos, (0, -1))\n\n # If any of these are true, we have reached the goal state. Propogate the true to the next level\n if down or up or right or left:\n return True\n return False\n \n # Search the current current queue and generate all depth levels\n def find_path(self):\n self.solution_found = False\n try:\n while True:\n self.depth_counter += 1\n if self.current_set.empty():\n break\n parent = self.current_set.get_nowait()\n if self.fill_void(parent):\n self.solution_found = True\n break\n \n if self.solution_found:\n # Go up the tree to find all parent nodes and optimized path\n next_set = self.state_dict[self.winning_state]\n path = [self.winning_state]\n try:\n while True:\n next_set_ind = next_set['parent']\n next_set = self.state_dict[next_set_ind]\n path.append(next_set_ind)\n except KeyError:\n pass\n\n # Print full move set\n path.reverse()\n self.path = path\n \n except KeyboardInterrupt:\n print(\"Keyboard Interrupt\")\n\n# Runs algorithm and prints results to terminal and screen \ndef run_case(case, overwrite=False, custom=None, disp_file=True):\n puzzle = None\n\n # Custom puzzle\n if custom != None:\n puzzle = Puzzle(grid=custom, test_type='custom') # This method takes in a puzzle directly and solves\n print(\"--------------------------- Custom Case ---------------------------\")\n puzzle.write_str_to_file(\"--------------------------- Custom Case ---------------------------\", overwrite=overwrite)\n\n # Random Puzzle\n elif len(case) > 1:\n case = case[-1]\n puzzle = Puzzle(size_grid=int(case), test_type='random') # This method takes in a puzzle size and randomly generates a puzzle\n print(\"--------------------------- Random Case \"+case+\"x\"+case+\" ---------------------------\")\n puzzle.write_str_to_file(\"--------------------------- Random Case \"+case+\"x\"+case+\" ---------------------------\", overwrite=overwrite)\n\n # Pre-determined test cases\n else:\n test_case = {'1': [[1, 2, 3, 4],[ 5, 6, 0, 8], [9, 10, 7, 12], [13, 14, 11, 15]],\n '2': [[1, 0, 3, 4],[ 5, 2, 7, 8], [9, 6, 10, 11], [13, 14, 15, 12]],\n '3': [[0, 2, 3, 4],[ 1, 5, 7, 8], [9, 6, 11, 12], [13, 10, 14, 15]],\n '4': [[5, 1, 2, 3],[ 0, 6, 7, 4], [9, 10, 11, 8], [13, 14, 15, 12]],\n '5': [[1, 6, 2, 3],[ 9, 5, 7, 4], [0, 10, 11, 8], [13, 14, 15, 12]]}\n\n puzzle = Puzzle(grid=test_case[case], test_type='test_cases') \n print(\"\\n--------------------------- Test Case \"+case+\" ---------------------------\")\n puzzle.write_str_to_file(\"--------------------------- Test Case \"+case+\" ---------------------------\", overwrite=overwrite)\n \n puzzle.print()\n print(\"\")\n\n start_time = time() # Time the BFS algorithm\n\n # Run Algorithm\n puzzle.find_path() \n \n # Calculate time\n time_elapsed_s = time() - start_time\n time_elapsed_mins = time_elapsed_s//60\n time_elapsed_hrs = time_elapsed_s//60**2\n time_elapsed_secs = time_elapsed_s%60\n\n # Write info to file. \n # Solution found\n if puzzle.solution_found:\n for string in puzzle.path:\n puzzle.write_str_to_file(string)\n\n puzzle.write_str_to_file(f\"\\nThis puzzle can be solved in {len(puzzle.path)-1} operations\")\n\n print(f\"All contents written to file ./{puzzle.file_name}\")\n print(f\"This puzzle can be solved in {len(puzzle.path)-1} operations\")\n # No solution detected\n else:\n print(\"No solutions to current puzzle. Please select another puzzle.\")\n puzzle.write_str_to_file(\"No solutions to current puzzle. 
Please select another puzzle.\")\n\n puzzle.write_str_to_file(f\"The BFS algorithm took {time_elapsed_hrs} hrs, {time_elapsed_mins} mins, and {time_elapsed_secs} s to implement\", end='\\n\\n\\n')\n print(f\"The BFS algorithm took {time_elapsed_hrs} hrs, {time_elapsed_mins} mins, and {time_elapsed_secs} s to implement\")\n\n if disp_file:\n system(puzzle.file_name)\n\n#<========================= Main =========================>#\nif __name__ == '__main__':\n # system('cls')\n\n # Command line parsing\n \n # Run all 5 given cases\n if len(sys.argv) == 1:\n overwrite = True\n for case in range(1,5):\n if overwrite:\n run_case(str(case), overwrite=True, disp_file=False)\n overwrite = False\n else:\n run_case(str(case), disp_file=False)\n run_case('5')\n\n # Run custom grid\n elif sys.argv[1] == 'custom':\n custom_list = [[1, 2, 3, 4],[ 5, 6, 0, 8], [9, 10, 7, 12], [13, 14, 11, 15]]\n run_case(None, custom=custom_list, overwrite=True)\n\n # Run individual case\n elif len(sys.argv) == 2:\n case = sys.argv[1]\n run_case(case, overwrite=True)\n \n # Run random grid\n elif len(sys.argv) == 3:\n case = sys.argv[1] + sys.argv[2]\n run_case(case, overwrite=True)","repo_name":"dlerner97/path_planning_puzzle_solve","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4248331549","text":"from csv_projects_funcs import csvToDictsList\r\nimport random\r\n\r\n\r\ndict_list = csvToDictsList('transaction_data.csv')\r\n\r\nprint(\"Transactions: \")\r\nprint()\r\nfor transaction in dict_list:\r\n for key, value in transaction.items():\r\n print(key, value)\r\nprint()\r\n\r\ntotal_spent = 0\r\ncounter = 0\r\ndates = []\r\nfor i in dict_list:\r\n for l in i:\r\n dates.append(l)\r\n total_spent += float(i[l][\"amount\"])\r\n counter += 1\r\n\r\nprint(f\" Total Amount: {total_spent}\\n Average Amount: {total_spent/counter}\\n Total Transactions: {counter}\")\r\nprint()\r\n\r\nrandom_transaction = random.choice(dates) \r\n\r\nfor i in dict_list:\r\n for key,value in i.items():\r\n if key == random_transaction:\r\n print(f\" On this Day {random_transaction} {value}\")\r\n \r\n","repo_name":"illumi420/dci","sub_path":"Python_csv_projects/project2_transactions.py","file_name":"project2_transactions.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40773684311","text":"# project: a socket server running on the huzzah ESP8266\n# and a client running on Mac/client\n# 2017-0716 source: https://forums.adafruit.com/viewtopic.php?f=60&t=117874\nimport socket\n\ndef Main():\n # creating server socket for communication I/O\n host = \"192.168.178.24\" # host ip\n #MAC-test: host = \"192.168.178.14\" #host ip-address\n\n port = 5000 #must be above 1024 to avoid conflict with core services\n mySocket = socket.socket()\n mySocket.bind((host, port))\n print('Server running on host: {0}, port: {1} '.format(host, port))\n\n mySocket.listen(1)\n conn, addr = mySocket.accept()\n print('Connection from: ' + str(addr))\n while True:\n data = conn.recv(1024).decode()\n if not data:\n break\n print('from connected user: ' + str(data))\n \n # parse user command\n data = str(data).upper()\n parse_command_echo(conn, data)\n\n conn.close()\n print('Connection closed')\n\n\n# default ECHO-service\ndef parse_command_echo(conn, data):\n print('parse_command_echo() - sending: ' + str(data) )\n 
conn.send(data.encode())\n\nif __name__ == '__main__':\n Main()\n","repo_name":"flashypepo/myMicropython-Examples","sub_path":"network/socket_server.py","file_name":"socket_server.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"74851315024","text":"import requests\nimport yaml\n\n\ndef get_posts(token, owner=\"Me\"):\n with open(\"config.yaml\") as f:\n data = yaml.safe_load(f)\n\n params = {\"owner\": owner}\n headers = {\"X-Auth-Token\": token}\n resource = requests.get(data[\"url_posts\"], headers=headers, params=params)\n return resource.json()\n","repo_name":"ikodzoev/web_test","sub_path":"HW_1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1518196892","text":"import os\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom keras import models\nimport numpy as np\nimport pandas as pd\nfrom ML.data_prep import data_prep, data_preprocess, actual_data\n\n\ndef model(material_num, cursor):\n df = data_prep(material_num, cursor=cursor)\n data, scaler = data_preprocess(df, material_num) # Add material_num for feed the model\n model = models.load_model(\"E:/Program/PKLProjekt/ML/model.h5\")\n\n # Prediction for data_1\n pred = []\n pred_batch = data[-30:]\n current_batch = pred_batch.reshape((1, 30, 1))\n for i in range(10):\n current_pred = model.predict(current_batch)[0]\n pred.append(current_pred)\n current_batch = np.append(current_batch[:, 1:, :], [[current_pred]], axis=1)\n\n # Rescaling of predicted value\n true_pred = scaler.inverse_transform(pred)\n\n # Saving the predicted value to a DataFrame and arrange the date then set it as index\n pred_df = pd.DataFrame()\n pred_df['date'] = pd.date_range(start='2021-06-01', periods=10, freq='D')\n pred_df = pred_df.set_index('date')\n pred_df['sale_qty'] = true_pred.astype(np.int64)\n pred_df = pred_df.reset_index()\n\n actual_df = actual_data(df, material_num)\n actual_df = actual_df.reset_index()\n material_desc = df['material_desc'][0]\n\n return pred_df, actual_df, material_desc\n\n\n\n","repo_name":"SoF4rAway/pklprojekt","sub_path":"ML/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7033544164","text":"from .. 
import util\n\n\nclass Library(object):\n \"\"\"A library (like gdk in gtk+)\"\"\"\n\n def __init__(self, namespace, url, doc_id):\n self.namespace = util.VersionedNamespace(namespace)\n self.url = url\n self.doc_id = doc_id\n\n @property\n def devhelp_url(self):\n \"\"\"URL pointing to the devhelp2 index\"\"\"\n\n return \"%s%s.devhelp2\" % (self.url, self.doc_id)\n\n @util.cached_property\n def version(self):\n \"\"\"Library version as a string\"\"\"\n\n module = util.import_namespace(\n self.namespace.name, self.namespace.version)\n return _get_library_version(module)\n\n @classmethod\n def for_namespace(cls, namespace, version):\n key = namespace + \"-\" + version\n for lib in LIBRARIES:\n if lib.namespace == key:\n return lib\n return cls(key, \"\", \"\")\n\n def __repr__(self):\n return \"<%s namespace=%r url=%r doc_id=%r>\" % (\n type(self).__name__, self.namespace, self.url, self.doc_id)\n\n\nLIBRARIES = [\n Library(\"GLib-2.0\", \"https://developer.gnome.org/glib/stable/\", \"glib\"),\n Library(\"Gio-2.0\", \"https://developer.gnome.org/gio/stable/\", \"gio\"),\n Library(\"GObject-2.0\", \"https://developer.gnome.org/gobject/stable/\", \"gobject\"),\n Library(\"Pango-1.0\", \"https://developer.gnome.org/pango/stable/\", \"pango\"),\n Library(\"GdkPixbuf-2.0\", \"https://developer.gnome.org/gdk-pixbuf/unstable/\", \"gdk-pixbuf\"),\n Library(\"Gdk-3.0\", \"https://developer.gnome.org/gdk3/stable/\", \"gdk3\"),\n Library(\"Gtk-3.0\", \"https://developer.gnome.org/gtk3/stable/\", \"gtk3\"),\n Library(\"WebKit2-4.0\", \"https://webkitgtk.org/reference/webkit2gtk/stable/\", \"webkit2gtk-4.0\"),\n Library(\"cairo-1.0\", \"https://developer.gnome.org/cairo/stable/\", \"cairo\"),\n Library(\"Clutter-1.0\", \"https://developer.gnome.org/clutter/stable/\", \"clutter\"),\n Library(\"Gst-1.0\", \"https://gstreamer.freedesktop.org/data/doc/gstreamer/head/gstreamer/html/\", \"gstreamer-1.0\"),\n Library(\"GES-1.0\", \"https://gstreamer.freedesktop.org/data/doc/gstreamer/head/gstreamer-editing-services/html/\", \"ges-1.0\"),\n Library(\"UDisks-2.0\", \"https://storaged.org/doc/udisks2-api/latest/\", \"udisks2\"),\n]\n\n\ndef _get_generic_library_version(mod):\n \"\"\"Tries to return a version string of the library version used to create\n the gir or if not available the version of the library dlopened.\n\n If no version could be found, returns an empty string.\n \"\"\"\n\n suffix = \"\"\n modname = mod.__name__\n for i, (o, l) in enumerate(reversed(list(zip(modname, modname.lower())))):\n if o != l:\n suffix = modname[-i - 1:].upper()\n break\n\n const_version = []\n for name in [\"MAJOR\", \"MINOR\", \"MICRO\", \"NANO\"]:\n for variant in [\"VERSION_\" + name, name + \"_VERSION\",\n suffix + \"_\" + name, suffix + \"_\" + name + \"_VERSION\",\n suffix + \"_VERSION_\" + name]:\n if hasattr(mod, variant):\n value = int(getattr(mod, variant))\n const_version.append(value)\n\n if const_version:\n return \".\".join(map(str, const_version))\n\n func_version = \"\"\n for name in [\"get_version\", \"version\", \"util_get_version\",\n \"util_get_version_string\", \"get_version_string\",\n \"version_string\"]:\n if hasattr(mod, name):\n try:\n value = getattr(mod, name)()\n except TypeError:\n continue\n\n if isinstance(value, (tuple, list)):\n func_version = \".\".join(map(str, value))\n break\n elif isinstance(value, str):\n func_version = value\n\n return func_version\n\n\ndef _get_library_version(mod):\n \"\"\"Returns a library version as string for a given Python module. 
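GstPbutils is special-cased below;\n everything else goes through the generic heuristics in _get_generic_library_version above. 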
In\n case no version is found returns an empty string.\n\n As there is no standard way to retrieve the version of the shared lib\n this might fail or return wrong info.\n \"\"\"\n\n mod_name = mod.__name__\n version = \"\"\n\n if mod_name == \"GstPbutils\":\n t = [mod.PLUGINS_BASE_VERSION_MAJOR, mod.PLUGINS_BASE_VERSION_MINOR,\n mod.PLUGINS_BASE_VERSION_MICRO, mod.PLUGINS_BASE_VERSION_NANO]\n return \".\".join(map(str, t))\n\n version = _get_generic_library_version(mod)\n\n # some cleanup\n version = version.rstrip(\".\")\n version = version.split(\"-\", 1)[0]\n\n return version\n","repo_name":"pygobject/pgi-docgen","sub_path":"pgidocgen/girdata/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":4419,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"48"} +{"seq_id":"42726148105","text":"import argparse\nimport sqlite3\nimport csv\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nconn = sqlite3.connect('/users/bclark66/sp_data_for_tempo_test2.db')\nc = conn.cursor()\n\n \nsql3 = \"\"\" select t.track_id,tb.start as bar_start,tb.duration as bar_duration, \n t.year as release_date,t.popularity,t.category,popularity as artist_popularity\n from track t left outer join \n track_bar tb on tb.track_id = t.track_id \n order by t.track_id, bar_start\"\"\"\n\n\n#df1 = pd.read_sql(sql2,conn)\n#df1 = df1.rename(columns={'secstart':'start'})\n#df1['secname'] = df1.type + df1.start.astype(str)\n\n\ndf2 = pd.read_sql(sql3,conn)\ndf2 = df2.rename(columns={'bar_start':'start'})\n\n#df1 = df1.merge(df2,how='outer',left_on=['track_id','start','type'],right_on=['track_id','start','type']).reset_index(drop=True)\n#df1 = df1.sort_values(['track_id','start']).reset_index(drop=True)\n#df1['secname'].fillna(method='ffill',inplace=True)\ndf2['bar_mean'] = df2.groupby('track_id')['bar_duration'].transform('mean')\ndf2['bar_stdev'] = df2.groupby('track_id')['bar_duration'].transform('std')\ndf2['bar_stdev_normalized'] = df2['bar_stdev'] / df2['bar_mean']\ndf2['bar_max'] = df2.groupby('track_id')['bar_duration'].transform('max')\ndf2['bar_min'] = df2.groupby('track_id')['bar_duration'].transform('min')\ndf2['bar_diff'] = df2.groupby('track_id')['bar_duration'].diff()\ndf2['bar_diff'].fillna(0,inplace=True)\ndf2['bar_range'] = df2['bar_max'] - df2['bar_min']\ndf2['bar_range_normalized'] = df2['bar_range'] / df2['bar_mean']\ndf2['bar_diff_normalized'] = df2['bar_diff'] / df2['bar_mean']\ndf2['bar_diff_stdev'] = df2.groupby('track_id')['bar_diff'].transform('std')\ndf2['region_5pct'] = df2['bar_diff_normalized'].abs() > .05\ndf2['region_2pct'] = df2['bar_diff_normalized'].abs() > .025\ndf2['region_5pct'] = df2['region_5pct'].astype('int')\ndf2['region_2pct'] = df2['region_2pct'].astype('int')\ndf2['region_5pct_count'] = df2.groupby('track_id')['region_5pct'].transform('sum')\ndf2['region_2pct_count'] = df2.groupby('track_id')['region_2pct'].transform('sum')\ndf2['region_5pct_cumsum'] = df2.groupby('track_id')['region_5pct'].transform('cumsum')\ndf2['region_2pct_cumsum'] = df2.groupby('track_id')['region_2pct'].transform('cumsum')\ndf2['bar_count'] = df2.groupby('track_id')['region_5pct'].transform('count')\ndf2['region_5pct_bar_count'] = df2.groupby(['track_id','region_5pct_cumsum'])['region_5pct'].transform('count')\ndf2['region_2pct_bar_count'] = df2.groupby(['track_id','region_2pct_cumsum'])['region_5pct'].transform('count')\n#df2 = df2.groupby('track_id')['region_1pct'].apply(lambda x: (x == 
'TRUE').sum()).reset_index(name='nbr_regions_1pct')\n#print(\"df2 \",df2[df2.bar_diff_normalized.gt(.05).groupby(df2.track_id).transform('any')])\n# indexnames = df2[df2['type'] == 'bar'].index\n# df2.drop(indexnames,inplace=True)\ndf2 = df2.drop(columns=['start','bar_duration','bar_diff','bar_diff_normalized','region_5pct','region_2pct'])\ndf2 = df2.drop_duplicates()\ndf2.to_csv('song_data_tempo_with_bars_onlyv2.csv')\n\n\n\n ","repo_name":"Computational-Cognitive-Musicology-Lab/tempo_stability","sub_path":"code/get_song_data_for_bar_only_level_tempo_analysis_v2.py","file_name":"get_song_data_for_bar_only_level_tempo_analysis_v2.py","file_ext":"py","file_size_in_byte":3060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37810759602","text":"\"\"\"\nThe graph can also be built this way:\nfor _ in range(m):\n a, b = map(int, sys.stdin.readline().split())\n graph[a].append(b)\n graph[b].append(a)\n\"\"\"\n\nfrom collections import deque\nimport sys\n\ndef bfs(graph, v, visited):\n queue = deque([v])\n\n if visited[v] == 0: # 0 : not visited, 1,2 : colors\n visited[v] = 1\n\n while queue:\n v = queue.popleft()\n\n color = visited[v]\n for i in graph[v]:\n if visited[i] == 0: # not visited yet\n queue.append(i)\n if color == 1: # paint with the color opposite to the current vertex\n visited[i] = 2\n else:\n visited[i] = 1\n elif visited[i] == 1 and color == 1:\n return False\n elif visited[i] == 2 and color == 2:\n return False\n return True\n\nfor _ in range(int(sys.stdin.readline())):\n n, m = map(int, sys.stdin.readline().split())\n\n graph = [[] for _ in range(n+1)]\n\n for _ in range(m):\n a, b = map(int, sys.stdin.readline().split())\n graph[a].append(b)\n graph[b].append(a)\n\n visited = [False] * (n+1)\n\n for i in range(1, n + 1):\n if not visited[i]: # if the vertex has not been visited, run bfs\n result = bfs(graph, i, visited)\n if not result:\n break\n\n print('YES' if result else 'NO')","repo_name":"KoKwanwun/Algorithm","sub_path":"Baekjoon/Gold/Gold 4/1707. 이분 그래프.py","file_name":"1707. 이분 그래프.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28936156316","text":"import argparse\nimport time\nimport numpy as np\nimport scipy\nimport scipy.optimize as opt\nfrom scipy.ndimage.filters import gaussian_filter\nimport torch\nfrom event_utils import *\nfrom objectives import *\nfrom warps import *\n\ndef draw_objective_function(xs, ys, ts, ps, objective, warpfunc, x_range=(-200, 200), y_range=(-200, 200),\n gt=(0,0), show_gt=True, resolution=20, img_size=(180, 240)):\n \"\"\"\n Draw the objective function given by sampling over a range. Depending on the value of resolution, this\n can involve many samples and take some time.\n Parameters:\n xs,ys,ts,ps (numpy array) The event components\n objective (object) The objective function\n warpfunc (object) The warp function\n x_range, y_range (tuple) the range over which to plot the parameters\n gt (tuple) The ground truth\n show_gt (bool) Whether to draw the ground truth in\n resolution (float) The resolution of the sampling\n img_size (tuple) The image sensor size\n \"\"\"\n width = x_range[1]-x_range[0]\n height = y_range[1]-y_range[0]\n print(\"Drawing objective function. 
Taking {} samples\".format((width*height)/resolution))\n imshape = (int(height/resolution+0.5), int(width/resolution+0.5))\n img = np.zeros(imshape)\n for x in range(img.shape[1]):\n for y in range(img.shape[0]):\n params = np.array([x*resolution+x_range[0], y*resolution+y_range[0]])\n img[y,x] = -objective.evaluate_function(params, xs, ys, ts, ps, warpfunc, img_size, blur_sigma=0)\n img = cv.normalize(img, None, 0, 1.0, cv.NORM_MINMAX)\n plt.imshow(img, interpolation='bilinear', cmap='viridis')\n plt.xticks([])\n plt.yticks([])\n if show_gt:\n xloc = ((gt[0]-x_range[0])/(width))*imshape[1]\n yloc = ((gt[1]-y_range[0])/(height))*imshape[0]\n plt.axhline(y=yloc, color='r', linestyle='--')\n plt.axvline(x=xloc, color='r', linestyle='--')\n plt.show()\n\ndef optimize_contrast(xs, ys, ts, ps, warp_function, objective, optimizer=opt.fmin_bfgs, x0=None,\n numeric_grads=False, blur_sigma=None, img_size=(180, 240)):\n \"\"\"\n Optimize contrast for a set of events\n Parameters:\n xs (numpy float array) The x components of the events\n ys (numpy float array) The y components of the events\n ts (numpy float array) The timestamps of the events. Timestamps should be ts-t[0] to avoid precision issues.\n ps (numpy float array) The polarities of the events\n warp_function (function) The function with which to warp the events\n objective (objective class object) The objective to optimize\n optimizer (function) The optimizer to use\n x0 (np array) The initial guess for optimization\n numeric_grads (bool) If true, use numeric derivatives, otherwise use analytic drivatives if available.\n Numeric grads tend to be more stable as they are a little less prone to noise and don't require as much\n tuning on the blurring parameter. However, they do make optimization slower.\n img_size (tuple) The size of the event camera sensor\n blur_sigma (float) Size of the blurring kernel. Blurring the images of warped events can\n have a large impact on the convergence of the optimization.\n\n Returns:\n The max arguments for the warp parameters wrt the objective\n \"\"\"\n args = (xs, ys, ts, ps, warp_function, img_size, blur_sigma)\n x0 = np.array([0,0])\n if x0 is None:\n x0 = np.zeros(warp_function.dims)\n if numeric_grads:\n argmax = optimizer(objective.evaluate_function, x0, args=args, epsilon=1, disp=False)\n else:\n argmax = optimizer(objective.evaluate_function, x0, fprime=objective.evaluate_gradient, args=args, disp=False)\n return argmax\n\ndef optimize(xs, ys, ts, ps, warp, obj, numeric_grads=True, img_size=(180, 240)):\n \"\"\"\n Optimize contrast for a set of events. Uses optimize_contrast() for the optimiziation, but allows\n blurring schedules for successive optimization iterations.\n Parameters:\n xs (numpy float array) The x components of the events\n ys (numpy float array) The y components of the events\n ts (numpy float array) The timestamps of the events. Timestamps should be ts-t[0] to avoid precision issues.\n ps (numpy float array) The polarities of the events\n warp (function) The function with which to warp the events\n obj (objective class object) The objective to optimize\n numeric_grads (bool) If true, use numeric derivatives, otherwise use analytic drivatives if available.\n Numeric grads tend to be more stable as they are a little less prone to noise and don't require as much\n tuning on the blurring parameter. 
However, they do make optimization slower.\n img_size (tuple) The size of the event camera sensor\n\n Returns:\n The max arguments for the warp parameters wrt the objective\n \"\"\"\n numeric_grads = numeric_grads if obj.has_derivative else True\n argmax_an = optimize_contrast(xs, ys, ts, ps, warp, obj, numeric_grads=numeric_grads, blur_sigma=blur, img_size=img_size)\n return argmax_an\n\ndef optimize_r2(xs, ys, ts, ps, warp, obj, numeric_grads=True, img_size=(180, 240)):\n \"\"\"\n Optimize contrast for a set of events, finishing with SoE loss.\n Parameters:\n xs (numpy float array) The x components of the events\n ys (numpy float array) The y components of the events\n ts (numpy float array) The timestamps of the events. Timestamps should be ts-t[0] to avoid precision issues.\n ps (numpy float array) The polarities of the events\n warp (function) The function with which to warp the events\n obj (objective class object) The objective to optimize\n numeric_grads (bool) If true, use numeric derivatives, otherwise use analytic derivatives if available.\n Numeric grads tend to be more stable as they are a little less prone to noise and don't require as much\n tuning on the blurring parameter. However, they do make optimization slower.\n img_size (tuple) The size of the event camera sensor\n\n Returns:\n The max arguments for the warp parameters wrt the objective\n \"\"\"\n soe_obj = soe_objective()\n numeric_grads = numeric_grads if obj.has_derivative else True\n argmax_an = optimize_contrast(xs, ys, ts, ps, warp, obj, numeric_grads=numeric_grads, blur_sigma=None)\n argmax_an = optimize_contrast(xs, ys, ts, ps, warp, soe_obj, x0=argmax_an, numeric_grads=numeric_grads, blur_sigma=1.0)\n return argmax_an\n\nif __name__ == \"__main__\":\n \"\"\"\n Quick demo of various objectives.\n Args:\n path Path to h5 file with event data\n gt Ground truth optic flow for event slice\n img_size The size of the event camera sensor\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"path\", help=\"h5 events path\")\n parser.add_argument(\"--gt\", nargs='+', type=float, default=(0,0))\n parser.add_argument(\"--img_size\", nargs='+', type=float, default=(180,240))\n args = parser.parse_args()\n\n xs, ys, ts, ps = read_h5_event_components(args.path)\n ts = ts-ts[0]\n gt_params = tuple(args.gt)\n img_size=tuple(args.img_size)\n\n start_idx = 20000\n end_idx=start_idx+15000\n blur = None\n\n draw_objective_function(xs[start_idx:end_idx], ys[start_idx:end_idx], ts[start_idx:end_idx], ps[start_idx:end_idx], variance_objective(), linvel_warp())\n\n objectives = [r1_objective(), zhu_timestamp_objective(), variance_objective(), sos_objective(), soe_objective(), moa_objective(),\n isoa_objective(), sosa_objective(), rms_objective()]\n warp = linvel_warp()\n for obj in objectives:\n argmax = optimize(xs[start_idx:end_idx], ys[start_idx:end_idx], ts[start_idx:end_idx], ps[start_idx:end_idx], warp, obj, numeric_grads=True)\n loss = obj.evaluate_function(argmax, xs[start_idx:end_idx], ys[start_idx:end_idx], ts[start_idx:end_idx],\n ps[start_idx:end_idx], warp, img_size=img_size)\n gtloss = obj.evaluate_function(gt_params, xs[start_idx:end_idx], ys[start_idx:end_idx],\n ts[start_idx:end_idx], ps[start_idx:end_idx], warp, img_size=img_size)\n print(\"{}:({})={}, gt={}\".format(obj.name, argmax, loss, gtloss))\n if obj.has_derivative:\n argmax = optimize(xs[start_idx:end_idx], ys[start_idx:end_idx], ts[start_idx:end_idx],\n ps[start_idx:end_idx], warp, obj, numeric_grads=False)\n loss_an = 
obj.evaluate_function(argmax, xs[start_idx:end_idx], ys[start_idx:end_idx],\n ts[start_idx:end_idx], ps[start_idx:end_idx], warp, img_size=img_size)\n print(\" analytical:{}={}\".format(argmax, loss_an))\n","repo_name":"TimoStoff/events_contrast_maximization","sub_path":"utils/events_cmax.py","file_name":"events_cmax.py","file_ext":"py","file_size_in_byte":8629,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"48"} +{"seq_id":"24799974650","text":"import cv2 as cv \nimport numpy as np \n\nimg = cv.imread(\"WindowsLogo.jpg\", cv.IMREAD_COLOR)\ngray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\nsift = cv.xfeatures2d.SIFT_create()\nkp = sift.detect(gray, None)\nimg = cv.drawKeypoints(gray, kp, img, flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\ncv.namedWindow(\"key points\", cv.WINDOW_NORMAL)\ncv.imshow(\"key points\", img)\ncv.waitKey(0)\ncv.destroyAllWindows()","repo_name":"weizy2018/learnopencv","sub_path":"sift.py","file_name":"sift.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2792990437","text":"'''\nBinary search or two-pointer problem\n\n'''\ndef BOJ2470(N,arr):\n # define the leftmost and rightmost indices\n left,right = 0,N - 1\n minV = float('inf')\n ans1,ans2 = 0,0\n while left < right:\n # tmp = sum of the left and right values\n tmp = arr[left] + arr[right]\n # update the answer if |tmp| is smaller than minV\n if minV > abs(tmp):\n ans1,ans2 = arr[left], arr[right]\n minV = abs(tmp)\n if abs(tmp) == 0:\n break\n # if tmp > 0, decrease right to move tmp closer to 0\n if tmp > 0:\n right -=1\n else:\n left += 1\n\n print(ans1,ans2)\n\nN = int(input())\narr = sorted(list(map(int,input().split())))\nBOJ2470(N,arr)\n","repo_name":"silverjjj/algorithm","sub_path":"BOJ/2470.py","file_name":"2470.py","file_ext":"py","file_size_in_byte":732,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"344229704","text":"from typing import NewType, Callable, Any, Literal\n\nfrom lxml import etree\n\n# -----------------------------------------------------------------------------\n# Application types\n#\n# NOTE: Although Vibin wants to be fairly streamer and media server agnostic,\n# some of these types leak the types found in the StreamMagic and Asset\n# implementations. If other streamers or media servers were to be supported\n# then that would likely require a refactoring of many of these types.\n# -----------------------------------------------------------------------------\n\nMediaType = Literal[\"album\", \"track\", \"artist\"]\n\nMediaId = str # Local media server id (Album, Track, Artist)\n\nMediaMetadata = dict # Local media server metadata\n\nPowerState = Literal[\"on\", \"off\"]\n\nMuteState = Literal[\"on\", \"off\"]\n\nWaveformFormat = Literal[\"dat\", \"json\", \"png\"]\n\nFavoriteType = Literal[\"album\", \"track\"]\n\n# Modifications that can be made to the active streamer playlist\nPlaylistModifyAction = Literal[\n # Add to the end of the playlist. Track or Album.\n \"APPEND\",\n # Insert into the playlist at the given index. Track only.\n \"INSERT\",\n # Replace the playlist with the Track's Album, and play the Track. Track only.\n \"PLAY_FROM_HERE\",\n # Insert after the current entry. Track or Album.\n \"PLAY_NEXT\",\n # Insert after the current entry and start playing the new entry. Track or Album.\n \"PLAY_NOW\",\n # Replace the playlist. 
Track or Album.\n \"REPLACE\",\n]\n\n# Messaging -------------------------------------------------------------------\n\n# Message types sent to subscribed clients (over a WebSocket)\nUpdateMessageType = Literal[\n \"CurrentlyPlaying\",\n \"Favorites\",\n \"Position\",\n \"Presets\",\n \"StoredPlaylists\",\n \"System\",\n \"TransportState\",\n \"UPnPProperties\",\n \"VibinStatus\",\n]\n\nUpdateMessageHandler = Callable[[UpdateMessageType, Any], None]\n\n# UPnP ------------------------------------------------------------------------\n\nUPnPDeviceType = Literal[\"streamer\", \"media_server\"]\n\nUPnPServiceName = NewType(\"UPnPServiceName\", str)\n\nUPnPPropertyName = NewType(\"UPnPPropertyName\", str)\n\nUPnPProperties = dict[UPnPServiceName, dict[UPnPPropertyName, Any]]\n\nUPnPPropertyChangeHandlers = dict[\n (UPnPServiceName, UPnPPropertyName), Callable[[UPnPServiceName, etree.Element], Any]\n]\n\n# Transport -------------------------------------------------------------------\n\n# Transport play states.\nPlayStatus = Literal[\n \"buffering\",\n \"connecting\",\n \"no_signal\",\n \"not_ready\",\n \"pause\",\n \"play\",\n \"ready\",\n \"stop\",\n]\n\nTransportPosition = int\n\n# Actions that can be performed on the streamer.\nTransportAction = Literal[\n \"next\",\n \"pause\",\n \"play\",\n \"previous\",\n \"repeat\",\n \"seek\",\n \"shuffle\",\n \"stop\",\n \"toggle_playback\",\n]\n\nTransportRepeatState = Literal[\"off\", \"all\"]\n\nTransportShuffleState = Literal[\"off\", \"all\"]\n\n# Float: 0.0 -> 1.0 (for beginning -> end of track; 0.5 is half way into track)\n# Int: Number of seconds into the track\n# Str: h:mm:ss into the track\nSeekTarget = float | int | str\n\nDatabaseName = Literal[\n \"favorites\",\n \"links\",\n \"lyrics\",\n \"playlists\",\n \"settings\",\n]\n","repo_name":"mjoblin/vibin","sub_path":"vibin/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":3179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13042958367","text":"import json\nimport nltk\nfrom nltk.util import ngrams\nimport operator\n\n\n# Function for finding ngrams\ndef find_ngrams(input_list, n):\n return zip(*[input_list[i:] for i in range(n)])\n\n\ndef extract_ngrams(data, num):\n words = nltk.word_tokenize(data, language='russian')\n words = [word.lower() for word in words if word.isalpha()]\n n_grams = ngrams(words, num)\n return [' '.join(grams) for grams in n_grams]\n\n\ndgram_stats = {}\n# Read posts from the json dump\nwith open('vk_dump.json', encoding='utf8') as f:\n posts = json.load(f)\nfor post in posts:\n for ngram in extract_ngrams(post['text'], 2):\n dgram_stats[ngram] = dgram_stats.get(ngram, 0) + 1\n\nstats_list = sorted(dgram_stats.items(), key=operator.itemgetter(1))\nfor ngram, count in stats_list:\n print(\"%-32s %d\" % (ngram, count))\n","repo_name":"senyabk/vk_api_big_data","sub_path":"datagram.py","file_name":"datagram.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10580933177","text":"import datetime\nimport urllib2\nimport json\n\n\npressure = 800\ntemp = 60\nhumidity = 20\n\nbody = {\n'pressure': pressure,\n'humidity': humidity,\n'temperature': temp,\n'date': datetime.datetime.now().isoformat(),\n'sourceid': 1 } #sourceid assigned to ken's pi\n\nmyurl = \"/api/record\" #the full url of the backend api needs to be added here\nreq = urllib2.Request(myurl)\nreq.add_header('Content-Type', 'application/json; 
charset=utf-8')\njsondata = json.dumps(body)\njsondataasbytes = jsondata.encode('utf-8') # needs to be bytes\nreq.add_header('Content-Length', len(jsondataasbytes))\nresponse = urllib2.urlopen(req, jsondataasbytes)\n\n","repo_name":"cslsus/csc420620_fa2018_weather_monitoring","sub_path":"weatherpy2.py","file_name":"weatherpy2.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4424270995","text":"from scipy.spatial import cKDTree as KDT\nfrom numpy import array,exp,where,ones,pi,zeros,NaN\nfrom pyproj import Proj\nfrom numpy.linalg import norm\n\n#Gaussian yielding 1 at zero distance and most distant point at 3 standard deviations.\n_distanceFunction = lambda w: exp(-(3.*w/w.max())**2)\n\nclass KDInterpolator(KDT):\n\n \"\"\"Distance weighted nearest neighbour Interpolator for arbitrarily\n distributed input and output data. Should be used with unmasked 1D input arrays.\n\n Attributes:\n scales (float array): 1-D array with scaling weights for each dimension\n in order to differentiate the impact of distances across dimensions\n \"\"\"\n\n def __init__(self,coords,scales,*opts,**ks):\n\n \"\"\"Creates Interpolator instance from coordinates and dimension scales.\n\n Args:\n coords (float array-like): array of coordinate tuples [# of points,\n # of dimensions]\n scales (float array-like): tuple of dimension weights [# of\n dimensions]\n \"\"\"\n\n self.scales=scales\n KDT.__init__(self,coords*scales,*opts,**ks)\n\n def __call__(self,inData,outcoords,k=5,fill_value=None,*opts,**ks):\n\n \"\"\"Interpolates input data on output grid.\n\n Args:\n inData (float array):\n outcoords (float array-like): array of output coordinate tuples\n [# of points, # of dimensions]\n k (integer): number of nearest points to consider\n fill_value (float): input data of this value will be ignored for\n interpolation, used for invalid points in interpolation output\n **opts: positional arguments passed to scipy.spatial.cKDTree.query\n function\n *ks: keyword arguments passed to scipy.spatial.cKDTree.query\n function\n\n Returns:\n interpolated data (float array).\n \"\"\"\n\n data=[]\n for p in outcoords:\n w,c=self.query((p*self.scales),k=k,*opts,**ks)\n indata=inData[c]\n #convert distances to weights\n if w.sum()!=0.:\n w=_distanceFunction(w)\n else:\n w=ones(w.shape)\n if fill_value!=None:\n mask=where(indata==fill_value,0,1)\n if not mask.sum():\n data.append(fill_value)\n else:\n w*=mask\n w/=w.sum()\n data.append((indata*w).sum())\n else:\n w/=w.sum()\n data.append((indata*w).sum())\n return array(data)\n\ndef KDMask(incoord,scales,inMask,outcoord,lonAxis=None,latAxis=None,crit=.5):\n\n \"\"\"Computes interpolated mask using KDInterpolator or KDGeographic.\n\n Args:\n incoord (float array-like): array of coordinate tuples [# of points,\n # of dimensions]\n scales (float array-like): tuple of dimension weights [# of\n dimensions]\n inMask (float array):\n outcoord (float array-like): array of output coordinate tuples\n [# of points, # of dimensions]\n crit (float): mask threshold for interpolated value.\n\n Returns:\n interpolated mask value for each output grid point (float array).\n\n \"\"\"\n if lonAxis==None or latAxis==None:\n kd=KDInterpolator(incoord,scales)\n else:\n kd=KDGeographic(incoord,scales,lonAxis,latAxis)\n Mask=kd(1.*inMask,outcoord)\n return where(Mask>=crit,True,False)\n\nclass KDGeographic:\n\n \"\"\"Lon,lat based interpolation projected on UTM grids to get\n more precise geographic 
interpolations. KDGeographic.interpolator\n contains a list of KDInterpolators.\n\n Attributes:\n interpolator (list of KDInterpolators): interpolators for each UTM zone\n Proj (list of pyproj.Proj) projection instances for each UTM zone\n lonAxis: position of longitude dimension in input coordinates\n latAxis: position of latitude dimension in input coordinates\n \"\"\"\n\n def __init__(self,coords,scales,lonAxis,latAxis,*opts,**ks):\n \"\"\"Collects interpolators for input coordinates.\n\n Args:\n coords (float array-like): array of coordinate tuples [# of points,\n # of dimensions]\n scales (float array-like): tuple of dimension weights [# of\n dimensions]\n lonAxis: position of longitude dimension in input coordinates\n latAxis: position of latitude dimension in input coordinates\n **opts: positional arguments passed to scipy.spatial.cKDTree.query\n function\n *ks: keyword arguments passed to scipy.spatial.cKDTree.query\n function\n \"\"\"\n\n self.interpolator=[]\n self.Proj=[]\n self.lonAxis=lonAxis\n self.latAxis=latAxis\n for i in range(1,61):\n #set up projection for each UTM zone:\n self.Proj.append(Proj(proj='utm',zone=i))\n x,y=self.Proj[-1](coords[:,lonAxis],coords[:,latAxis])\n crdsxy=coords.copy()\n crdsxy[:,lonAxis]=x/111120. #conv back to degree scale for appropriate scaling\n crdsxy[:,latAxis]=y/111120.\n self.interpolator.append(KDInterpolator(crdsxy,scales,*opts,**ks))\n #Universal Polar Stereographic (North)\n self.Proj.append(Proj(proj='ups'))\n x,y=self.Proj[-1](coords[:,lonAxis],coords[:,latAxis])\n crdsxy=coords.copy()\n crdsxy[:,lonAxis]=x/111120. #conv back to degree scale for appropriate scaling\n crdsxy[:,latAxis]=y/111120.\n self.interpolator.append(KDInterpolator(crdsxy,scales,*opts,**ks))\n #Universal Polar Stereographic (South)\n self.Proj.append(Proj(proj='ups',south=True))\n x,y=self.Proj[-1](coords[:,lonAxis],coords[:,latAxis])\n crdsxy=coords.copy()\n crdsxy[:,lonAxis]=x/111120. #conv back to degree scale for appropriate scaling\n crdsxy[:,latAxis]=y/111120.\n self.interpolator.append(KDInterpolator(crdsxy,scales,*opts,**ks))\n\n def __call__(self,inData,outcoords,k=5,*opts,**ks):\n\n \"\"\"Interpolates input data on output grid.\n\n Args:\n inData (float array):\n outcoords (float array-like): array of output coordinate tuples\n [# of points, # of dimensions]\n k (integer): number of nearest points to consider\n **opts: positional arguments passed to scipy.spatial.cKDTree.query\n function\n *ks: keyword arguments passed to scipy.spatial.cKDTree.query\n function\n\n Returns:\n interpolated data (float array).\n \"\"\"\n\n data=[]\n for p in outcoords:\n #retrieve ID of right projection:\n if p[self.latAxis]>84.:\n utmID=-2\n elif p[self.latAxis]<-80.:\n utmID=-1\n else:\n utmID=_UTMzone(p[self.lonAxis])\n #interpolate:\n x,y=self.Proj[utmID](p[self.lonAxis],p[self.latAxis])\n pxy=array(p).copy()\n pxy[self.lonAxis]=x/111120. #conv back to degree scale for appropriate scaling\n pxy[self.latAxis]=y/111120.\n d=self.interpolator[utmID](inData,array([pxy,]),k=k,*opts,**ks)\n data.append(d.squeeze())\n return array(data)\n\nclass KDGeographicPoint:\n\n \"\"\"Lon,lat based interpolation projected on UTM grid to get\n more precise geographic interpolations. 
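The UTM/UPS zone and projection are chosen\n once from the destination coordinates at construction time. 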
Version for individual\n destination point.\n\n Attributes:\n interpolator (KDInterpolator): interpolator\n Proj (pyproj.Proj) projection instance of UTMzone containing destination point\n lonAxis: position of longitude dimension in input coordinates\n latAxis: position of latitude dimension in input coordinates\n \"\"\"\n\n def __init__(self,coords,scales,lonAxis,latAxis,outcoords,*opts,**ks):\n \"\"\"Collects interpolators for input coordinates.\n\n Args:\n coords (float array-like): array of coordinate tuples [# of points,\n # of dimensions]\n scales (float array-like): tuple of dimension weights [# of\n dimensions]\n lonAxis: position of longitude dimension in input coordinates\n latAxis: position of latitude dimension in input coordinates\n outcoords (float array-like): array of output coordinates\n [# of dimensions]\n **opts: positional arguments passed to scipy.spatial.cKDTree.query\n function\n *ks: keyword arguments passed to scipy.spatial.cKDTree.query\n function\n \"\"\"\n self.lonAxis=lonAxis\n self.latAxis=latAxis\n self.p=array(outcoords) #output geographic coordinates\n if self.p[self.latAxis]>84.:\n utmID=60\n elif self.p[self.latAxis]<-80.:\n utmID=61\n else:\n utmID=_UTMzone(self.p[self.lonAxis])\n if utmID < 60:\n #set up projection for the UTM zone (utmID is a 0-59 index, zones are 1-60):\n self.Proj=Proj(proj='utm',zone=utmID+1)\n elif utmID==60:\n #Universal Polar Stereographic (North)\n self.Proj=Proj(proj='ups')\n elif utmID==61:\n #Universal Polar Stereographic (South)\n self.Proj=Proj(proj='ups',south=True)\n #UTM coordinates of output point:\n x,y=self.Proj(self.p[self.lonAxis],self.p[self.latAxis])\n self.pxy=array(self.p).copy()\n self.pxy[self.lonAxis]=x/111120. #conv back to degree scale for appropriate scaling\n self.pxy[self.latAxis]=y/111120.\n #UTM coordinates of input points:\n x,y=self.Proj(coords[:,lonAxis],coords[:,latAxis])\n crdsxy=coords.copy()\n crdsxy[:,lonAxis]=x/111120. 
#conv back to degree scale for appropriate scaling\n crdsxy[:,latAxis]=y/111120.\n # search for k nearest points:\n self.interpolator=(KDInterpolator(crdsxy,scales,*opts,**ks))\n self.coords=coords\n\n def __call__(self,inData,k=5,*opts,**ks):\n\n \"\"\"Interpolates input data on output grid.\n\n Args:\n inData (float array):\n k (integer): number of nearest points to consider\n **opts: positional arguments passed to scipy.spatial.cKDTree.query\n function\n *ks: keyword arguments passed to scipy.spatial.cKDTree.query\n function\n\n Returns:\n interpolated data (float array).\n \"\"\"\n\n d=self.interpolator(inData,array(self.pxy).reshape([1,-1]),k=k,*opts,**ks).squeeze()\n return d\n\n_UTMzone=lambda lon:int((lon+180)%360)//6 #UTMzone index (0-59)\n","repo_name":"mommebutenschoen/irregularInterpolation","sub_path":"irregularInterpolation/KDInterpolator.py","file_name":"KDInterpolator.py","file_ext":"py","file_size_in_byte":10538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3591473961","text":"import uvicorn\nfrom fastapi import Depends, FastAPI, HTTPException\nfrom sqlalchemy.orm import Session\n\nfrom vector_ai_app import crud, models, schemas\nfrom vector_ai_app.database import SessionLocal, engine\nfrom celery_worker import create_country_celery, create_city_celery, remove_city_celery, \\\n update_city_celery, create_continent_celery\n\nmodels.Base.metadata.create_all(bind=engine)\n\napp = FastAPI()\n\n\ndef get_db():\n db = SessionLocal()\n try:\n yield db\n finally:\n db.close()\n\n@app.get(\"/\")\ndef index():\n return {\"Message\" : \"Vector AI API Index See /docs for more info\"}\n\n\n@app.get(\"/cities/\")\ndef read_cities(country:str= '', skip: int=0,limit:int=100, db: Session = Depends(get_db)):\n cities = crud.get_cities(db,skip=skip,limit=limit,country=country)\n return cities\n\n\n@app.post(\"/create_city/\")\ndef create_city(city:schemas.City):\n create_city_celery.delay(city.dict())\n return {'Message' : f'Request to create {city.name} is received'}\n\n\n@app.put(\"/update_city\")\ndef update_city(city:schemas.UpdateCity):\n update_city_celery.delay(city.dict())\n return {'Message' : f'Request to update {city.name} is received'}\n\n\n@app.delete(\"/remove_city/\")\ndef delete_city(city:schemas.RemoveCity):\n remove_city_celery.delay(city.dict())\n return {'Message' : f'Request to remove {city.city} is received'}\n\n\n@app.post(\"/create_country/\")\ndef create_country(country:schemas.Country):\n create_country_celery.delay(country.dict())\n return {'Message' : f'Request to create {country.name} is received'}\n\n\n@app.get(\"/countries/\")\ndef get_countries(db:Session = Depends(get_db)):\n return crud.get_all_countries(db)\n\n\n@app.post(\"/create_continent\")\ndef create_continent(continent:schemas.Continent):\n create_continent_celery(continent.dict())\n return {'Message': f\"Request to create {continent.name} is received\"}\n\n\n@app.get(\"/continents/\")\ndef get_continents(db:Session = Depends(get_db)):\n return crud.get_all_continents(db)\n\n\nif __name__ == \"__main__\":\n uvicorn.run(app, host = \"localhost\", port=800)\n","repo_name":"mericaliyigit/vectorai","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28866673550","text":"from src.business.application.common import interfaces\nfrom src.business.application.station import exceptions\nfrom 
src.business.domain import entities\nfrom tests.mocks.repositories.base import BaseRepo\n\n\nclass StationReader(interfaces.StationReader, BaseRepo):\n async def read_station(\n self, station_id: entities.StationId\n ) -> entities.Station:\n station = self.storage[\"station\"].get(station_id, None)\n\n if not station:\n raise exceptions.StationNotExists\n\n return station\n\n async def read_stations(\n self, limit: int, offset: int\n ) -> list[entities.Station]:\n stations = list(self.storage[\"station\"].values())[offset:][:limit]\n return stations\n","repo_name":"radzih/routes_api","sub_path":"tests/mocks/repositories/station.py","file_name":"station.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20608666907","text":"# install ViennaRNA-2.4.13\n# conda install -c bioconda viennarna\n# conda install -c conda-forge bash\n\n# IMPORTS\n# ViennaRNA\nimport RNA\n\n# Os\nimport os\n\n# Numpy\nimport numpy as np\n\n# CSV\nimport csv\n\n# Pyrosetta\nfrom pyrosetta import init, get_fa_scorefxn, pose_from_sequence, pose_from_pdb\nfrom pyrosetta.teaching import *\ninit()\nscorefxn = get_fa_scorefxn()\n\n# GLOBAL VARIABLES\n# Change\nNUM_NUCLEOTIDES = 50\nRESULTS_CSV = \"Carpeta_en_uso/database.csv\"\nROSETA_FILES = \"/Users/anuska/Desktop/IGEM/Rosetta/rosetta_src_code/\"\nNUM_STRUCTURES_PER_SEQUENCE = 100\n\n# No change\nADENINE = \"a\"\nGUANINE = \"g\"\nCITOSINE = \"c\"\nTYMINE = \"t\"\nURACIL = \"u\"\nFIRST_TIME = False\n\n# FUNCTION THAT CREATES A RANDOM DNA - ONE CHAIN\n# Choose a,c,g or t randomly\ndef random_nucleotide():\n value = np.random.choice((1, 2, 3, 4))\n global ADENINE, GUANINE, CITOSINE, TYMINE\n if value == 1:\n return ADENINE\n elif value == 2:\n return GUANINE\n elif value == 3:\n return CITOSINE\n elif value == 4:\n return TYMINE\n\n# creates a DNA chain with a number of NUM_NUCLEOTIDES\ndef noise_nucleotides():\n global NUM_NUCLEOTIDES\n noise = []\n for _ in range(0, NUM_NUCLEOTIDES):\n aux = random_nucleotide()\n noise.append(aux)\n return noise\n\n# Perform the DNA to RNA change\ndef from_DNA_to_RNA(noise):\n global NUM_NUCLEOTIDES, TYMINE\n for i in range(0, NUM_NUCLEOTIDES):\n if noise[i] == TYMINE:\n noise[i] = URACIL\n return noise\n\n# Creates a doiuble RNA (NOT used now)\ndef RNAby2(rna):\n chainB = []\n global NUM_NUCLEOTIDES, ADENINE, GUANINE, URACIL, CITOSINE\n for i in range(0, NUM_NUCLEOTIDES):\n if rna[i] == ADENINE:\n chainB.append(CITOSINE)\n elif rna[i] == CITOSINE:\n chainB.append(ADENINE)\n elif rna[i] == URACIL:\n chainB.append(GUANINE)\n elif rna[i] == GUANINE:\n chainB.append(URACIL)\n return chainB[::-1]\n\n# Create DNA chain\nnoise = noise_nucleotides()\n# Transform to RNA\nrna = from_DNA_to_RNA(noise)\nseq_A = \"\".join(rna)\n\n# SECONDARY STRUCTURE FROM RNA\n# compute minimum free energy (MFE) and corresponding structure\n(secondary_structure, mfe) = RNA.fold(seq_A)\n\n# print output\nprint(\"Structure: %s \\nMinimum free energy: %f \" % (secondary_structure, mfe))\n\n# Saving FASTA and dtb formats in txt\nwith open(\"aptamer.txt\", \"w\") as Myfile:\n Myfile.write(\">\" + \"aptamer\" + \"\\n\" + seq_A + \"\\n\")\nwith open(\"aptamer_sec.txt\", \"w\") as Myfile:\n Myfile.writelines(secondary_structure)\n\n# PERFORM THE 3D FOLDING OF RNA\n# export and configure Roseatta RNA_TOOLS\nos.system(\"export ROSETTA=%s\"%(ROSETA_FILES))\nos.system(\"export RNA_TOOLS=$ROSETTA/tools/rna_tools/\")\nos.system(\"export 
PATH=$RNA_TOOLS/bin/:$PATH\")\nos.system(\"export ROSETTA3=$ROSETTA/main/source/bin/\")\nos.system(\"export PYTHONPATH=$PYTHONPATH:$RNA_TOOLS/bin/\")\nos.system(\"source ~/.bashrc\")\nos.system(\"python $RNA_TOOLS/sym_link.py\")\n\n# Configure and preform the folding in a Rosetta's protocol\nos.system(\"$ROSETTA3/rna_denovo.default.macosclangrelease -fasta aptamer.txt -nstruct %s -minimize_rna -secstruct_file aptamer_sec.txt -out::file::silent my_rna_structures.out\"%(NUM_STRUCTURES_PER_SEQUENCE))\n# Minimize and extract the best one\nos.system(\"$RNA_TOOLS/silent_util/silent_file_sort_and_select.py my_rna_structures.out -select 1 -o aptamer_best.out\")\n# To PDB in a Rosetta's protocol\nos.system(\"$ROSETTA3/score.default.macosclangrelease -in:file:silent aptamer_best.out -in::file::fullatom -out:output\")\n\n# PERFORM THE RNA TO DNA TRANSFORMATION\n# From RNA to DNA\nwith open(\"S_000001_0001.pdb\", \"r\") as file_pdb:\n atomes = file_pdb.readlines()\n\n# (Lyon Igem code)\ndef from_RNA_to_DNA(atomes):\n output = []\n #i = 0\n for atome in atomes:\n #i += 1\n #print(\"hola %s %s\" % (i, atome))\n # Lyon's code solution:\n if (atome == \"\\n\"):\n break\n if not(\"O2'\" in atome):\n if (atome[-5] == \"H\" or atome[-4] == \"H\") and (\"U\" in atome) and (\"H5 \" in atome):\n line = atome.split(\" \")\n line[1] = line[1][:-2] + \"C7\"\n line[-1] = \" C \\n\"\n atome = \" \".join(line)\n if \"U\" in atome:\n atome = atome[:19]+\"T\"+atome[20:]\n output.append(atome)\n return output\n\noutput = from_RNA_to_DNA(atomes)\n# Save DNA\nwith open(\"DNA_aptamer.pdb\", \"w\") as file_pdb:\n file_pdb.writelines(output)\n\n# Extract pose from DNA in PDB format\nnew_pose = pose_from_pdb(\"DNA_aptamer.pdb\")\n\n# Scoring\npose = pose_from_sequence(seq_A)\nscoring_2 = scorefxn(new_pose)\nscoring_1 = scorefxn(pose)\nprint(scoring_1)\nprint(scoring_2)\n\n# SAVE THE DNA TO CSV\n# Remove the unused files\nos.system(\"rm my_rna_structures.out\")\nos.system(\"rm aptamer_sec.txt\")\nos.system(\"rm aptamer.txt\")\nos.system(\"rm S_000001_0001.pdb\")\n#os.system(\"rm DNA_aptamer.pdb\")\nos.system(\"rm aptamer_best.out\")\nos.system(\"rm default.sc\")\n\n# Extract the angles from pose\n# Open csv and save: Save the given sequence in a csv\n# First line contain: Sequence, Degrees and Score\ndef save_to_csv(seq, x, score):\n headers = (\"Sequence\", \"Degrees\", \"Score\")\n table = [seq, x, score]\n global FIRST_TIME, RESULTS_CSV\n if (FIRST_TIME == True):\n with open(RESULTS_CSV, \"w\", newline = \"\", encoding = \"utf-8\") as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(headers)\n writer.writerow(table)\n FIRST_TIME = False\n else:\n with open(RESULTS_CSV, \"a\", newline = \"\", encoding = \"utf-8\") as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(table)\n\n# Save the poses in a csv (per lines)\ndef save_poses_in_a_csv(pose, seq):\n global NUM_NUCLEOTIDES, ADENINE, GUANINE, TYMINE, CITOSINE\n gen_img = []\n for k in range(0, NUM_NUCLEOTIDES):\n gen_img.append(pose.gamma(k+1))\n gen_img.append(pose.epsilon(k+1))\n gen_img.append(pose.zeta(k+1))\n gen_img.append(pose.chi(k+1))\n gen_img.append(pose.zeta(k+1))\n print(gen_img)\n sequence = []\n for s in range(0, NUM_NUCLEOTIDES):\n if seq[s] == ADENINE:\n sequence.append(\"A[ADE]\")\n elif seq[s] == GUANINE:\n sequence.append(\"G[GUA]\")\n elif seq[s] == CITOSINE:\n sequence.append(\"C[CYT]\")\n elif seq[s] == TYMINE or seq[s] == URACIL:\n sequence.append(\"T[THY]\")\n print(sequence)\n my_seq = \"\".join(sequence)\n score = scorefxn(pose)\n 
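The sequence handling in the record above is pure string work before any Rosetta or ViennaRNA call. A standalone re-implementation with quick checks, using hypothetical helper names and the same uniform-sampling assumption, that runs without those dependencies:

import random

def random_dna(n, seed=None):
    # Uniformly sample n lowercase nucleotides, as the script's random_nucleotide does.
    rng = random.Random(seed)
    return "".join(rng.choice("acgt") for _ in range(n))

def transcribe(dna):
    # DNA -> RNA on the coding strand is just t -> u.
    return dna.replace("t", "u")

if __name__ == "__main__":
    seq = random_dna(50, seed=1)
    rna = transcribe(seq)
    assert len(rna) == len(seq) and "t" not in rna
    print(seq, "->", rna)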
save_to_csv(my_seq, gen_img, score)\n\nsave_poses_in_a_csv(new_pose, seq_A)\n","repo_name":"Zildj1an/SELEX","sub_path":"Aptamer_Folding_AI/3D_structure_creation_in_pdb.py","file_name":"3D_structure_creation_in_pdb.py","file_ext":"py","file_size_in_byte":6544,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"48"} +{"seq_id":"40522553839","text":"import torch.nn.functional as F\nfrom torch import Tensor\nfrom torch.autograd import Variable\nfrom gym.spaces import Box\nfrom utils.misc import soft_update, hard_update, onehot_from_logits, gumbel_softmax, hard_update_lst, soft_update_lst\nfrom utils.noise import OUNoise\n\nimport itertools\nimport torch\nimport numpy as np\n\nMSELoss = torch.nn.MSELoss()\n\nclass AgentBaseNet(object):\n def __init__(self, dim_act = 2, nagents = 3, gamma=0.95, tau=0.01, lr=0.01, hidden_dim=64, discrete_action=False, cuda=True):\n\n '''\n :param dim_act:\n :param nagents:\n :param gamma:\n :param tau:\n :param lr:\n :param hidden_dim:\n :param discrete_action:\n :param cuda:\n '''\n\n self.nagents = nagents\n self.dim_act = dim_act\n\n self.gamma = gamma\n self.tau = tau\n self.lr = lr\n self.discrete_action = discrete_action\n self.niter = 0\n self.cuda = cuda # move all models to the device\n self.discrete_action = discrete_action\n if not discrete_action:\n self.exploration = [OUNoise(self.dim_act) for i in range(self.nagents)]\n\n else:\n self.exploration =[0.3 for i in range(self.nagents)]\n\n self.model_list = []\n self.model_name_list = []\n self.opt_list=[]\n self.opt_name_list = []\n\n def reset_noise(self):\n if not self.discrete_action:\n [self.exploration[i].reset() for i in range(self.nagents)]\n\n def scale_noise(self, scale):\n for i in range(self.nagents):\n if self.discrete_action:\n self.exploration[i] = scale\n else:\n self.exploration[i].scale = scale\n\n def update_episode_num(self, num):\n self.episode_num = num\n def provide_env(self, env):\n self.env = env\n\n # 要以get_rollout_action为输入才可以\n def step(self, obs, explore=False, return_weights = False):\n \"\"\"\n Take a step forward in environment for a minibatch of observations\n Inputs:\n obs (PyTorch Variable): Observations for this agent\n explore (boolean): Whether or not to add exploration noise\n Outputs:\n action (PyTorch Variable): Actions for this agent\n hidden_unit\n d_Q\n \"\"\"\n if return_weights == True:\n weights_prob, action = self.get_rollout_action(obs, return_weights)\n else:\n action = self.get_rollout_action(obs)\n\n if action.device != 'cpu':\n action = action.to('cpu')\n for agent_idx in range(self.nagents):\n if self.discrete_action:\n if explore:\n action[agent_idx,:,:] = gumbel_softmax(action[agent_idx,:,:], hard=True)\n else:\n action[agent_idx,:,:] = onehot_from_logits(action[agent_idx,:,:])\n else: # continuous action\n if explore:\n action[agent_idx,:,:] += Variable(Tensor(self.exploration[agent_idx].noise()),\n requires_grad=False)\n action = action.clamp(-1, 1)\n\n if return_weights == True:\n return weights_prob, action\n else:\n return action\n\n def get_rollout_action(self, obs):\n pass\n\n def get_target_action(self, obs):\n pass\n\n def get_action(self, obs):\n pass\n\n\n def get_params(self):\n dict={}\n k_list = self.model_name_list + self.opt_name_list\n v_list = self.model_list + self.opt_list\n for pair in zip(k_list, v_list):\n if type(pair[1])==list:\n dict[pair[0]] = [x.state_dict() for x in pair[1]]\n else:\n dict[pair[0]] = pair[1].state_dict()\n return dict\n\n\n\n def load_params(self, params):\n 
k_list = self.model_name_list + self.opt_name_list\n v_list = self.model_list + self.opt_list\n\n for idx in range(len(k_list)):\n name = k_list[idx]\n model = v_list[idx]\n if type(params[name])==list:\n for i,v in enumerate(params[name]):\n model[i].load_state_dict(v)\n # if 'opt' in name and self.cuda:\n # self.move_optmizer_cuda(model[i])\n else:\n model.load_state_dict(params[name])\n # if 'opt' in name and self.cuda:\n # self.move_optmizer_cuda(model)\n\n def update(self):\n \"\"\"\n Update parameters of agent model based on sample from replay buffer\n Inputs:\n sample: tuple of (observations, actions, rewards, next\n observations, and episode end masks) sampled randomly from\n the replay buffer. Each is a list with entries\n corresponding to each agent\n agent_i (int): index of agent to update\n parallel (bool): If true, will average gradients across threads\n logger (SummaryWriter from Tensorboard-Pytorch):\n If passed in, important quantities will be logged\n \"\"\"\n pass\n\n def add_log(self, logger, dict_loss):\n if logger is not None:\n logger.add_scalars('agent%i/losses' % 0,\n dict_loss,\n self.niter)\n self.niter += 1\n\n def update_all_targets(self):\n \"\"\"\n Update all target networks (called after normal updates have been\n performed for each agent)\n \"\"\"\n pass\n\n # moving all to given device, including optimizer\n def mov_all_models(self, device='cpu'):\n for model in self.model_list:\n if type(model) == list:\n for model_i in model:\n model_i.to(device)\n else:\n model.to(device)\n for opt in self.opt_list:\n if type(opt) == list:\n for opt_i in opt:\n self.move_optmizer_cuda(opt_i, device)\n else:\n self.move_optmizer_cuda(opt, device)\n\n def move_optmizer_cuda(self, optimizer, device):\n for state in optimizer.state.values():\n for k,v in state.items():\n if isinstance(v, torch.Tensor):\n state[k] = v.to(device)\n\n def prep_training(self):\n\n for model in self.model_list:\n if type(model) == list:\n for model_i in model:\n model_i.train()\n else:\n model.train()\n\n def prep_rollouts(self):\n for model in self.model_list:\n if type(model) == list:\n for model_i in model:\n model_i.eval()\n else:\n model.eval()\n\n def save(self, filename):\n \"\"\"\n Save trained parameters of all agents into one file\n \"\"\"\n self.mov_all_models(device='cpu') # move parameters to CPU before saving\n save_dict = {'init_dict': self.init_dict,\n 'agent_params': self.get_params()}\n torch.save(save_dict, filename)\n\n @classmethod\n def init_from_env(cls, env, dim_thought = 128, gamma=0.95, tau=0.01, lr=0.01, hidden_dim=64, max_agents=3, cuda=True):\n \"\"\"\n Instantiate instance of this class from multi-agent environment\n \"\"\"\n adv_flag = env.agent_types.count('adversary')\n for type, acsp, obsp in zip(env.agent_types, env.action_space, env.observation_space):\n if adv_flag and type!='adversary':\n continue\n num_in_pol = obsp.shape[0]\n if isinstance(acsp, Box):\n discrete_action = False\n get_shape = lambda x: x.shape[0]\n else: # Discrete\n discrete_action = True\n get_shape = lambda x: x.n\n num_out_pol = get_shape(acsp)\n\n #此处在num_agent限制,从而保证dim_thought的正确性\n if adv_flag:\n nagents = env.agent_types.count('adversary')\n else:\n nagents = env.agent_types.count('agent')\n\n init_dict = {'dim_obs': num_in_pol, 'dim_thought': dim_thought, 'dim_act': num_out_pol,\n 'max_agents': max_agents, 'nagents':nagents , 'gamma': gamma,\n 'tau': tau, 'lr': lr,\n 'hidden_dim': hidden_dim,\n 'discrete_action': discrete_action,\n 'cuda': cuda}\n instance = cls(**init_dict)\n\n 
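utils.misc is imported at the top of this record but not shown here. A common shape for the soft_update it provides (Polyak averaging of target-network parameters) is sketched below; this is an assumption, not necessarily the repository's exact code.

import torch

def soft_update(target, source, tau):
    # target <- tau * source + (1 - tau) * target, parameter by parameter.
    with torch.no_grad():
        for t_p, s_p in zip(target.parameters(), source.parameters()):
            t_p.mul_(1.0 - tau).add_(tau * s_p)

if __name__ == "__main__":
    src, tgt = torch.nn.Linear(4, 2), torch.nn.Linear(4, 2)
    w_before = tgt.weight.clone()
    soft_update(tgt, src, tau=0.01)
    # The target moved slightly towards the source network.
    assert not torch.equal(w_before, tgt.weight)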
instance.init_dict = init_dict\n return instance\n\n @classmethod\n def init_from_save(cls, filename):\n \"\"\"\n Instantiate instance of this class from file created by 'save' method\n \"\"\"\n save_dict = torch.load(filename)\n instance = cls(**save_dict['init_dict'])\n instance.init_dict = save_dict['init_dict']\n instance.load_params(save_dict['agent_params'])\n return instance","repo_name":"chenbq/CADTC","sub_path":"algorithms/AgentBaseNet.py","file_name":"AgentBaseNet.py","file_ext":"py","file_size_in_byte":8965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18343719104","text":"import boto3\ncognito_idp = boto3.client(\"cognito-idp\")\nimport requests\n\ndef handler(event, context):\n try:\n res = requests.get(\n \"https://paper-api.alpaca.markets/v2/account\",\n params={},\n headers={\"Accept\":\"application/json\"}\n )\n # your code goes here\n except BaseException as e:\n # error handling goes here\n raise(e)\n try:\n data = cognito_idp.list_users(\n UserPoolId=\"us-east-1_HdYJb7Znp\",\n Limit=10\n )\n except BaseException as e:\n print(e)\n raise(e)\n \n return {\"message\": \"Successfully executed\"}\n","repo_name":"Kumudikatest/kpycli","sub_path":"kpycli.py","file_name":"kpycli.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4773309319","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns = [\n\turl(r'^$',views.index), # index redirects to main\n\turl(r'^main$',views.main), # shows register and login if not logged in, otherwise redirects to localhost/quotes\n\turl(r'^users$',views.user), # redirects to users/< request.session[\"id\"] >\n\turl(r'^users/(?P[0-9])$',views.users),\n\turl(r'^register$',views.register),\n\turl(r'^success$',views.success),\n\turl(r'^login$',views.login),\n\turl(r'^logout$',views.logout),\n\t]","repo_name":"marinaskevin/python_belt_exam","sub_path":"apps/main/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21156743496","text":"from gravel_spec.utils import *\nfrom gravel_spec.ops import *\nfrom gravel_spec.element import *\nfrom gravel_spec.graph import *\nfrom gravel_spec.config import *\n\n\nWINDOW = 600\n\n\nclass IPFilter(Element):\n ele_name = 'IPFilter'\n num_in_ports = 1\n num_out_ports = 1\n \n def process_packet(self, old, p, in_port):\n ether_type = p.ether.ether_type\n return [{ 'pre_cond' : ether_type == 0x0800, \n 'packets' : { 0 : p }, \n 'new_state' : old }]\n\n\nclass TcpFilter(Element):\n ele_name = 'udp_tcp_filter'\n num_in_ports = 1\n num_out_ports = 1\n \n def process_packet(self, old, p, in_port):\n proto = p.ip4.proto\n return [{ 'pre_cond' : proto == 6,\n 'packets' : { 0 : p },\n 'new_state' : old }]\n\n\nclass LBStorage(Element):\n ele_name = 'lb_storage'\n num_in_ports = 2\n num_out_ports = 2\n\n private_state_type = [('decisions', 'map', (4, 2, 4, 2), (4,)),\n ('timestamps', 'map', (4, 2, 4, 2), (8,)),\n ('curr_time', 'bitvec', 8)]\n\n def process_packet(self, old, p, in_port):\n flow_id = p.ip4.src, p.tcp.src, p.ip4.dst, p.tcp.dst\n is_known_flow = And(in_port == 0, old.decisions.has_key(flow_id))\n new_p = p.copy()\n new_p.inner_ip = new_p.ip4.copy()\n new_p.ip4.dst = old.decisions[flow_id][0]\n new_p.ip4.length = new_p.inner_ip.length + 20\n new_p.ip4.proto = 0x4\n timestamp_updated = old.copy()\n 
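LBStorage above is a symbolic z3 specification. As a reading aid, here is a concrete plain-Python analogue of the same flow-table behaviour: cache a backend per TCP 4-tuple, refresh the timestamp on every hit, and expire entries idle longer than the window. All names are hypothetical.

import time

class FlowTable:
    def __init__(self, window_s=600.0):
        self.window_s = window_s     # idle timeout, like WINDOW above
        self.decisions = {}          # flow 4-tuple -> backend address
        self.timestamps = {}         # flow 4-tuple -> last-seen time

    def lookup(self, flow_id, now=None):
        now = time.monotonic() if now is None else now
        # Drop entries idle longer than the window before answering.
        stale = [f for f, t in self.timestamps.items() if now - t > self.window_s]
        for fid in stale:
            self.decisions.pop(fid, None)
            self.timestamps.pop(fid, None)
        if flow_id in self.decisions:
            self.timestamps[flow_id] = now   # refresh on hit
            return self.decisions[flow_id]
        return None

    def register(self, flow_id, backend, now=None):
        now = time.monotonic() if now is None else now
        self.decisions[flow_id] = backend
        self.timestamps[flow_id] = now

if __name__ == "__main__":
    ft = FlowTable(window_s=600.0)
    flow = ("10.0.0.1", 1234, "10.0.0.2", 80)
    ft.register(flow, "backend-a", now=0.0)
    assert ft.lookup(flow, now=10.0) == "backend-a"   # cached decision
    assert ft.lookup(flow, now=10.0 + 601.0) is None  # expired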
timestamp_updated.timestamps[flow_id] = If(old.timestamps.has_key(flow_id), \n If(z3.UGT(old.curr_time, old.timestamps[flow_id][0]), \n old.curr_time, old.timestamps[flow_id][0]),\n old.curr_time)\n \n is_unknown_flow = And(in_port == 0, Not(old.decisions.has_key(flow_id)))\n\n register_new_flow = in_port == 1\n new = old.copy()\n flow_id = p.inner_ip.src, p.tcp.src, p.inner_ip.dst, p.tcp.dst\n new.decisions[flow_id] = p.ip4.dst\n new.timestamps[flow_id] = new.curr_time\n \n return [{ 'pre_cond' : is_known_flow, \n 'packets' : { 0 : new_p },\n 'new_state' : timestamp_updated },\n { 'pre_cond' : register_new_flow,\n 'packets' : { 0 : p },\n 'new_state' : new },\n { 'pre_cond' : is_unknown_flow,\n 'packets' : { 1 : p },\n 'new_state' : old }]\n\n def handle_event(self, s, event, *params):\n new = s.copy()\n new.curr_time = params[0]\n expire_filter = lambda ks, vs: And(s.timestamps.has_key(ks),\n z3.ULT(s.timestamps[ks][0], -1 - WINDOW), \n z3.UGE(new.curr_time, WINDOW + s.timestamps[ks][0]))\n new.decisions = new.decisions.filter(expire_filter)\n new.timestamps = new.timestamps.filter(expire_filter)\n return [{ 'pre_cond' : z3.BoolVal(True),\n 'packets' : {},\n 'new_state' : new }]\n\nclass Scheduler(Element):\n ele_name = 'scheduler'\n num_in_ports = 1\n num_out_ports = 1\n\n private_state_type = [('addr_map', 'map', (4,), (4,)),\n ('cnt', 'bitvec', 4), \n ('num_dsts', 'bitvec', 4)]\n\n def process_packet(self, old, p, in_port):\n dst_ip = old.addr_map[old.cnt % old.num_dsts][0]\n new = old.copy()\n new.cnt = (old.cnt + 1) % old.num_dsts\n new_packet = p.copy()\n new_packet.inner_ip = new_packet.ip4.copy()\n new_packet.ip4.dst = dst_ip\n new_packet.ip4.length = new_packet.inner_ip.length + 20\n new_packet.ip4.proto = 0x4\n return [{ 'pre_cond' : z3.BoolVal(True),\n 'packets' : { 0 : new_packet },\n 'new_state' : new }]\n\nclass Maglev(Element):\n ele_name = 'maglev_selector'\n num_in_ports = 1\n num_out_ports = 1\n \n private_state_type = [('lookup_table', 'map', (4,), (4,)),\n ('hash_func', 'uf', (12,), 4)]\n\n def process_packet(self, old, p, in_port):\n flow_id = p.ip4.src, p.tcp.src, p.ip4.dst, p.tcp.dst\n hash_val = old.hash_func(Concat(*flow_id))\n dst_ip = old.lookup_table[hash_val][0]\n new_packet = p.copy()\n new_packet.inner_ip = new_packet.ip4.copy()\n new_packet.ip4.dst = dst_ip\n new_packet.ip4.length = new_packet.inner_ip.length + 20\n new_packet.ip4.proto = 0x4\n return [{ 'pre_cond' : z3.BoolVal(True),\n 'packets' : { 0 : new_packet },\n 'new_state' : old }]\n\n def state_inv(self, s):\n k = fresh_bv('k', 32)\n return ForAll([k], s.lookup_table.has_key(k))\n\n\ndef has_inner_ip(p):\n return (p.ip4.proto == 0x4)\n\ndef get_flow_id(p):\n flow_id = p.ip4.src, p.tcp.src, p.ip4.dst, p.tcp.dst\n return flow_id\n\ndef from_same_flow(p1, p2):\n return And(is_tcp(p1), is_tcp(p2),\n Not(has_inner_ip(p1)),\n Not(has_inner_ip(p2)),\n p2.ip4.src == p1.ip4.src, \n p2.ip4.dst == p1.ip4.dst,\n p2.tcp.src == p1.tcp.src, \n p2.tcp.dst == p1.tcp.dst)\n\ndef is_tcp(p):\n return And(p.ether.ether_type == 0x0800,\n If(has_inner_ip(p),\n p.inner_ip.proto == 6,\n p.ip4.proto == 6))\n\ndef steer_to(c, s, p, dst_ip, ddl):\n s_n = s\n t = fresh_bv('t', 64)\n _, s_n = c.handle_event(s, 'cache', '', t)\n o, _ = c.process_packet(s_n, 'in', p)\n return ForAll([t], Implies(And(z3.UGT(ddl, t)),\n And(Not(o['out'].is_empty()),\n o['out'].ip4.dst == dst_ip,\n o['__edges']['cache'][1].is_empty())))\n\ndef get_flow_id(p):\n return p.ip4.src, p.tcp.src, p.ip4.dst, p.tcp.dst\n\nclass LBTasks(ConfigVerifyTask, 
unittest.TestCase):\n @classmethod\n def build_conf(cls):\n parser = HeaderParser()\n parser.add_header('ether', ETHER_HDR)\n parser.add_header('ip4', IPv4_HDR)\n parser.add_header('inner_ip', IPv4_HDR)\n parser.add_header('tcp', TCP_HDR)\n parser.add_header('payload', [('data', 1500)])\n \n elements = [('in', Source),\n ('out', Sink),\n ('ip_filter', IPFilter),\n ('tcp_filter', TcpFilter),\n ('cache', LBStorage),\n ('lb', Scheduler)]\n\n path = Path('in', 0) >> (0, 'ip_filter', 0) >> (0, 'tcp_filter', 0) \\\n >> (0, 'cache', 1) >> (0, 'lb', 0) >> (1, 'cache', 0) >> (0, 'out')\n\n return Config(elements, path.edges(), parser)\n\n def test_tcp_only(self):\n c = self.conf()\n p, old_states = c.fresh_packet(), c.fresh_states()\n out, _ = c.process_packet(old_states, 'in', p)\n self.verify(Implies(Not(out['out'].is_empty()),\n is_tcp(p)))\n\n def test_always_steer(self):\n c = self.conf()\n p, s = c.fresh_packet(), c.fresh_states()\n out, _ = c.process_packet(s, 'in', p)\n self.verify(Implies(And(p.ether.ether_type == 0x0800,\n p.ip4.proto == 6),\n Not(out['out'].is_empty())))\n\n def test_is_l3(self):\n c = self.conf()\n p, old_states = c.fresh_packet(), c.fresh_states()\n out, _ = c.process_packet(old_states, 'in', p)\n self.verify(Implies(Not(out['out'].is_empty()),\n has_inner_ip(out['out'])))\n\n def test_persistency(self):\n c = self.conf()\n p1, p2, old_states = c.fresh_packet(), c.fresh_packet(), c.fresh_states()\n out1, new_s = c.process_packet(old_states, 'in', p1)\n\n p2.ip4.src, p2.ip4.dst = p1.ip4.src, p1.ip4.dst\n p2.tcp.src, p2.tcp.dst = p1.tcp.src, p1.tcp.dst\n out2, _ = c.process_packet(new_s, 'in', p2)\n self.verify(Implies(And(Not(out1['out'].is_empty()),\n Not(out2['out'].is_empty())),\n out1['out'].ip4.dst == out2['out'].ip4.dst))\n\n def test_step_init(self):\n c = self.conf()\n dst_ip = fresh_bv('dst_ip', 32)\n p0, p1, s0 = c.fresh_packet(), c.fresh_packet(), c.fresh_state()\n o, s1 = c.process_packet(s0, 'in', p0)\n dst_ip = o['out'].ip4.dst\n t = s0['cache'].curr_time\n ddl = t + WINDOW\n t0 = fresh_bv('time', 64)\n self.verify(Implies(And(p0.ether.ether_type == 0x0800,\n p0.ip4.proto == 6),\n steer_to(c, s1, p0, dst_ip, ddl)))\n\n def test_step_packet(self):\n c = self.conf()\n dst_ip = fresh_bv('dst_ip', 32)\n p0, p1, s0 = c.fresh_packet(), c.fresh_packet(), c.fresh_state()\n t = fresh_bv('time', 64)\n\n p_diff = c.fresh_packet()\n _, s1 = c.process_packet(s0, 'in', p_diff)\n flow_id = get_flow_id(p0)\n _, ss = c.handle_event(s0, 'cache', '', z3.BitVecVal(0, 64))\n self.verify(Implies(And(steer_to(c, s0, p0, dst_ip, t),\n Not(from_same_flow(p0, p_diff)),\n from_same_flow(p0, p1)),\n steer_to(c, s1, p0, dst_ip, t)),\n lambda m: [m, m.eval(ss['cache'].timestamps.has_key(flow_id)), m.eval(s0['cache'].timestamps.has_key(flow_id)), m.eval(s1['cache'].timestamps[flow_id][0]), m.eval(t), m.eval(s0['cache'].curr_time)])\n\n def test_step_time(self):\n c = self.conf()\n dst_ip = fresh_bv('dst_ip', 32)\n p0, p1, s0 = c.fresh_packet(), c.fresh_packet(), c.fresh_state()\n #t0 = s0['cache'].timestamps[get_flow_id(p0)][0]\n t0 = fresh_bv('time0', 64)\n\n t1 = fresh_bv('time1', 64)\n _, s1 = c.handle_event(s0, 'cache', '', t1)\n flow_id = get_flow_id(p0)\n self.verify(Implies(And(steer_to(c, s0, p0, dst_ip, t0),\n z3.ULT(t1, t0),\n from_same_flow(p0, p1)),\n steer_to(c, s1, p1, dst_ip, 
t0)))\n","repo_name":"Kaiyuan-Zhang/Gravel-public","sub_path":"specs/lb_simple.py","file_name":"lb_simple.py","file_ext":"py","file_size_in_byte":10269,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
{"seq_id":"13271802114","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n    def __init__(self, x: int, next: 'Node' = None, random: 'Node' = None):\n        self.val = int(x)\n        self.next = next\n        self.random = random\n\"\"\"\nclass Solution(object):\n    def copyRandomList(self, head):\n        if not head:\n            return None\n        p = head\n        # Step 1: insert a copy of each node right after the original\n        # 1->1'->2->2'->3->3'\n        while p:\n            new_node = Node(p.val,None,None)\n            new_node.next = p.next\n            p.next = new_node\n            p = new_node.next\n        p = head\n        # Step 2: wire up the random pointers of the copied nodes\n        while p:\n            if p.random:\n                p.next.random = p.random.next\n            p = p.next.next\n        # Step 3: detach the copied list from the original\n        p = head\n        dummy = Node(-1,None,None)\n        cur = dummy\n        while p:\n            cur.next = p.next\n            cur = cur.next\n            p.next = cur.next\n            p = p.next\n        return dummy.next\n\nclass Solution:\n    def copyRandomList(self, head: 'Node') -> 'Node':\n        if head is None:\n            return None\n        cur = head\n        while cur:\n            new_node = Node(cur.val)\n            new_node.next = cur.next\n            cur.next = new_node\n            cur = new_node.next\n\n        cur = head\n        while cur:\n            if cur.random:\n                cur.next.random = cur.random.next\n            cur = cur.next.next\n        \n        p = head\n        tmp = Node(-1)\n        cur = tmp\n        while p:\n            cur.next = p.next\n            cur = cur.next\n            p.next = cur.next\n            p = p.next\n\n        return tmp.next","repo_name":"maxwang967/kick-start","sub_path":"leetcode/138.py","file_name":"138.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"23766129644","text":"def traductor(secuencia): \n    \"\"\"\n    The traductor function translates a nucleotide sequence into amino acids\n    input: \n        secuencia: nucleotide sequence\n    output: \n        seqc: coding nucleotide sequence\n        peptido: amino acid sequence\n    \"\"\"\n    seqc = secuencia[secuencia.find(\"ATG\"):]\n    peptido = \"\"\n    for i in range(len(seqc)):\n        i *= 3\n        codon = seqc[i:i+3]\n        if len(codon) < 3: # not a full codon if only 0, 1 or 2 nucleotides remain\n            break\n        AmAc = codon_dictionary[codon] # codon_dictionary (codon -> amino acid table) must be defined elsewhere\n        if AmAc ==\"_\":\n            break\n        peptido += AmAc # equivalent to append\n    return([seqc,peptido])\n\n\ndef gcp(secuencia):\n    \"\"\"\n    gcp computes the percentage of G and C content\n    input: nucleotide sequence\n    output: GC percentage\n    \"\"\"\n    pgc = round(100*(secuencia.count(\"C\") + secuencia.count(\"G\"))/len(secuencia), 2)\n    return pgc\n\n\n\n# Cut sites of the EcoRI enzyme\n\nseq = \"CGATGATGAATTCGTACCCGAGCTGAATTCAGCAGAATTCAGCTGATCGATACCAGAATTCCATA\"\necoRI = \"GAATTC\"\ndef ecor1(enzima, seq):\n    sitios_corte = []\n    seq_copy = seq \n\n    while True:\n        sitiocorte = seq_copy.find(enzima)\n        if sitiocorte == -1:\n            break\n        frag1 = seq_copy[:sitiocorte + len(enzima)]\n        frag2 = seq_copy[sitiocorte + len(enzima):]\n        sitios_corte.append(frag1)\n        seq_copy = frag2\n\n    sitios_corte.append(seq_copy) \n\n    return sitios_corte\n\nresultados = ecor1(ecoRI, seq)\nprint(resultados)\n\n# Cut sites of the HindIII enzyme\n\nseq = \"CGATGATGAATTCGTACCCGAGCTGAATTCAGCAGAATTCAGCTGATCGATACCAGAATTCCATA\"\nHindIII = \"AAGCTT\"\ndef HindIII1(enzima, seq):\n    sitios_corte = []\n    seq_copy = seq \n\n    while True:\n        sitiocorte = seq_copy.find(enzima)\n        if sitiocorte == -1:\n            break\n        frag1 = seq_copy[:sitiocorte + len(enzima)]\n        frag2 = seq_copy[sitiocorte + len(enzima):]\n        sitios_corte.append(frag1)\n        seq_copy = frag2\n\n    sitios_corte.append(seq_copy) \n\n    return sitios_corte\n\nresultados = HindIII1(HindIII, seq)\nprint(resultados)\n\n\n# Cut sites of the NotI enzyme\n\nseq = \"CGATGATGAATTCGTACCCGAGCTGAATTCAGCAGAATTCAGCTGATCGATACCAGAATTCCATA\"\nNotI = \"GCGGCCGC\"\ndef NotI1(enzima, seq):\n    sitios_corte = []\n    seq_copy = seq \n\n    while True:\n        sitiocorte = seq_copy.find(enzima)\n        if sitiocorte == -1:\n            break\n        frag1 = seq_copy[:sitiocorte + len(enzima)]\n        frag2 = seq_copy[sitiocorte + len(enzima):]\n        sitios_corte.append(frag1)\n        seq_copy = frag2\n\n    sitios_corte.append(seq_copy) \n\n    return sitios_corte\n\nresultados = NotI1(NotI, seq)\nprint(resultados)\n","repo_name":"jheselalmeida-123/gbi6pyhton","sub_path":"restenz.py","file_name":"restenz.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"11720629753","text":"import os\nimport sys\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n\ndf = []\nboard_names = []\nfor i in range(len(sys.argv)-1):\n    file_path = sys.argv[i+1]\n\n    df.append(pd.read_csv(file_path))\n    board_names.append(os.path.basename(file_path).split('.')[0])\n\n    print(len(df))\n    print(file_path)\n\ndf_mean = []\nintra_var = []\ninter_var = []\ninter_var_tics = []\n\nfor i in range(len(df)):\n    df_mean.append(df[i].mean(axis=0))\n    df_std = df[i].std(axis=0)\n    print('Max standard deviation: {}'.format(max(df_std)))\n    num_of_error_bits = sum(1 for i in df_mean[i] if i not in (0.0, 1.0)) # Hamming Distance!\n    print(num_of_error_bits)\n    intra_var.append(num_of_error_bits/len(df_mean[i]))\n    print(str(intra_var[i]*100)+'%')\n    majority_out = [round(n) for n in df_mean[i]]\n    print('{}'.format(hex(int(''.join([str(s) for s in majority_out]), 2))))\n\n    # suma = 0\n    # for i in range(df[i].shape[0]):\n    #     suma += (sum(1 for j in range(df[i].shape[1]) if df_mean[i][j] != df[i].iloc[i, j])/df[i].shape[1])*100\n\n    # IC = suma/df[i].shape[0]\n    # print('ID : {}%'.format(IC))\n\nfor i in range(len(df)):\n    for j in range(i):\n        if (j != i):\n            num_of_error_bits = sum(1 for k in abs(df_mean[i]-df_mean[j]) if k == 1.) 
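The three enzyme functions in the restriction-enzyme record above are identical apart from the recognition site. A single generic digest function, a sketch that is not part of the original file, removes the duplication:

def digest(seq, site):
    # Cut after each occurrence of the recognition site, keeping the site
    # on the upstream fragment, exactly as the per-enzyme functions do.
    fragments = []
    rest = seq
    while True:
        cut = rest.find(site)
        if cut == -1:
            break
        fragments.append(rest[:cut + len(site)])
        rest = rest[cut + len(site):]
    fragments.append(rest)
    return fragments

if __name__ == "__main__":
    seq = "CGATGATGAATTCGTACCCGAGCTGAATTCAGCAGAATTCAGCTGATCGATACCAGAATTCCATA"
    for enzyme, site in [("EcoRI", "GAATTC"), ("HindIII", "AAGCTT"), ("NotI", "GCGGCCGC")]:
        parts = digest(seq, site)
        assert "".join(parts) == seq   # no bases lost or duplicated
        print(enzyme, parts)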
# Hamming Distance!\n temp_inter_var = 100 * num_of_error_bits/len(df_mean[i])\n inter_var.append(temp_inter_var)\n inter_var_tics.append(\"({},{})\".format(i, j))\n print(\"Inter puf variation ({},{}) {}\".format(i, j, temp_inter_var))\n\nmaen_inter_PUF = sum(inter_var)/len(inter_var)\nprint(\"Mean inter puf var: {}%\".format(maen_inter_PUF))\n\n#plt.figure(1)\n#ax1 = plt.subplot(len(df), 1, 1)\nfig, axes = plt.subplots(len(df), 1)\n\nfor i in range(len(df)):\n axes[i].text(-30, 0.25, \"b_\"+ board_names[i], fontsize=14)\n axes[i].plot(range(len(df_mean[i])), df_mean[i], '.')\n text_str = \"intra puf variations: {:.2f}%\".format(100*intra_var[i])\n axes[i].text(140, 0.5, text_str, fontsize=12)\n\n#plt.xticks(range(0,len(df_mean), 100), labels, rotation='vertical')\nplt.subplots_adjust(left=0.15, right=0.75)\n\nplt.show()\n\nplt.figure(2)\nplt.title(\"InterPUF variation\")\nplt.plot(inter_var, linestyle='', marker='o')\nplt.axhline(maen_inter_PUF, linestyle='dashed')\navg_description = \"average: {:.2f}%\".format(maen_inter_PUF)\nplt.text(0, maen_inter_PUF, avg_description)\nplt.xticks(range(len(inter_var)), inter_var_tics)\nplt.ylim(0, 100)\n\nplt.show()\n\n\n","repo_name":"Animki6/python-for-puf","sub_path":"compare_with_graph.py","file_name":"compare_with_graph.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73588263824","text":"from django.utils.timezone import now\nfrom django.db import models\nfrom django.contrib.postgres.fields.array import ArrayField\n\n\nclass Post(models.Model):\n author_id = models.PositiveIntegerField()\n title = models.CharField(max_length=256)\n content = models.TextField()\n created_at = models.DateTimeField(auto_now_add=True)\n users_liked_ids = ArrayField(\n base_field=models.PositiveIntegerField(),\n blank=True,\n default=list\n )\n\n def like(self, user_id):\n if user_id not in self.users_liked_ids:\n self.users_liked_ids.append(user_id)\n likes_by_day, created = LikesByDay.objects.get_or_create(date=now())\n likes_by_day.likes_count += 1\n likes_by_day.save()\n\n def unlike(self, user_id):\n if user_id in self.users_liked_ids:\n self.users_liked_ids.remove(user_id)\n likes_by_day, created = LikesByDay.objects.get_or_create(date=now())\n likes_by_day.likes_count -= 1\n likes_by_day.save()\n\n\nclass LikesByDay(models.Model):\n date = models.DateField()\n likes_count = models.IntegerField(default=0)\n","repo_name":"Ahoskie/test-task","sub_path":"social-network/backend/posts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24344038420","text":"\"\"\"\nDB UTIL\nAbstract Database operations to support various Flask forms and views (routes.py)\n\nFor use with FLASK and SQLalchemy\nDatabase schema / models - background.database\nDatabase session object instantiated as part of Flask app (see __init__.py)\n\nLast Update June 2019 by Jasmine Jones\n\n\"\"\"\n#from powertoken.models import User, Activity, Event\nfrom background.database import User, Activity, Event, Day\nfrom powertoken import db\nfrom sqlalchemy import exc\nfrom werkzeug.datastructures import MultiDict\nimport datetime\nimport logging, sys\nimport traceback\n\nlogging.basicConfig(stream=sys.stderr, level=logging.INFO)\n\ndef close_session():\n\tdb.session.close()\n\ndef pt_userExists(username, db=db):\n\t''' RETURNS TRUE | FALSE '''\n\tuser = 
db.session.query(User).filter_by(username=username).first()\n\t#user= User.query.filter_by(username=username).first()\n\tif user is None:\n\t\treturn False\n\telse:\n\t\treturn True\n\ndef pt_userProfileComplete(username, db=db):\n\t''' RETURNS TRUE | FALSE '''\n\tif pt_userExists(username, db):\n\t\tuser= db.session.query(User).filter_by(username=username).first()\n\t\tif any ([not user.wc_id, not user.wc_token, not user.fb_token]):\n\t\t\treturn False\n\t\telse:\n\t\t\treturn True\t\n\telse:\n\t\treturn False\n\ndef pt_addUser(username, db=db):\n\t'''\n\tGiven string @param username, adds a blank powertoken user with that name to the db \n\t'''\n\tif pt_userExists(username, db):\n\t\treturn\n\t\t\n\tuser= User(username=username)\n\tuser.metaphor = \"tally\"\n\tdb.session.add(user)\n\terrorMsg = None\n\t\n\t#error when same name is added\n\ttry:\n\t\tdb.session.commit()\n\texcept:\n\t\tdb.session.rollback()\n\t\tlogging.debug(\"User {} could not be added\".format(username))\t\n\n\t\ndef wc_addInfo(username, wc_id, wc_token, activities, db=db):\n\t'''\n\tAdd wc_user id, token and related activity info from WC API call to the database\n\t@params username String, wc_id integer, wc_token string, activities Dict with keys=id,name,expiration\n\t'''\n\n\t#update user information, regardless of new user or not. tokens always refresh upon login\t\n\tuser = db.session.query(User).filter_by(username=username).first()\n\tuser.wc_id = wc_id\n\tuser.wc_token = wc_token\n\t\n\t#add and update new activities\n\tfor act in activities:\n\t\t#check for existing ID\n\t\told_activity = db.session.query(Activity).filter_by(wc_act_id=act[\"id\"]).first()\n\t\tlogging.debug(\"to addInfo, retrieved {}\".format(old_activity))\n\t\t\n\t\tif old_activity is not None:\n\t\t\t#update existing activity (except key)\n\t\t\told_activity.expiration = act[\"name\"]\n\t\t\told_activity.expiration = act[\"expiration\"]\n\t\t\tuser_id= wc_id\n\t\telse:\n\t\t\t#create new activity if none with ID found\n\t\t\tnew_activity = Activity(wc_act_id=act[\"id\"], name=act[\"name\"], expiration=act[\"expiration\"], user_id=wc_id)\n\t\t\tdb.session.add(new_activity)\n\n\t#rollback session if commit fails\n\terrorMsg = None\n\ttry:\n\t\tdb.session.commit()\n\texcept exc.IntegrityError as error:\n\t\terrorMsg = \"Could not add user info to database\"\n\t\tdb.session.rollback()\n\t\tlogging.debug(sys.exc_info()[0])\n\tfinally:\n\t\treturn errorMsg\n\n\ndef wc_getUserActivities(username, db=db):\n\t'''\n\tGet a list of a user's activities from the database\n\tReturns a MultiDict\n\t'''\n\tuser = db.session.query(User).filter_by(username=username).first()\n\tif user is None:\n\t\treturn None\n\n\tactivities = []\t\t\n\tfor act in user.activities.all():\n\t\t# Don't show the user expired activities (but they still need to be\n\t\t# in the database).\n\t\tif act.expiration > datetime.datetime.now():\n\t\t\t# return a MultiDict data structure for use in flask forms\n\t\t\td = MultiDict([(\"wc_act_id\", act.wc_act_id), (\"act_name\", act.name),\n\t\t\t\t\t(\"weight\", act.weight)])\n\t\t\tactivities.append(d)\n\t\n\treturn activities\n\t\ndef wc_addActivityWeight(username, activity_list, db=db):\n\t'''\n\t@params username string, activity_list is list of tuples [(id, weight), ...]\n\t'''\n\tuser = db.session.query(User).filter_by(username=username).first()\n\tfor act in activity_list:\n\t\twc_act = user.activities.filter_by(wc_act_id=act[0]).first()\n\t\twc_act.weight = act[1]\n\n\tdb.session.commit()\n\t\ndef fb_addInfo(username, fb_token, 
db=db):\n\t'''\n\tSave fitbit token to user profile in the database\n\t@params powertoken username and fitbit token\t\n\t'''\n\tuser= db.session.query(User).filter_by(username=username).first()\n\tuser.fb_token = fb_token\n\t\n\terrorMsg = None\n\ttry:\n\t\tdb.session.commit()\n\texcept:\n\t\tdb.rollback()\n\t\terrorMsg = \"User with username already exists\"\n\n\treturn errorMsg\n\ndef viz_dataDict(username):\n\t'''\n\tReturns dict of data needed to generate vizualization\n\t'''\n\t# FORMAT\tjstr = { \"user\": \"PT002\", \"progress\": 0.25, \"activities\": [{\"start_time\": \"09:30:00\", \"weight\": 5, \"completed\": \"false\", \"name\": \"act1\"}, {\"start_time\": \"13:16:00\", \"weight\": 3, \"completed\": \"true\", \"name\": \"act2\"}, {\"start_time\": \"14:45:00\", \"weight\": 1, \"completed\": \"false\", \"name\": \"act3\"}, {\"start_time\": \"05:00:00\", \"weight\": 2, \"completed\": \"true\", \"name\": \"act4\"}]}\n\tjstr = { \"user\": \"test\", \"progress\": 0.25, \"activities\": [{\"start_time\": \"09:30:00\", \"weight\": 5, \"completed\": \"false\", \"name\": \"act1\"}, {\"start_time\": \"13:16:00\", \"weight\": 3, \"completed\": \"true\", \"name\": \"act2\"}, {\"start_time\": \"14:45:00\", \"weight\": 1, \"completed\": \"false\", \"name\": \"act3\"}, {\"start_time\": \"05:00:00\", \"weight\": 2, \"completed\": \"true\", \"name\": \"act4\"}]}\n\n\tuser = db.session.query(User).filter_by(username=username).first()\n\tif user is None:\n\t\tlogging.info(\"User {} not found. Returning test data\".format(username))\n\t\treturn jstr\n\t\t\n\tuser_name = user.username\n\t#from day get list of activities\n\tday = user.thisday() \n\tif day is None:\n\t\tlogging.info(\"User {} does not have this day. Returning test data\".format(username))\n\t\treturn jstr\n\t\t\n\tprogress = day.computed_progress\n\tactivities = [] #info: start_time, weight, name\n\tfor event in day.events.all():\n\t\ttime = event.start_time\n\t\tcompleted = event.completed\n\t\tname = event.activity.name\n\t\tweight = event.activity.weight\n\t\n\t\tactivity = {\"start_time\": time, \"weight\": weight, \"completed\": completed, \"name\": name}\n\t\tactivities.append(activity)\n\t\n\t# map charge/tally step count to 0-1 scale\n\tif progress > 1\t:\n\t\tprogress = map_steps_to_progress(progress)\n\t\tlogging.info(\"(db_util.viz_data) Steps to progress: {} to {}\".format(day.computed_progress, progress))\n\n\tdata = {\"user\":user_name, \"progress\":progress, \"activities\": activities}\n\treturn data\n\n\ndef map_steps_to_progress(step_count):\n\toldmin = 0\n\toldmax = 100000\n\tnewmin = 0\n\tnewmax = 1\n\t\n\toldrange = oldmax - oldmin\n\tnewrange = newmax - newmin\n\tnewvalue = (((step_count - oldmin) * newrange) / oldrange) + newmin\n\t\n\treturn newvalue\n\n\n\n\n\n\n\n\n\n\n\n\t","repo_name":"jazzij/powertoken","sub_path":"powertoken/db_util.py","file_name":"db_util.py","file_ext":"py","file_size_in_byte":6493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1591458966","text":"from gpiozero import LEDBoard\nfrom gpiozero.tools import random_values, sin_values, scaled\nfrom signal import pause\n\n\ndef random():\n for led in tree:\n led.source_delay = 0.1\n led.source = random_values()\n\n\ndef sin():\n for led in tree:\n led.source_delay = 0.1\n led.source = scaled(sin_values(), 0, 1, -1, 1)\n\n\nif __name__ == '__main__':\n tree = LEDBoard(*range(2, 28), pwm=True)\n random()\n # sin()\n 
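map_steps_to_progress in the db_util record above is a plain affine rescale of 0 to 100000 steps onto the 0 to 1 range. The same mapping written directly, with quick checks (a sketch):

def rescale(value, old_min=0.0, old_max=100000.0, new_min=0.0, new_max=1.0):
    # Affine map of [old_min, old_max] onto [new_min, new_max].
    return new_min + (value - old_min) * (new_max - new_min) / (old_max - old_min)

if __name__ == "__main__":
    assert rescale(0) == 0.0
    assert rescale(100000) == 1.0
    assert abs(rescale(50000) - 0.5) < 1e-12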
pause()\n","repo_name":"andrewtatham/LightyPi","sub_path":"xmas/xmas_tree.py","file_name":"xmas_tree.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40327133412","text":"def makeItBig(list):\n for i in range(len(list)):\n if list[i] > 0:\n list[i] = \"big\"\n\ndef countPositives(list):\n count = 0\n for i in list:\n if i > 0:\n count += 1\n list[len(list)-1] = count\n\ndef sumTotal(list):\n sum = 0\n for i in list:\n sum += i\n return sum\n\ndef average(list):\n avg = 0.0\n for i in list:\n avg += i\n avg = avg / len(list)\n return avg\n\ndef length(list):\n return len(list)\n\ndef minimum(list):\n if len(list) == 0:\n return False\n min = list[0]\n for i in list:\n if i < min:\n min = i\n return min\n \ndef maximum(list):\n if len(list) == 0:\n return False\n max = list[0]\n for i in list:\n if i > max:\n max = i\n return max\n\ndef UltimateAnalyze(list):\n sum = 0\n min = list[0]\n max = list[0]\n length = len(list)\n\n for i in list:\n sum += i\n if i < min:\n min = i\n if i > max:\n max = i\n \n avg = float(sum) / float(length)\n return {\"sumTotal\": sum, \"average\": avg, \"minimum\": min, \"maximum\": max}\n\ndef reverseList(list):\n for i in range(int(len(list)/2)):\n end = len(list)-i-1\n tmp = list[end]\n list[end] = list[i]\n list[i] = tmp\n\n","repo_name":"jaytpeters/Dojo","sub_path":"python_stack/python_fundamentals/forLoopBasic2.py","file_name":"forLoopBasic2.py","file_ext":"py","file_size_in_byte":1286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"163749613","text":"from textwrap import dedent\nfrom typing import List\n\nimport openai\nfrom backend.approaches.approach import Approach\nfrom backend.cognition.openai_client import OpenAIClient\nfrom backend.cognition.openai_settings import ChatCompletionsSettings\nfrom backend.config import DefaultConfig\nfrom backend.contracts.chat_response import ApproachType\nfrom common.contracts.chat_session import DialogClassification\nfrom common.logging.log_helper import CustomLogger\n\n\nclass ApproachClassifier(Approach):\n def __init__(self, logger: CustomLogger):\n self.logger = logger\n\n def run(\n self, history: List[str], bot_config, openai_client: OpenAIClient\n ) -> ApproachType:\n\n message_list = [\n {\n \"role\": \"system\",\n \"content\": dedent(bot_config[\"approach_classifier\"][\"system_prompt\"]),\n }\n ]\n\n if bot_config[\"approach_classifier\"][\"history\"][\"include\"]:\n # TODO: SWE to add comment here explaining the logic\n for message in history[\n -((bot_config[\"approach_classifier\"][\"history\"][\"length\"] * 2) + 1) :\n ]:\n if message[\"participant_type\"] == \"user\":\n message_list.append(\n {\"role\": \"user\", \"content\": message[\"utterance\"]}\n )\n else:\n classification = \"\"\n if (\n message[\"question_type\"]\n == DialogClassification.structured_query.name\n ):\n classification = ApproachType.structured.value\n elif (\n message[\"question_type\"]\n == DialogClassification.unstructured_query.name\n ):\n classification = ApproachType.unstructured.value\n elif (\n message[\"question_type\"] == DialogClassification.chit_chat.name\n ):\n classification = ApproachType.chit_chat.value\n else:\n classification = ApproachType.unstructured.value\n message_list.append(\n {\"role\": \"assistant\", \"content\": classification}\n )\n else:\n message_list.append({\"role\": \"user\", \"content\": history[-1][\"utterance\"]})\n try:\n 
response = openai_client.chat_completions(\n messages=message_list,\n openai_settings=ChatCompletionsSettings(\n **bot_config[\"approach_classifier\"][\"openai_settings\"]\n ),\n api_base=f\"https://{DefaultConfig.AZURE_OPENAI_CLASSIFIER_SERVICE}.openai.azure.com\",\n api_key=DefaultConfig.AZURE_OPENAI_CLASSIFIER_API_KEY,\n )\n except openai.error.InvalidRequestError as e:\n self.logger.error(f\"OpenAI API Error: {e}\", exc_info=True)\n raise e\n\n classification_response: str = response[\"choices\"][0][\"message\"][\"content\"]\n self.log_aoai_response_details(\n f'Classification Prompt:{history[-1][\"utterance\"]}',\n f\"Response: {classification_response}\",\n response,\n )\n if classification_response == \"1\":\n return ApproachType.structured\n elif classification_response == \"2\":\n return ApproachType.unstructured\n elif classification_response == \"3\":\n return ApproachType.chit_chat\n elif classification_response == \"4\":\n # Continuation: Return last question type from history if it exists\n if len(history) > 1:\n last_question_type = history[-2][\"question_type\"]\n if last_question_type == DialogClassification.structured_query.value:\n return ApproachType.structured\n elif (\n last_question_type == DialogClassification.unstructured_query.value\n ):\n return ApproachType.unstructured\n elif last_question_type == DialogClassification.chit_chat.value:\n return ApproachType.chit_chat\n elif last_question_type == DialogClassification.inappropiate.value:\n return ApproachType.inappropriate\n else:\n raise Exception(f\"Unknown question type: {last_question_type}\")\n else:\n return ApproachType.unstructured\n elif classification_response == \"5\":\n # This is a special case where the user has typed something that violates guardrails because it contains illegal, harmful or malicious content\n return ApproachType.inappropriate\n else:\n return ApproachType.unstructured\n\n def log_aoai_response_details(self, prompt, result, aoai_response):\n addl_dimensions = {\n \"completion_tokens\": aoai_response.usage.get(\"completion_tokens\", 0),\n \"prompt_tokens\": aoai_response.usage.prompt_tokens,\n \"total_tokens\": aoai_response.usage.total_tokens,\n \"aoai_response[MS]\": aoai_response.response_ms,\n }\n addl_properties = self.logger.get_updated_properties(addl_dimensions)\n self.logger.info(f\"prompt: {prompt}, response: {result}\", extra=addl_properties)\n","repo_name":"Azure-Samples/openai","sub_path":"End_to_end_Solutions/AOAISearchDemo/app/backend/approaches/approach_classifier.py","file_name":"approach_classifier.py","file_ext":"py","file_size_in_byte":5382,"program_lang":"python","lang":"en","doc_type":"code","stars":644,"dataset":"github-code","pt":"48"} +{"seq_id":"6907773212","text":"from os import error\nfrom client import send\nimport glob\nimport json\nimport requests\nfrom typing import Dict, List\n\n\n# log_postのJSONの末尾に','がなかったときのやつ\ndef format_log_old_ver(log_file: str) -> List[dict]:\n with open(log_file, 'r') as f:\n read_strings = ''.join([s.strip() for s in f.readlines()])\n brackets = {'{': '}', '[': ']'}\n stack = []\n json_string = '['\n for string in read_strings:\n if string in brackets.keys():\n stack.append(brackets[string])\n if string in brackets.values():\n stack.pop(-1)\n if not stack:\n json_string += string\n json_string += ','\n continue\n json_string += string\n json_string = json_string[:-1] + ']'\n return json.loads(json_string)\n\n\ndef send_log(url: str, data_list: List[dict]):\n for data in data_list:\n res = requests.post(url, json=data)\n try:\n 
print(res, res.json())\n except Exception:\n print(res)\n\n\nif __name__ == '__main__':\n url = 'http://127.0.0.1:5000/car'\n log_files = glob.glob('./log_post/*')\n for log_file in log_files:\n send_log(url, data_list=format_log_old_ver(log_file))\n","repo_name":"cfdtirej/radicon-run-server","sub_path":"log_send.py","file_name":"log_send.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24221317990","text":"import torch\nfrom torch.utils.data import Dataset\nimport numpy as np\nimport cv2\n\nCOLORS = ['#F44336',\"#E91E63\",'#9C27B0','#673AB7','#3F51B5','#2196F3','#03A9F4','#00BCD4','#4CAF50',\n '#8BC34A','#CDDC39','#FFEB3B','#FFC107','#FF9800','#FF5722']\n\n\nR_MIN = 12\nR_MAX = 36\nline_width_min = 2\nline_width_max = 4\nbackground_intensity = 30.0 / 255.0\n\ndef hex2rgb(h):\n h = h.lstrip('#')\n return tuple(int(h[i:i+2], 16) for i in (0, 2 ,4))\n\ndef DrawRandomCircle(img,segments,r_min,r_max,alpha):\n color = hex2rgb( np.random.choice(COLORS) )\n t = np.random.random()\n r = int(t * r_min + (1-t) * r_max)\n ti = np.random.random()\n tj = np.random.random()\n i = int(ti*img.shape[0])\n j = int(tj*img.shape[1])\n canvas = img.copy()\n cv2.circle(canvas,(i,j), r, color, -1)\n cv2.circle(segments,(i,j), r, (1,0,0), -1)\n img = cv2.addWeighted(img, 1.0 - alpha, canvas, alpha, 0, img )\n box = [i-r,j-r,2*r,2*r]\n return img,segments,box\n\ndef DrawRandomSquare(img,segments,r_min,r_max,alpha):\n color = hex2rgb( np.random.choice(COLORS) )\n t = np.random.random()\n r = int(t * r_min + (1-t) * r_max)\n i = int(np.random.random()*img.shape[0])\n j = int(np.random.random()*img.shape[1])\n theta = np.pi * np.random.random()\n ri = r*np.cos(theta)\n rj = r*np.sin(theta) \n pts = [(ri,rj),(-rj,ri),(-ri,-rj),(rj,-ri) ]\n pts = [(i+y,j+x) for (y,x) in pts]\n pts = np.array(pts, np.int32)\n pts = pts.reshape((-1,1,2))\n canvas = img.copy()\n cv2.fillPoly(canvas,[pts],color)\n cv2.fillPoly(segments,[pts],(0,1,0))\n img = cv2.addWeighted(img, 1.0 - alpha, canvas, alpha, 0, img )\n box = [min(pts[:,:,0])[0],min(pts[:,:,1])[0], max(pts[:,:,0])[0]-min(pts[:,:,0])[0], max(pts[:,:,1])[0] - min(pts[:,:,1])[0] ]\n return img,segments,box\n\ndef DrawRandomLine(img,segments,line_width_min,line_width_max,alpha):\n color = hex2rgb( np.random.choice(COLORS) )\n t = np.random.random()\n line_width = int(t * line_width_min + (1-t) * line_width_max)\n i1 = int(np.random.random()*img.shape[0])\n j1 = int(np.random.random()*img.shape[1])\n i2 = int(np.random.random()*img.shape[0])\n j2 = int(np.random.random()*img.shape[1])\n canvas = img.copy()\n cv2.line(canvas,(i1,j1),(i2,j2),color,line_width)\n cv2.line(segments,(i1,j1),(i2,j2),(0,0,1),line_width)\n img = cv2.addWeighted(img, 1.0 - alpha, canvas, alpha, 0, img )\n pts = np.asarray([(i1,j1),(i2,j2)])\n box = [min(pts[:,0]),min(pts[:,1]), max(pts[:,0])-min(pts[:,0]), max(pts[:,1]) - min(pts[:,1]) ]\n return img,segments,box\n\ndef generateSegmentation(canvas_size, n_max, alpha = 0.5, noise_types=[], r_min=R_MIN,r_max=R_MAX):\n background_intensity = np.clip( np.random.normal(80.0 / 255.0, 40.0 / 255.0) , 0,1)\n canvas = background_intensity * np.ones((canvas_size,canvas_size,3))\n segments = np.zeros((canvas_size,canvas_size,3))\n boxes = []\n labels = []\n for _ in range(np.random.choice(range(n_max))):\n canvas,segments,b = DrawRandomCircle(canvas,segments,r_min,r_max,alpha)\n boxes += [b]\n labels += [1]\n for _ in range(np.random.choice(range(n_max))):\n 
canvas,segments,b = DrawRandomSquare(canvas,segments,r_min,r_max,alpha)\n boxes += [b]\n labels += [2]\n for _ in range(np.random.choice(range(n_max))):\n canvas,segments,b = DrawRandomLine(canvas,segments,line_width_min,line_width_max,alpha)\n boxes += [b]\n labels += [3]\n for t in noise_types:\n canvas = noisy(t,canvas)\n return canvas,segments, labels, boxes\n\ndef generateClassification(canvas_size, alpha = 0.5, noise_types=[]):\n canvas = background_intensity * np.ones((canvas_size,canvas_size,3))\n segments = np.zeros((canvas_size,canvas_size,3))\n label = np.random.choice(3)\n if label ==0:\n canvas,_,box = DrawRandomCircle(canvas,segments,r_min,r_max,alpha)\n elif label == 1:\n canvas,_,box = DrawRandomSquare(canvas,segments,r_min,r_max,alpha)\n elif label == 2:\n canvas,_,box = DrawRandomLine(canvas,segments,line_width_min,line_width_max,alpha)\n for t in noise_types:\n canvas = noisy(t,canvas)\n label += 1\n return canvas,label,box\n\ndef generateDetection(canvas_size, n_max, alpha = 0.5, noise_types=[]):\n canvas = background_intensity * np.ones((canvas_size,canvas_size,3))\n segments = np.zeros((canvas_size,canvas_size,3))\n boxes = []\n labels = []\n for _ in range(max(1,np.random.choice(range(n_max)))):\n canvas,segments,b = DrawRandomCircle(canvas,segments,r_min,r_max,alpha)\n boxes += [b]\n labels += [1]\n for _ in range(max(1,np.random.choice(range(n_max)))):\n canvas,segments,b = DrawRandomSquare(canvas,segments,r_min,r_max,alpha)\n boxes += [b]\n labels += [2]\n for _ in range(max(1,np.random.choice(range(n_max)))):\n canvas,segments,b = DrawRandomLine(canvas,segments,line_width_min,line_width_max,alpha)\n boxes += [b]\n labels += [3]\n for t in noise_types:\n canvas = noisy(t,canvas)\n return canvas,segments, labels, boxes\n\ndef stackSegments(segments):\n canvas = np.zeros((segments.shape[:2]))\n canvas += 1 * segments[:,:,0]\n canvas += 2 * segments[:,:,1]\n canvas += 3 * segments[:,:,2]\n return canvas\n\nclass SimpleSegmentationDataset(Dataset):\n \"\"\"A simple dataset for image segmentation purpose\"\"\"\n def __init__(self, patch_size, n_max, alpha =1.0,virtual_size=1000, stack=True, r_min=R_MIN, r_max=R_MAX):\n self.r_min = r_min\n self.r_max = r_max\n self.virtual_size = virtual_size\n self.patch_size = patch_size\n self.n_max = n_max\n self.alpha = alpha\n self.stack = stack\n\n def __len__(self):\n return self.virtual_size\n\n def __getitem__(self, idx):\n x,y,_,_ = generateSegmentation(self.patch_size, self.n_max, self.alpha,r_min=self.r_min,r_max=self.r_max)\n # none leayer for the segmentation\n none_layer = (1 - (y.sum(2) > 0 )).astype(np.uint8)[:,:,np.newaxis]\n y = np.concatenate([none_layer, y], axis = 2)\n # Torch format\n x = x.transpose([2,0,1])\n y = y.transpose([2,0,1])\n if self.stack:\n y = stackSegments(y)\n y = y[np.newaxis,:,:]\n x = torch.from_numpy(x).float()\n y = torch.from_numpy(y).float()\n return x,y\n \nclass SimpleClassificationDataset(Dataset):\n \"\"\"\n A simple dataset for image classification purpose\"\"\"\n def __init__(self, patch_size, alpha =1.0,virtual_size=1000):\n self.virtual_size = virtual_size\n self.patch_size = patch_size\n self.alpha = alpha\n \n def __len__(self):\n return self.virtual_size\n\n def __getitem__(self, idx):\n x,y,box = generateClassification(self.patch_size, self.alpha)\n x = x.transpose([2,0,1])\n x = torch.from_numpy(x).float()\n #y = torch.from_numpy(y).long()\n return x,y\n \nclass SimpleDetectionDataset(Dataset):\n \"\"\"\n Work in Progess\n \n A simple dataset for image classification 
purpose\"\"\"\n def __init__(self, patch_size, n_max, alpha =1.0,virtual_size=1000):\n self.virtual_size = virtual_size\n self.patch_size = patch_size\n self.alpha = alpha\n self.n_max = n_max\n \n def __len__(self):\n return self.virtual_size\n\n def __getitem__(self, idx):\n x,y,box = generateClassification(self.patch_size, self.alpha)\n x = x.transpose([2,0,1])\n x = torch.from_numpy(x).float()\n #y = torch.from_numpy(y).long()\n box = torch.from_numpy(np.asarray(box)).float()\n return x,y,box\n\ndef drawBox(img,box, text=None):\n img = img.astype(np.uint8).copy() \n cv2.rectangle(img,(box[0],box[1]),(box[2],box[3]),(255, 235, 59),2)\n if text:\n font = cv2.FONT_HERSHEY_SIMPLEX\n img = cv2.putText(img,text,(int(box[0]),int(box[1]-5)), font, 0.5,(255, 235, 59),2,cv2.LINE_AA)\n return img\n\n","repo_name":"vlievin/ToyDatasets","sub_path":"shapeSorter.py","file_name":"shapeSorter.py","file_ext":"py","file_size_in_byte":7948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8689107435","text":"# cook your dish here\ndef find(node):\n res = node\n while res != par[res]:\n par[res] = par[par[res]]\n res = par[res]\n return res\n\n\ndef union(n1, n2):\n p1, p2 = find(n1), find(n2)\n if p1 == p2:\n return 0\n if rank[p2] > rank[p1]:\n par[p1] = p2\n rank[p2] += rank[p1]\n else:\n par[p2] = p1\n rank[p1] += rank[p2]\n return 1\n\nt = int(input())\nfor _ in range(t):\n n, m = list(map(int,input().split()))\n par = [i for i in range(n)]\n rank = [1 for _ in range(n)]\n for _ in range(m):\n a, b = list(map(int,input().split()))\n union(a, b)\n q = int(input())\n for _ in range(q):\n x, y = list(map(int,input().split()))\n if find(x) == find(y):\n print('YO')\n else:\n print('NO')","repo_name":"dhruv-gautam16/Code_Chef-Contest-","sub_path":"HDELIVER.py","file_name":"HDELIVER.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"20641982473","text":"from Track import *\nfrom BoundingBox import *\nimport math\nimport cv2\nimport numpy as np\nimport random\nimport datetime, os\n\n\ndef merge_tracks(track_array, unresolved_points, frame_difference, frameCount, radius=50):\n \"\"\"\n Hlavna funkcia ktora sluzi na mergovanie tras. Na zaciatku sa prerobia trasy z maticneho tvaru na objekty.\n Nasledne sa najdu kandidati na spojenie podla parametra difference a radius. 
 Then merge candidates are found according to the difference and radius parameters. The tracks are then joined based on\n the flow matrix or on a mean squared error calculation.\n :param track_array: list of tracks as a matrix of points where each point is defined as [x, y, assigned, frame]\n :param unresolved_points: list of unassigned points\n :param frame_difference: maximum number of skipped frames\n :param radius: maximum neighbourhood that is searched\n :return: list of merged tracks in matrix form\n \"\"\"\n sum0 = 0\n for tr in track_array:\n sum0 += len(tr)\n\n tracks = create_tracks(track_array)\n print_info(tracks, frameCount)\n before = []\n for track in tracks:\n before.append(Track(None, track))\n unresolved_points = create_bounding_boxes(unresolved_points)\n # loop all tracks\n array_for_join = []\n array_for_join_points = []\n\n for track in tracks:\n # for all 1 to frame difference add point and check tracks for merge\n tracks_in_radius = []\n points_in_radius = []\n\n # check tracks for merge\n for track_for_merge in tracks:\n if track != track_for_merge:\n # compare last point from first track with first point in second track\n track2_first_bb = track_for_merge.bounding_boxes[0]\n track1_last_bb = track.bounding_boxes[-1]\n if is_in_radius(track1_last_bb, track2_first_bb, radius) \\\n and track1_last_bb.frame_index < track2_first_bb.frame_index <= track1_last_bb.frame_index + frame_difference:\n tracks_in_radius.append(Track(None, track_for_merge))\n\n # add array of all tracks which can be joined to track\n array_for_join.append(tracks_in_radius)\n # add array of all points which can be joined to track\n array_for_join_points.append(points_in_radius)\n\n merged = join_tracks_min_error(tracks, array_for_join, True, False)\n print_info(merged, frameCount)\n\n sum1 = 0\n sum2 = 0\n for tr in before:\n sum1 += len(tr.bounding_boxes)\n for tr in merged:\n sum2 += len(tr.bounding_boxes)\n\n print('sum0=' + str(sum0) + 'sum1=' + str(sum1) + 'sum2=' + str(sum2))\n merged = track_object_to_matrix(merged)\n return merged\n\n
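\n# Illustrative call (hypothetical data, for illustration only): a track is a\n# list of points [x, y, assigned, frame], so two short tracks one frame apart\n# could be merged like this:\n#\n#     tracks = [[[10, 10, 1, 0]], [[12, 11, 1, 1]]]\n#     merged = merge_tracks(tracks, [], frame_difference=2, frameCount=2)\n\n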
Operacia sa opakuje\n (Z2 - Z1) krat.\n :param track_array: pole tras vo formate matice bodov kde kazdy bod je definovany [x, y, zaradeny, frame]\n :param flow_matrix: tokova matica\n :param frame_difference: maximalny rozdiel framov\n :param radius: maximalny radius\n :return: maticu spojenych tras\n \"\"\"\n sum0 = 0\n for tr in track_array:\n sum0 += len(tr)\n tracks = create_tracks(track_array)\n\n print_info(tracks)\n before = []\n for track in tracks:\n before.append(Track(None, track))\n # loop all tracks\n array_for_join = []\n\n for track in tracks:\n tracks_in_frame_range = []\n # check tracks for merge\n for track_for_merge in tracks:\n if track != track_for_merge:\n # compare last point from first track with first point in second track\n track2_first_bb = track_for_merge.bounding_boxes[0].frame_index\n track1_last_bb = track.bounding_boxes[-1].frame_index\n if track1_last_bb < track2_first_bb <= track1_last_bb + frame_difference:\n tracks_in_frame_range.append(track_for_merge)\n # add array of all tracks which can be joined to track\n\n array_for_join.append(tracks_in_frame_range)\n\n new_tracks = join_tracks_flow_matrix(tracks, array_for_join, flow_matrix, radius)\n print_info(new_tracks)\n\n print('done')\n sum1 = 0\n sum2 = 0\n for tr in before:\n sum1 += len(tr.bounding_boxes)\n for tr in new_tracks:\n sum2 += len(tr.bounding_boxes)\n\n print('sum0=' + str(sum0) + 'sum1=' + str(sum1)+ 'sum2=' + str(sum2))\n new_tracks = track_object_to_matrix(new_tracks)\n return new_tracks\n\n\ndef join_tracks_flow_matrix(tracks, tracks_for_merge, flow_matrix, max_radius):\n \"\"\"\n Funkcia spaja trasy podla tokovej matice. Pre kazdu trasu a jej kandidata vypocita vzdialenost podla tokovej matice.\n Ak je vzdialenost mensia ako paramater radius, prida sa do pola na spajanie spolu s informaciou o vzdialenosti.\n Nasledne sa toto pole utriedi podla vzdialenosti od najmensej a spoja sa trasy. Trasy ktore sa spoja su z pola zmazane\n podla nasledovnych pravidiel. Nech A je povodna trasa a B je jeho kandidat na spojenie.\n 1. Zmazat vsetky prvky pola kde kandidat na spojenie je trasa B.\n 2. Zmazat vsetky prvky pola kde povodna trasa je trasa A.\n 3. 
 x_max = len(flow_matrix)\n y_max = len(flow_matrix[0])\n print('x max=' + str(x_max))\n print('y max=' + str(y_max))\n\n merge_tracks_array = []\n for track_index in range(len(tracks)):\n # frame number of the last cell in track1\n frame = tracks[track_index].bounding_boxes[-1].frame_index\n # for every candidate compute the distance according to the flow matrix\n for merge_index in range(len(tracks_for_merge[track_index])):\n x = tracks[track_index].bounding_boxes[-1].x\n y = tracks[track_index].bounding_boxes[-1].y\n # first cell of the track that is a merge candidate for track1\n second_track_frame_index = tracks_for_merge[track_index][merge_index].bounding_boxes[0].frame_index\n frame_diff = second_track_frame_index - frame\n # compute the position where the point will end up according to the flow matrix\n for index in range(frame_diff):\n # add the value from the flow matrix\n x_int = int(x)\n y_int = int(y)\n if x_int < x_max and y_int < y_max:\n x_temp = flow_matrix[x_int][y_int][1][0]\n y_temp = flow_matrix[x_int][y_int][1][1]\n angle_radiant = get_vector_angle(x_temp, y_temp)\n x, y = get_position(x, y, angle_radiant, tracks[track_index].speed)\n #print('x=' + str(x_int) + ' y=' + str(y_int))\n #x += flow_matrix[x_int][y_int][1][0]\n #y += flow_matrix[x_int][y_int][1][1]\n first_x = tracks_for_merge[track_index][merge_index].bounding_boxes[0].x\n first_y = tracks_for_merge[track_index][merge_index].bounding_boxes[0].y\n distance = get_distance_array([first_x, first_y], [x, y])\n if distance < max_radius:\n new_merge_track = MergeTracks(Track(None, tracks[track_index]), Track(None, tracks_for_merge[track_index][merge_index]))\n new_merge_track.distance = distance\n merge_tracks_array.append(new_merge_track)\n\n merge_tracks_array.sort(key=lambda merge_track: merge_track.distance)\n final_array = []\n # join the tracks one by one\n while len(merge_tracks_array) > 0:\n first = merge_tracks_array[0].first_track\n first_copy = Track(None, first)\n second = merge_tracks_array[0].second_track\n #todo del\n if first in tracks:\n tracks.remove(first)\n if second in tracks:\n tracks.remove(second)\n first.merge_tracks(second)\n # add the merged track to the result list\n final_array.append(first)\n # delete the merged entry\n del merge_tracks_array[0]\n copy = merge_tracks_array.copy()\n # list of indices to delete\n index_for_del = []\n for index in range(len(copy)):\n if copy[index].first_track == first_copy:\n index_for_del.append(index)\n if copy[index].first_track == second:\n merge_tracks_array[index].first_track = first\n if copy[index].second_track == first_copy:\n merge_tracks_array[index].second_track = first\n if copy[index].second_track == second:\n index_for_del.append(index)\n if copy[index].first_track == first:\n index_for_del.append(index)\n if copy[index].second_track == first:\n index_for_del.append(index)\n for ind in reversed(index_for_del):\n del merge_tracks_array[ind]\n\n no_duplicate_array = []\n\n for track in final_array:\n if track not in no_duplicate_array:\n no_duplicate_array.append(track)\n for track in tracks:\n if track not in no_duplicate_array:\n no_duplicate_array.append(track)\n # print_track2(no_duplicate_array, no_duplicate_array)\n return no_duplicate_array\n\n\ndef 
join_tracks_min_error(tracks, tracks_for_merge, method1=True, method2=False):\n \"\"\"\n Joins tracks based on the smallest merge error. The merge error is computed by 3 functions.\n :param tracks: list of tracks\n :param tracks_for_merge: list of candidates for every track\n :param method1: if True, the mean squared error of the N last points is used\n :param method2: if True, the mean squared error alpha n function is used\n :return: merged tracks\n \"\"\"\n merge_tracks_array = []\n # create list with merge tracks\n for track_index in range(len(tracks)):\n for merge_index in range(len(tracks_for_merge[track_index])):\n merge = MergeTracks(Track(None, tracks[track_index]), Track(None, tracks_for_merge[track_index][merge_index]))\n if method1:\n merge.mean_squared_error_n_last(8)\n elif method2:\n merge.mean_squared_error_alfa_n()\n merge_tracks_array.append(merge)\n\n # sort the list by merge error\n merge_tracks_array.sort(key=lambda merge_track : merge_track.sum)\n string = ''\n for tr in merge_tracks_array:\n string += str(tr.sum) + ','\n print(string)\n\n final_array = []\n while len(merge_tracks_array) > 0:\n first = merge_tracks_array[0].first_track\n first_copy = Track(None, first)\n second = merge_tracks_array[0].second_track\n if first in tracks:\n tracks.remove(first)\n if second in tracks:\n tracks.remove(second)\n first.merge_tracks(second)\n final_array.append(first)\n del merge_tracks_array[0]\n copy = merge_tracks_array.copy()\n index_for_del = []\n for index in range(len(copy)):\n if copy[index].first_track == first_copy:\n index_for_del.append(index)\n if copy[index].first_track == second:\n merge_tracks_array[index].first_track = first\n if copy[index].second_track == first_copy:\n merge_tracks_array[index].second_track = first\n if copy[index].second_track == second:\n index_for_del.append(index)\n if copy[index].first_track == first:\n index_for_del.append(index)\n if copy[index].second_track == first:\n index_for_del.append(index)\n for ind in reversed(index_for_del):\n del merge_tracks_array[ind]\n\n no_duplicate_array = []\n\n for track in final_array:\n if track not in no_duplicate_array:\n no_duplicate_array.append(track)\n for track in tracks:\n if track not in no_duplicate_array:\n no_duplicate_array.append(track)\n\n return no_duplicate_array\n\n\ndef create_tracks(track_array):\n \"\"\"\n Converts a matrix of tracks into a list of Track objects.\n :param track_array: list of tracks\n :return: list of objects\n \"\"\"\n tracks = []\n track_id = 0\n for track in track_array:\n t = Track(track)\n t.id = track_id\n t.compute_speed()\n track_id += 1\n tracks.append(t)\n return tracks\n\n\ndef create_bounding_boxes(bb_array):\n \"\"\"\n Converts a matrix of unassigned points into a list of BoundingBox objects.\n :param bb_array: list of unassigned points\n :return: list of objects\n \"\"\"\n if bb_array is None:\n return None\n bounding_boxes = []\n for bb in bb_array:\n bounding_boxes.append(TrackBoundingBox(bb[0], bb[1], bb[2], bb[3], 0, 0))\n return bounding_boxes\n\n\ndef is_in_radius(bb1, bb2, radius):\n return get_distance_object(bb1, bb2) <= radius\n\n\ndef get_distance_object(bb1, bb2):\n \"\"\"\n Returns the distance between two BoundingBox objects.\n :param bb1: bounding box a\n :param bb2: bounding box b\n :return: distance\n \"\"\"\n distance = math.sqrt((bb1.x - bb2.x)**2 + (bb1.y - bb2.y)**2)\n return distance\n\n\ndef get_distance_array(a, b):\n \"\"\"\n Returns the distance between two points, where a point is a list [X, Y]\n :param a: point a\n :param b: point b\n :return: distance\n \"\"\"\n return 
math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)\n\n\ndef print_track2(tracks,old):\n # Create a black image\n img = np.zeros((720, 1280, 3), np.uint8)\n img2 = np.zeros((720, 1280, 3), np.uint8)\n print('len old=' + str(len(old)))\n print('len new=' + str(len(tracks)))\n for track_index in range(len(tracks)):\n color = (random.randint(0,255), random.randint(0,255), random.randint(0,255))\n for index in range(len(tracks[track_index].bounding_boxes) - 1):\n x = tracks[track_index].bounding_boxes[index].x\n y = tracks[track_index].bounding_boxes[index].y\n next_x = tracks[track_index].bounding_boxes[index + 1].x\n next_y = tracks[track_index].bounding_boxes[index + 1].y\n cv2.line(img, (x, y), (next_x, next_y), color)\n for track_index in range(len(old)):\n color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n for index in range(len(old[track_index].bounding_boxes) - 1):\n x = old[track_index].bounding_boxes[index].x\n y = old[track_index].bounding_boxes[index].y\n next_x = old[track_index].bounding_boxes[index + 1].x\n next_y = old[track_index].bounding_boxes[index + 1].y\n cv2.line(img2, (x, y), (next_x, next_y), color)\n\n # If q is pressed then exit program\n cv2.imwrite(\"after2.png\",img)\n cv2.imshow(\"PO\", img)\n cv2.imshow(\"PRED\", img2)\n k = cv2.waitKey(0)\n if k == ord('q'):\n cv2.destroyAllWindows()\n\n\ndef print_track(tracks, merging, merge_point, merged, before, method, seed,file, num = -1):\n # Create a black image\n img = np.zeros((720, 1280, 3), np.uint8)\n img2 = np.zeros((720, 1280, 3), np.uint8)\n img3 = np.zeros((720, 1280, 3), np.uint8)\n img4 = np.zeros((720, 1280, 3), np.uint8)\n for track_index in range(len(tracks)):\n color = (random.randint(0,255), random.randint(0,255), random.randint(0,255))\n next_x = -1\n next_y = -1\n for index in range(len(tracks[track_index].bounding_boxes) - 1):\n x = tracks[track_index].bounding_boxes[index].x\n y = tracks[track_index].bounding_boxes[index].y\n next_x = tracks[track_index].bounding_boxes[index + 1].x\n next_y = tracks[track_index].bounding_boxes[index + 1].y\n cv2.line(img, (x, y), (next_x, next_y), color)\n '''for merge in merging[track_index]:\n if next_y == 1 or next_x == -1:\n break\n first_bb = merge.bounding_boxes[0]\n merge_x = first_bb.x\n merge_y = first_bb.y\n cv2.line(img, (next_x, next_y), (merge_x, merge_y), (0,0,255))\n for point in merge_point[track_index]:\n #cv2.line(img, (point.x, point.y), (point.x, point.y), (0, 255, 0))\n img[point.y][point.x] = (0, 255, 0)'''\n\n for track_index in range(len(before)):\n color = (random.randint(0, 255), random.randint(0,255), random.randint(0,255))\n color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n x = before[track_index].bounding_boxes[0].x\n y = before[track_index].bounding_boxes[0].y\n #cv2.putText(img3, \"track id:\" + str(track_index), (x, y - 5), cv2.FONT_ITALIC, 0.35, (0, 0, 255))\n for index in range(len(before[track_index].bounding_boxes) - 1):\n x = before[track_index].bounding_boxes[index].x\n y = before[track_index].bounding_boxes[index].y\n next_x = before[track_index].bounding_boxes[index + 1].x\n next_y = before[track_index].bounding_boxes[index + 1].y\n cv2.line(img3, (x, y), (next_x, next_y), color)\n\n for track_index in range(len(merged)):\n for index in range(len(merged[track_index].bounding_boxes) - 1):\n x = merged[track_index].bounding_boxes[index].x\n y = merged[track_index].bounding_boxes[index].y\n next_x = merged[track_index].bounding_boxes[index + 1].x\n next_y = 
merged[track_index].bounding_boxes[index + 1].y\n cv2.line(img2, (x, y), (next_x, next_y), color)\n\n if num != -1:\n for index in range(len(tracks[num].bounding_boxes) - 1):\n x = tracks[num].bounding_boxes[index].x\n y = tracks[num].bounding_boxes[index].y\n next_x = tracks[num].bounding_boxes[index + 1].x\n next_y = tracks[num].bounding_boxes[index + 1].y\n cv2.line(img4, (x, y), (next_x, next_y), (255, 0, 0))\n #cv2.imshow(\"1 track\", img4)\n date = str(datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M\"))\n dir_name = \"test\\\\\" + date + '_seed_'+str(seed)\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n #cv2.imshow(\"Pred\", img3)\n cv2.imwrite(dir_name + os.sep + file + \"_\" + method + \"_file_pred.jpg\", img3)\n #cv2.imshow(\"join\", img)\n #cv2.imshow(\"Po\", img2)\n cv2.imwrite(dir_name + os.sep + file + \"_\" + method + \"_file_po.jpg\", img2)\n # If q is pressed then exit program\n #k = cv2.waitKey(0)\n #if k == ord('q'):\n # cv2.destroyAllWindows()\n\n\ndef get_vector_angle(x, y):\n \"\"\"\n Computes the angle of a vector.\n :param x: x component\n :param y: y component\n :return: angle\n \"\"\"\n vect = math.atan2(y, x)\n return vect\n\n\ndef get_position(x, y, angle, speed):\n \"\"\"\n Computes the new position of a point from an angle and a speed.\n X=distance*cos(angle) + x0\n Y=distance*sin(angle) + y0\n :param x: original x position\n :param y: original y position\n :param angle: angle\n :param speed: speed\n :return: the new position [x, y]\n \"\"\"\n x_new = x + math.cos(angle)*speed\n y_new = y + math.sin(angle)*speed\n return x_new, y_new\n\n\ndef print_info(tracks, frameCount=None):\n # frame number\n # number of tracks\n # average track length\n if frameCount is not None:\n print('Frame=' + str(frameCount))\n pocet_tras = len(tracks)\n print('Number of tracks=' + str(pocet_tras))\n sum_len = 0\n if type(tracks) is dict:\n for key, track in tracks.items():\n sum_len += len(track.boundingBoxes)\n else:\n for track in tracks:\n sum_len += len(track.bounding_boxes)\n avg = sum_len / pocet_tras\n print('Average track=' + str(avg))\n\ndef angle(v1):\n # tan(alpha) = opposite / adjacent = y / x\n print(v1)\n if v1[0] == 0 or v1[1] == 0:\n return 0\n tan_v1_alfa = abs(v1[1] / v1[0])\n #tan_v2_alfa = abs(v2[1] / v2[0])\n\n v1_alfa = math.atan(tan_v1_alfa)\n #v2_alfa = math.atan(tan_v2_alfa)\n return v1_alfa\n\n\ndef get_longest_track(tracks):\n # TODO helper function for testing\n new_array = []\n\n for track_index in range(len(tracks)):\n max = -1\n max_index = -1\n t = -1\n for index in range(len(tracks[track_index].bounding_boxes) - 1):\n track1 = tracks[track_index].bounding_boxes[index]\n track2 = tracks[track_index].bounding_boxes[index + 1]\n distance = get_distance_object(track1, track2)\n if distance > max:\n max = distance\n max_index = index\n t = track_index\n if t != -1:\n new_array.append([max, tracks[t]])\n '''if t != -1:\n print('max=' + str(max))\n print('max index=' + str(max_index))\n print('track=' + str(tracks[t].id))\n print('track=' + str(tracks[t].bounding_boxes[max_index]) + ' -> ' + str(tracks[t].bounding_boxes[max_index + 1]))'''\n new_array.sort(key=lambda track: track[0])\n #return [tracks[t]]\n length = len(new_array)\n print(new_array[length - 1][1].id)\n print(new_array[length - 2][1].id)\n print(new_array[length - 3][1].id)\n print(new_array[length - 4][1].id)\n return [new_array[length - 1][1],new_array[length - 2][1],new_array[length - 3][1],new_array[length - 4][1],new_array[length - 5][1],new_array[length - 6][1],new_array[length - 7][1]]\n\n\ndef track_object_to_matrix(tracks):\n \"\"\"\n
 Converts a list of Track objects into a plain list of tracks.\n :param tracks: list of Track objects\n :return: list of tracks\n \"\"\"\n new_tracks = []\n for track in tracks:\n new_track = []\n for bb in track.bounding_boxes:\n new_bb = [bb.x,bb.y,bb.in_track,bb.frame_index,bb.width,bb.height]\n new_track.append(new_bb)\n new_tracks.append(new_track)\n\n return new_tracks\n\n\ndef compare_tracks(file_ground_truth, flow_matrix):\n import XMLParser\n tracks_gt, mat, src_name = XMLParser.parseXMLData(file_ground_truth)\n result = merge_tracks_flow_matrix(tracks_gt, flow_matrix, 5)\n print(result)\n '''tracks_gt = create_tracks(tracks_gt)\n for t in tracks_gt:\n for bb in t.bounding_boxes:\n if bb.x == 858 and bb.y == 200:\n print('found')\n print('daco')'''\n\n","repo_name":"killerwife/ImageCytometry","sub_path":"src/Python/Tracking/Tracking.py","file_name":"Tracking.py","file_ext":"py","file_size_in_byte":22339,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"4187483892","text":"from dateutil import parser\nfrom jazkarta.shop import storage\n\n\ndef get_order_from_id(id):\n \"\"\" An order_id is a string consisting of the userid and the\n order date in iso format, joined by the pipe character. \"\"\"\n userid, datestr = id.split('|')\n date = parser.parse(datestr)\n if userid == '_orders_':\n data = storage.get_shop_data(['orders', date])\n else:\n data = storage.get_shop_data([userid, 'orders', date])\n return data\n","repo_name":"jazkarta/jazkarta.shop","sub_path":"jazkarta/shop/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"8375509925","text":"\"\"\"Return the top n items in an array, in descending order.\nArgs:\n items (array): list or array-like object containing numerical values.\n n (int): number of top items to return.\n\n Returns:\n array: top n items, in descending order.\n\n Examples:\n >>> top_n([8, 3, 2, 7, 4], 3)\n [8, 7, 3]\n \"\"\"\ndef sum_array(array):\n \n '''Return sum of all items in array'''\n if len(array)== 1:\n return array[0]\n else:\n return array[0]+ sum_array(array[1:])\n\n\ndef factorial(n):\n \n '''Return n!'''\n if n == 1:\n return n\n else:\n lower_fact = factorial(n-1)\n current_fact = n * lower_fact\n return current_fact\n \ndef fibonacci(n):\n '''Return nth term in fibonacci sequence'''\n if n <= 1:\n return n\n else:\n return(fibonacci(n-1) + fibonacci(n-2))\n\n\n\ndef reverse(word):\n \n '''Return word in reverse'''\n if len(word) == 0:\n return word\n else:\n return reverse(word[1:]) + word[0] \n","repo_name":"osmanhlongwane/mypackage","sub_path":"mypackage/recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26523877289","text":"\"\"\"\nGiven a binary tree, flatten it to a linked list in-place.\n\nFor example,\nGiven\n\n 1\n / \\\n 2 5\n / \\ \\\n 3 4 6\nThe flattened tree should look like:\n 1\n \\\n 2\n \\\n 3\n \\\n 4\n \\\n 5\n \\\n 6\n\"\"\"\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\nclass Solution(object):\n def flatten(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: void Do not return anything, modify root in-place instead.\n \"\"\"\n if not root:\n return\n self.BuildFlatten(root)\n def BuildFlatten(self, root):\n if not root:\n return\n
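 # Each recursive call flattens its subtree and returns the LAST node of the\n # flattened list, so the caller can append the pending right subtree to it.\n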
 if not root.right and not root.left:\n return root\n if not root.left:\n return self.BuildFlatten(root.right)\n if not root.right:\n root.right = root.left\n root.left = None\n return self.BuildFlatten(root.right)\n anchor = root.right\n leaveleft = self.BuildFlatten(root.left)\n root.right = root.left\n root.left = None\n #plant the right subtree at the leaf of the left subtree\n leaveleft.right = anchor\n #Get the leaf node of the right subtree\n leaveright = self.BuildFlatten(anchor)\n return leaveright\nSolve = Solution()\nroot = TreeNode(1)\nroot.left = TreeNode(2)\nroot.right = TreeNode(5)\nroot.left.left = TreeNode(3)\nroot.left.right = TreeNode(4)\nroot.right.right = TreeNode(6)\nSolve.flatten(root)","repo_name":"urashima9616/Leetcode_Python","sub_path":"Leet114_FlattenBinaryTree.py","file_name":"Leet114_FlattenBinaryTree.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18029879491","text":"\"\"\"\nBazel macro for creating a py_test that runs pytest.\n\"\"\"\n\nload(\"@rules_python//python:defs.bzl\", \"py_test\")\nload(\"@pip-setup//:requirements.bzl\", \"requirement\")\n\ndef py_pytest_test(name, srcs, deps = [], args = [], **kwargs):\n py_test(\n name = name,\n srcs = [\n \"//tools/pytest:pytest_wrapper.py\",\n ] + srcs,\n main = \"//tools/pytest:pytest_wrapper.py\",\n args = [\n \"-ra\",\n \"-vv\",\n ] + args + [\"$(location :%s)\" % x for x in srcs],\n python_version = \"PY3\",\n srcs_version = \"PY3\",\n deps = deps + [\n requirement(\"pytest\"),\n requirement(\"pytest-cov\"),\n ],\n **kwargs\n )\n","repo_name":"mvgijssel/setup","sub_path":"tools/pytest/pytest.bzl","file_name":"pytest.bzl","file_ext":"bzl","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"4505390643","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib\nfrom matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar\nfrom matplotlib.figure import Figure\n\nfrom PyQt4 import QtGui, QtCore\n\nclass MplCanvas(FigureCanvas):\n \"\"\"Class to represent the FigureCanvas widget\"\"\"\n def __init__(self):\n self.fig = Figure()\n self.input = self.fig.add_subplot(211)\n self.output = self.fig.add_subplot(212)\n \n FigureCanvas.__init__(self, self.fig)\n self.setSizePolicy(QtGui.QSizePolicy.Expanding,\n QtGui.QSizePolicy.Expanding)\n self.updateGeometry()\n\n\nclass Graph(QtGui.QWidget):\n def __init__(self, parent=None):\n super(Graph, self).__init__(parent)\n self.canvas = MplCanvas()\n self.navi = NavigationToolbar(self.canvas, self)\n self.vbl = QtGui.QVBoxLayout()\n self.vbl.addWidget(self.canvas)\n self.vbl.addWidget(self.navi)\n self.setLayout(self.vbl)\n \n def setInputValues(self, values):\n self.canvas.input.clear()\n self.canvas.output.clear()\n self.canvas.input.plot(range(0, len(values)), values)\n self.canvas.input.grid()\n self.canvas.draw()\n\n def setOutputValues(self, values):\n self.canvas.output.clear()\n self.canvas.output.plot(range(0, len(values)), values)\n self.canvas.output.grid()\n self.canvas.draw()\n ","repo_name":"oxullo/gpsfilter","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8893022740","text":"# -*- 
coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport requests\nfrom datetime import date, timedelta\nfrom django.shortcuts import render\nfrom django.db.models import Sum\nfrom django.views.generic import TemplateView\nfrom django.db.models import Q, CharField, Max, Value as V\nfrom django.db.models.functions import Concat\nfrom system.models import Union, CooperativeMember, MemberOrder\nfrom collections import Counter\nfrom django.db.models import Count\n\n\nclass DashboardView(TemplateView):\n template_name = \"dashboard.html\"\n \n def get_context_data(self, **kwargs):\n context = super(DashboardView, self).get_context_data(**kwargs)\n unions = Union.objects.all()\n cooperative = 'all'\n members = []\n agents = []\n mb = []\n districts = ['Guli', 'Kitgum', 'Lamwo', 'Pader', 'Agago', 'Amuru', 'Nwoya', 'Omoro', 'Amuru', 'Alebtong', 'Amolatar', 'Apac', 'Dokolo', 'Kole', 'Lira', 'Oyam', 'Otuke', 'Kwania', 'Kiryaongo']\n youth = 0\n myouth = 0\n fyouth = 0\n mc = 0\n fc = 0\n rc = 0\n acreage = list()\n farmers = list()\n orders_count = []\n for u in unions:\n queryset = CooperativeMember.objects.using(u.name.lower()).all()\n d = date.today() - timedelta(days=8900)\n date_ = d.strftime(\"%Y-%m-%d\")\n male = queryset.filter(Q(gender__iexact='Male')|Q(gender__iexact='M'))\n female = queryset.filter(Q(gender__iexact='Female')|Q(gender__iexact='F'))\n refugee = queryset.filter(is_refugee=True)\n my = 0\n fy = 0\n for q in male:\n if q.age() >= 13 and q.age() <= 25:\n my += 1\n myouth += my\n\n for q in female:\n if q.age() >= 13 and q.age() <= 25:\n fy += 1\n fyouth += fy\n\n amale = male.filter(date_of_birth__lte=d)\n afemale = female.filter(date_of_birth__lte=d)\n\n mc += amale.count()\n fc += afemale.count()\n rc += refugee.count()\n\n mb.append({'union': u.name, 'count': queryset.count(),\n 'male': mc, 'female': fc,\n 'refugee': refugee.count(), 'myouth': my, 'fyouth': fy\n })\n members.extend(queryset)\n\n aq = CooperativeMember.objects.using(u.name.lower()).values('district__name').annotate(Sum('land_acreage')).filter(district__name__in=districts)\n fq = CooperativeMember.objects.using(u.name.lower()).values('district__name').annotate(total=Count('id')).filter(district__name__in=districts).order_by('total')\n ord = MemberOrder.objects.using(u.name.lower()).all()\n orders_count.append({'union': u.name, 'count': ord.count()})\n acreage.extend(aq)\n farmers.extend(fq)\n\n cooperatives = []\n cp = []\n for u in unions:\n token = u.token\n url = '%s/endpoint/cooperative/list/' % u.url\n header = {'Authorization': 'Token %s' % token}\n r = requests.post(url, headers=header, verify=False)\n\n if r:\n cp.append({'union': u.name, 'count': len(r.json())})\n cooperatives.extend(r.json())\n\n ag = []\n for u in unions:\n token = u.token\n url = '%s/endpoint/user/list/' % u.url\n header = {'Authorization': 'Token %s' % token}\n r = requests.post(url, headers=header, verify=False)\n if r:\n ag.append({'union': u.name, 'count': len(r.json())})\n agents.extend(r.json())\n\n import pandas as pd\n df = pd.DataFrame(acreage)\n f = pd.DataFrame(farmers)\n g = df.groupby('district__name', as_index=False).sum()\n ff = f.groupby('district__name', as_index=False).sum()\n d = g.to_dict('r')\n dd = ff.to_dict('r')\n\n context['union_count'] = unions.count()\n context['cooperative_count'] = len(cooperatives)\n context['agent_count'] = len(agents)\n context['coop_lst'] = cp\n context['agent_lst'] = ag\n context['member_lst'] = mb\n context['member_count'] = len(members)\n context['male'] = mc\n 
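# gender, youth and refugee tallies aggregated across every union database\n 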
context['female'] = fc\n context['myouth'] = myouth\n context['fyouth'] = fyouth\n context['refugee'] = rc\n context['acreage'] = d\n context['farmers'] = dd\n context['orders_count'] = orders_count\n return context","repo_name":"hamwetech/mykoop-admin","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39656893006","text":"\r\ndef sumofdivisors(x):\r\n '''Returns the sum of the proper divisors of x'''\r\n total = 0\r\n \r\n for divisor in range(1,x//2+1):\r\n if x%divisor==0:\r\n total+=divisor\r\n #x = x/divisor\r\n return(total)\r\n \r\ndef divdict(x):\r\n '''Returns a dictionary of the sum of each divisors \r\n for every integer less than x'''\r\n divs = {}\r\n \r\n for num in range(1,x):\r\n divs[num] = sumofdivisors(num)\r\n \r\n return divs\r\n\r\n\r\n\r\ndef amicablenumbers(x):\r\n total = 0 \r\n divisor = divdict(x)\r\n for key,value in divisor.items():\r\n for key1,value1 in divisor.items():\r\n if (key,value) == (value1,key1) and key != key1:\r\n #print(key,value, value1,key1)\r\n total += key\r\n return(total)\r\n \r\namicablenumbers(10000)\r\n ","repo_name":"justinmyersdata/ProjectEuler","sub_path":"21_Project_Euler.py","file_name":"21_Project_Euler.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70500969746","text":"import random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef line_through_points(x1, y1, x2, y2):\n # Calculate slope\n m = (y2 - y1) / (x2 - x1)\n # Calculate intercept\n b = y1 - m * x1\n # Return slope and intercept as tuple\n return m, b\n\n\ndef count_nodes_throgh_line(point1,point2):\n # calculate the line\n if point1[0] == point2[0]:\n nodes_num = abs(point2[1] - point1[1]) - 1\n elif point1[1] == point2[1]:\n nodes_num = abs(point2[0] - point1[0]) - 1\n else:\n k, b = line_through_points(*point1, *point2)\n nodes_num = 0\n for node in grid:\n x_bound_max = max(point1[0], point2[0])\n x_bound_min = min(point1[0], point2[0])\n\n y_bound_max = max(point1[1], point2[1])\n y_bound_min = min(point1[1], point2[1])\n if (x_bound_min < node[0] < x_bound_max) and (y_bound_min < node[1] < y_bound_max):\n y_pre = k * node[0] + b\n if y_pre == node[1]:\n nodes_num += 1\n nodes_num += 2 # add the start point and ending point\n return nodes_num\n\n\n# generate grid\ngrid_size = 5\ngrid = [(i, j) for i in range(grid_size) for j in range(grid_size)]\n\n# randomly sample two points\npoint1, point2 = random.sample(grid, 2)\nangle = np.rad2deg(np.arctan2(point2[1]-point1[1],\n point2[0]-point1[0]))\nnodes_num = count_nodes_throgh_line(point1,point2)\nprint(\"The number of nodes through the line:\", nodes_num)\nprint(\"The angle:\",angle)\n\n\n#%%\ngrid_size = 5\n# Define the grid\nx = range(grid_size)\ny = range(grid_size)\n\n# Define the arrow endpoints\narrow1_start = point1\narrow1_end = point2\narrow2_start = [0, 0]\narrow2_end = [1, 1]\n\n# Create the plot\nfig, ax = plt.subplots()\n\n# Plot the grid\nfor i in range(len(x)):\n for j in range(len(y)):\n ax.plot(x[i], y[j], 'o', color='white', markersize=5, mec='black', mew=2)\n\n# Add the arrows to the plot\n#ax.arrow(arrow1_start[0], arrow1_start[1], arrow1_end[0] - arrow1_start[0], arrow1_end[1] - arrow1_start[1],\n# head_width=0.2, head_length=0.2, fc='red', ec='red')\n#ax.arrow(arrow2_start[0], arrow2_start[1], arrow2_end[0]-arrow2_start[0], 
arrow2_end[1]-arrow2_start[1],\n# head_width=0.2, head_length=0.2, fc='blue', ec='blue')\n\n# Set the axis range and gridlines\nax.set_xlim([-0.5, 4.5])\nax.set_ylim([-0.5, 4.5])\nax.set_xticks(x)\nax.set_yticks(y)\nax.grid(True)\n\n# Set the axis labels\nax.set_xlabel('x-axis')\nax.set_ylabel('y-axis')\n\n# Set the plot title\nax.set_title('5x5 Square Grid with Arrows')\n\nplt.savefig(\"/home/dell/clash/demo.png\", dpi=300)\n# Show the plot\nplt.show()\n\n#%%\ngrid_size = 5\ngrid = [(i, j) for i in range(grid_size) for j in range(grid_size)]\nntrials = 10000\n# randomly sample two points\n\nntrials_nodes_num = []\nntrials_angle = []\n\nfor n in range(ntrials):\n point1, point2 = random.sample(grid, 2)\n ntrials_angle.append(np.rad2deg(np.arctan2(point2[1]-point1[1],\n point2[0]-point1[0])))\n ntrials_nodes_num.append(count_nodes_throgh_line(point1,point2))\n\n\n# sort the data by angles\nangles_sorted, y_true_sorted = zip(*sorted(zip(ntrials_angle, ntrials_nodes_num)))\ny_8fold = [np.cos(np.deg2rad(8*(angle-8))) for angle in angles_sorted]\n\n# create the plot\nfig, ax = plt.subplots()\nplt.plot(angles_sorted, y_true_sorted, '-',label='state_space')\nplt.plot(angles_sorted, y_8fold,label='8fold')\n\n# set the x-axis label\nplt.xlabel('Angles')\n# set the y-axis label\nplt.ylabel('number')\n\n# set the x-axis tick labels to be in 45-degree increments\nx_ticks = np.arange(-180, 181, 45)\nx_ticklabels = [str(x) + '°' for x in x_ticks]\nplt.xticks(x_ticks, x_ticklabels)\n\n# move the legend outside the plot\nax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.2), ncol=2,fontsize=12)\n\n# save the figure\nplt.savefig(\"/mnt/workdir/DCM/tmp/8fold&state_space.png\",dpi=300)\n\n# show the plot\nplt.show()\n","repo_name":"YukunQu/DCM","sub_path":"exp/simulation/state_space/state_space.py","file_name":"state_space.py","file_ext":"py","file_size_in_byte":3896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9310879267","text":"import heapq\r\n\r\n# Define the goal state and initial state\r\ngoal_state = [[1, 2, 3], [8, 0, 4], [7, 6, 5]]\r\ninitial_state = [[2, 8, 3], [1, 6, 4], [7, 0, 5]]\r\n\r\n# Define possible moves\r\nmoves = [(0, -1), (0, 1), (-1, 0), (1, 0)]\r\n\r\ndef is_valid_move(x, y):\r\n return 0 <= x < 3 and 0 <= y < 3\r\n\r\ndef solve_puzzle(initial_state, goal_state):\r\n open_set = [(0, initial_state, None)]\r\n closed_set = set()\r\n \r\n while open_set:\r\n cost, current_state, parent = heapq.heappop(open_set)\r\n \r\n if current_state == goal_state:\r\n # Goal state found\r\n path = []\r\n while parent:\r\n path.append(current_state)\r\n cost, current_state, parent = parent\r\n path.append(initial_state)\r\n return path[::-1]\r\n \r\n if tuple(map(tuple, current_state)) in closed_set:\r\n continue\r\n \r\n closed_set.add(tuple(map(tuple, current_state)))\r\n \r\n for dx, dy in moves:\r\n x, y = None, None\r\n for i in range(3):\r\n for j in range(3):\r\n if current_state[i][j] == 0:\r\n x, y = i, j\r\n new_x, new_y = x + dx, y + dy\r\n \r\n if is_valid_move(new_x, new_y):\r\n new_state = [list(row) for row in current_state]\r\n new_state[x][y], new_state[new_x][new_y] = new_state[new_x][new_y], new_state[x][y]\r\n new_cost = cost + 1 + heuristic(new_state, goal_state)\r\n heapq.heappush(open_set, (new_cost, new_state, (cost, current_state, parent)))\r\n \r\n return None\r\n\r\ndef heuristic(state, goal_state):\r\n # Manhattan distance heuristic\r\n total_distance = 0\r\n for i in range(3):\r\n for j in range(3):\r\n if 
state[i][j] != 0:\r\n x, y = divmod(state[i][j] - 1, 3)\r\n total_distance += abs(x - i) + abs(y - j)\r\n return total_distance\r\n\r\nsolution = solve_puzzle(initial_state, goal_state)\r\nif solution:\r\n for i, state in enumerate(solution):\r\n print(f\"Step {i + 1}:\")\r\n for row in state:\r\n print(row)\r\n print()\r\nelse:\r\n print(\"No solution found.\")\r\n","repo_name":"192211659/CSA1703","sub_path":"Exp-1.py.py","file_name":"Exp-1.py.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"554824507","text":"#! python3\n# -*- coding: utf-8\n\"\"\"\nCreated on Mon Apr 27 22:39:15 2020\n\n@author: pldesgagne\n\nDescription:\n Construct a Neighbor-Joining tree with VNTR data.\n\n Load VNTR data from a excel file.\n Scan and display information about the VNTR data.\n Calculate the genetic distance with either the Nei's distance formula\n or the Cavalli-Sforza chord distance formula.\n Saves a Neighbor-Joining tree that can be opened with a tree viewing\n program such as MEGA.\n\n The excel data should be formated as: (first line is ignored)\n Name Locus Allele # headers are ignored\n sample1 loci1 allele1\n sample1 loci2 allele1\n sample1 loci2 allele2\n sample2 loci1 allele1\n\"\"\"\n__version__ = \"1.0.1\"\nimport pandas as pd # using the readExcel method\nimport tkinter as tk # using the open filedialog\nfrom tkinter import filedialog # using the open filedialog\nfrom math import pi # for CavalliSforza algo\nimport numpy as np\nimport copy # Copying np.matrix\nfrom Bio.Phylo import write # Biopython\nfrom Bio.Phylo import BaseTree # Biopython\nimport re\n\n# Initialise tkinter to enable the uses of filedialog\nroot = tk.Tk()\nroot.withdraw() # Prevent a empty window to be opened\n\n# Title and icon for eventual GUI\n# root.title(\"VNTR to Neighbor-Joining tree\")\n# root.iconbitmap('phylotree.ico')\n\n\nclass DistanceMatrix(object):\n \"\"\"\n Distance matrix class.\n\n Contains a list of IDs and a distance matrix for the same IDs.\n IDs can be both indices or names (str).\n\n Attributes\n ----------\n names: a list of names (str)\n \"\"\"\n\n def __init__(self, names):\n\n # Initialize the matrix\n self.matrix = np.matrix(np.zeros(shape=(len(names),\n len(names)))).astype(float)\n self.names = names\n\n # ID-to-index dictionary\n self.indices = {} # {name:index}\n # index-to-ID dictionary\n self.index_to_name = {} # {index:name}\n\n # Create the indices dictionnary with the provided list of names\n self.__createIndices(names)\n\n def __createIndices(self, names):\n index = 0\n indices = {}\n index_to_name = {}\n for name in names:\n indices.update({name: index})\n index_to_name.update({index: name})\n index += 1\n self.indices = indices\n self.index_to_name = index_to_name\n\n # Dict-like behaviour\n\n def __getitem__(self, item):\n \"\"\"\n Get a distance between two sequences.\n\n Input\n -----\n item: a tuple of sequence names\n\n Return\n ------\n the genetic distance (float)\n \"\"\"\n assert type(item) is tuple\n assert len(item) == 2\n # Verify if names are supplied instead of indices\n if all(isinstance(x, str) for x in item):\n return self.matrix[self.indices[item[0]], self.indices[item[1]]]\n else:\n return self.matrix[item[0], item[1]]\n\n def __setitem__(self, key, value):\n \"\"\"\n Add an item to the matrix.\n\n Input\n -----\n a 2-item tuple\n key: name (str) or index (int)\n value: genetic distance (float)\n \"\"\"\n assert type(key) is tuple\n assert len(key) == 
2\n if all(isinstance(x, str) for x in key):\n self.matrix[self.indices[key[0]], self.indices[key[1]]] = value\n self.matrix[self.indices[key[1]], self.indices[key[0]]] = value\n else:\n self.matrix[key[0], key[1]] = value\n self.matrix[key[1], key[0]] = value\n\n def __delitem__(self, ids):\n \"\"\"Remove a single ID.\"\"\"\n self.matrix = np.delete(np.delete(self.matrix, ids, axis=0),\n ids, axis=1)\n del self.names[ids]\n self.__createIndices(self.names)\n\n def __len__(self):\n \"\"\"Return count of IDs in the matrix.\"\"\"\n return len(self.indices)\n\n\nclass NJTreeConstructor():\n \"\"\"\n Construct a Neighbor-Joining tree with VNTR data.\n\n Creates a object that can load VNTR data from a excel file and return\n a Neighbor-Joining tree that can be opened with a tree viewing program\n such as MEGA.\n\n The excel data should be formated as: (first line is ignored)\n Name Locus Allele # headers are ignored\n sample1 loci1 allele1\n sample1 loci2 allele1\n sample1 loci2 allele2\n sample2 loci1 allele1\n ...\n \"\"\"\n\n def __init__(self):\n\n # VNTR data in a list of Pop()\n self.excelData = None\n\n # DistanceMatrix\n self.distanceMatrix = None\n\n # Tree instance of Biopython Phylo BaseTree module\n self.tree = None\n\n # List of all loci names\n self.lociNames = ''\n self.lociCount = 0\n\n def loadExcelData(self):\n \"\"\"\n Load the data from excel sheet into a list of Pop().\n\n The excel sheet contains name, locus and alleles for each samples.\n Saves results into class variable self.excelData\n\n Input:\n source: file name and address for the file containing the samples\n name and their allele value for each locus.\n format in each colomn should be:\n Name Locus Allele # headers names are ignored\n name1 loci1 allele1\n name1 loci2 allele2\n \"\"\"\n print(\"Select the excel file containing the VNTR data: \")\n print('(The excel file should contain',\n '3 columns \"Name\", \"Locus\", \"Allele\".)')\n\n # Ask for the excel file path\n source_file_path = filedialog.askopenfilename(\n title=\"Select an Excel File\",\n filetypes=((\"Excel files\", \"*.xlsx\"), (\"All files\", \"*.*\"))\n )\n\n if source_file_path == '':\n raise CancelException(\"Open file cancelled\")\n\n # Read the excel data into Pandas DataFrame\n fileData = pd.read_excel(source_file_path, header=0,\n names=[\"Name\", \"Locus\", \"Allele\"],\n usecols=\"A:C\",\n dtype={'Name': str,\n 'Locus': str,\n 'Allele': str})\n print(\"\\nLoading: \" + source_file_path)\n\n # load data into list of pop then into self.excelData\n pop_list = []\n # Initialize the progress bar at 0%\n self.__printProgressBar(0, fileData.shape[0], 'Loading Data:',\n 'Complete', 50)\n j = 0 # progress counter\n for i in fileData.itertuples(index=False):\n j += 1 # update progress counter\n for item in pop_list:\n if item.name == i.Name:\n item.addLocus(i.Locus, i.Allele)\n break\n else:\n currentPop = Pop(i.Name)\n currentPop.addLocus(i.Locus, i.Allele)\n pop_list.append(currentPop)\n # update the progress bar\n self.__printProgressBar(j, fileData.shape[0], 'Loading Data:',\n 'Complete', 50)\n\n # save data into self.excelData\n self.excelData = pop_list\n print(\"\\nExcel Data succesfully loaded:\")\n\n # scan the data\n self.__scanExcelData(self.excelData)\n\n def __scanExcelData(self, data):\n \"\"\"\n Print usefull information about the supplied VNTR data.\n\n Parse the excel data and show different information which may help the\n user find potential error in the provided excel files such as typos in\n names or wrong sample count.\n\n 
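Checks performed: per-locus counts, samples with unusually few loci,\n samples with missing loci, and non-integer allele values.\n\n 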
Input:\n data: the data to be scanned in format [Pop()]\n \"\"\"\n lociNames = {} # {key=Locus name: value=Locus count}\n numSamples = len(data)\n unusual_pop = [] # pop with less than minLociCount (potential typo)\n minLociCount = 3 # minimum loci to trigger a warning\n loci_missing_pop = [] # pop with missing loci\n non_int_allele = [] # \"pop, locus, alelle\" with non int allele value\n\n # Finds all the loci and their total count in the entire data\n for pop in data:\n pop_keys = pop.loci.keys()\n # saves loci with less than minLociCount (potential typo)\n if len(pop_keys) < minLociCount:\n unusual_pop.append(pop.name)\n for locus, alleles in pop.loci.items():\n if locus in lociNames:\n lociNames[locus] += 1\n else:\n lociNames[locus] = 1\n for allele in alleles.keys():\n if not str(allele).isdecimal():\n non_int_allele.append(\n f'Sample {pop.name} at locus {locus} = \"{allele}\"')\n self.lociNames = lociNames.keys()\n self.lociCount = len(lociNames)\n\n # Print the number of samples found\n tabs = '\\t'\n print(f\"{tabs}{numSamples} samples were found.\")\n\n # Print all the loci found and their count\n print(f\"{tabs}{len(lociNames)} different loci were found:\")\n tabs += '\\t'\n print(f\"{tabs}{'Locus name':<10}{'Count':>10}\")\n for name, count in lociNames.items():\n if name is np.nan:\n print(f\"{tabs}{'MISSING':<10}{count:>10}\")\n else:\n print(f\"{tabs}{name:<10}{count:>10}\")\n\n # Print pop with one or more missing loci\n # Raise a error if a pop name or loci name is empty\n for pop in data:\n if pop.name is np.nan:\n print(\"\\tERROR: One or more samples have no sample name.\")\n raise CancelException(\"Add names or delete empty lines.\")\n for locus in self.lociNames: \n if locus is np.nan:\n print(\"\\tERROR: One or more samples have loci without a name.\")\n raise CancelException(\"Add names or delete empty lines.\")\n if locus not in pop.loci:\n loci_missing_pop.append(f\"{pop.name} is missing locus \" +\n locus)\n if loci_missing_pop:\n # If more that 80% of sample are missing a specific locus,\n # this locus may be a typo\n if len(loci_missing_pop) <= len(data)*0.8:\n print(\"\\tWARNING:\")\n for message in loci_missing_pop:\n print(f\"{tabs}{message}\")\n else:\n print(\"\\tWARNING: Check for locus name typos.\")\n\n # Print pop with less than minLociCount (potential typo in pop name)\n if unusual_pop:\n print(\"\\tWARNING: The following samples have less\",\n f\"than {minLociCount} loci:\")\n for pop in unusual_pop:\n print(f\"{tabs}{pop}\")\n\n # Print pop with non numerical allele values, raise error\n if non_int_allele:\n print(\"\\tERROR: The following samples have alleles with\",\n \"non integer values:\")\n for allele in non_int_allele:\n print(f\"{tabs}{allele}\")\n raise CancelException(\"Fix the allele values.\")\n\n def __printProgressBar(self, iteration, total, prefix='', suffix='',\n length=100, fill='█', printEnd=\"\\r\"):\n \"\"\"\n Call in a loop to create terminal progress bar.\n\n Input:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n printEnd - Optional : end character (e.g. 
\"\\r\", \"\\r\\n\") (Str)\n \"\"\"\n percent = f\"{100 * (iteration / total):.1f}\"\n filledLength = int(length * iteration // total)\n bar = fill * filledLength + '-' * (length - filledLength)\n print(f'\\r{prefix} |{bar}| {percent}% {suffix}', end=printEnd)\n # Print New Line on Complete\n if iteration == total:\n print()\n\n def saveTreeFile(self):\n \"\"\"\n Save the tree data into a tree file.\n\n Ask the user for the name and destination of tree file created by\n this script.\n \"\"\"\n print('\\nEnter the destination for the tree file: ')\n dest_file_path = filedialog.asksaveasfilename(\n title=\"Select an destination and name for your Tree file\",\n filetypes=((\"Newick Tree\", \"*.nwk\"), (\"All files\", \"*.*\"))\n )\n if dest_file_path == '':\n raise CancelException(\"Save file cancelled\")\n # In case of file overwriting, prevent saving a \".nwk.nwk\" file\n if dest_file_path[-4:] == '.nwk':\n dest_file_path = dest_file_path[:-4]\n write(njtree.tree, f\"{dest_file_path}.nwk\", 'newick')\n # Correct the apostrophes caharacter in final file\n # for MEGA software usage (otherwise it raise error)\n self.correct_newick(f\"{dest_file_path}.nwk\")\n print(f'\\tTree saved in {dest_file_path}.nwk')\n\n def correct_newick(self, file_path):\n \"\"\"\n Correct the Newick tree apostrophes.\n\n Correct the Newick tree apostrophes from \"\\'\" to \"''\" to be readable\n with Mega (otherwise it raise a error). It overwrite the file with the\n modified version.\n\n Parameters\n ----------\n file_path : Str\n The file path of the Newick tree file (*.nwk)\n\n Returns\n -------\n None.\n\n \"\"\"\n # open file\n with open(file_path, mode='r') as f:\n file = f.read()\n # replace the apostrophes\n newfile = re.sub(r\"\\\\'\", \"''\", file)\n\n # overwrite the file\n with open(file_path, mode='w') as j:\n j.write(newfile)\n\n def buildTree(self, data=None, formula='Cavalli'):\n \"\"\"\n Take the VNTR data and return a Neighbor-Joining tree.\n\n The distance matrix can be calculated with either the Cavalli-Sforza\n chord distance formula or the Nei's distance formula.\n\n Input:\n data (optionnal): the VNTR data to be analysed.\n Instance data is used by default.\n formula (optionnal): Cavalli (default) or Nei\n \"\"\"\n # Verify if excel data is loaded / raise error otherwise\n if data is None and self.excelData:\n data = self.excelData\n else:\n raise AttributeError('No data was loaded into instance.')\n\n # Calculate the distance matrix from the loaded data according to\n # the specified formula\n algo = None\n if formula == 'Cavalli':\n algo = self.__cdCavalliSforza\n elif formula == 'Nei':\n algo = self.__neiDistance\n else:\n raise ValueError('\"' + formula + '\"' + \" doesn't exist.\")\n\n self.distanceMatrix = self.__geneticDistance(data, algo)\n\n # Build the tree from the distance matrix\n self.tree = self.__neighbor(self.distanceMatrix)\n\n def __cdCavalliSforza(self, dsum):\n \"\"\"\n Return the Cavalli-Sforza chord distance.\n\n Uses the Cavalli-Sforza chord distance formula.\n Distance of 0 indicate that 2 sample are identical.\n Max distance is (2/pi)*sqrt(2) = 0.900316\n\n Input:\n dsum: sum of the squareroot of the multiplication of each allele\n frequency between 2 pop.\n\n Return\n ------\n Cavalli-Sforza chord distance\n \"\"\"\n return (2/(pi*self.lociCount))*(2*abs(1-dsum))**0.5\n\n def __neiDistance(self, dsum):\n \"\"\"\n Return Nei's DA distance.\n\n Uses the Nei's DA distance 1983 formula.\n Distance of 0 indicate that 2 sample are identical.\n Max distance is 1.\n\n 
Input:\n dsum: sum of the squareroot of the multiplication of each allele\n frequency between 2 pop.\n\n Return\n ------\n Nei's distance\n \"\"\"\n return abs(1-dsum)/self.lociCount\n\n def __geneticDistance(self, data, algo):\n \"\"\"\n Build a distance matrix with the VNTR data.\n\n Input:\n data : the VNTR data loaded by the loadExcelData method.\n algo : the formula used to calculate the genetic distance\n\n Returns\n -------\n DistanceMatrix\n\n \"\"\"\n dmatrix = DistanceMatrix([pop.name for pop in data]) # Initialize\n dsum = 0 # sum of (allele frequency popA * allele frequency popB)**0.5\n distance = 0 # genetic distance between 2 pop\n num_pop = len(data) # For visual progress\n current_pop = 1 # For progress bar\n\n # Initialize the progress bar at 0%\n print()\n self.__printProgressBar(0, num_pop, 'Calculating Distances:',\n 'Complete', 50)\n # for each pop\n for pop in data:\n self.__printProgressBar(current_pop, num_pop,\n 'Calculating Distances:',\n f'Complete ({current_pop}/{num_pop})',\n 50)\n current_pop += 1\n # compare to all previous samples excluding self\n # for versus in data: # Square matrix\n for versus in data[:data.index(pop)]: # Lower triangular matrix\n distance = 0 # initialize distance for pop vs versus\n remainingLocus = self.lociCount\n # for each locus of pop\n for locus in pop.loci:\n remainingLocus -= 1\n dsum = 0 # initialize sum for a single locus\n # for each allele in locus of pop\n for allele in pop.loci[locus]:\n # Check if versus has current pop locus\n if locus in versus.loci:\n # Check if versus has same allele\n if allele in versus.loci[locus]:\n # sum of sqrt of allele frequencies product\n dsum += (pop.frequency(locus)[allele] *\n versus.frequency(locus)[allele])**0.5\n # Adding the distance for this locus\n distance += algo(dsum) # use the supplied formula fonction\n # Adding distance for missing locus\n distance += algo(0) * remainingLocus\n # Place calculated distance between pop and versus in matrix\n dmatrix[pop.name, versus.name] = distance\n return dmatrix\n\n def __neighbor(self, distance_matrix):\n \"\"\"\n Construct and return a Neighbor-Joining tree.\n\n Input:\n distance_matrix : a DistanceMatrix instance\n\n Returns\n -------\n Bio.Phylo.BaseTree instance\n \"\"\"\n print(\"\\nStarting Neighbor-Joining.\")\n # Formulas for the neighbor-joining matrix and minimum pair\n rptsum = lambda arr: np.repeat(np.sum(arr)/(np.size(arr)-2),\n np.size(arr))\n mapvsum = lambda mat: np.matrix([rptsum(line) for line in mat])\n idxmin = lambda mat: np.unravel_index(np.argmin(mat), np.shape(mat))\n\n # make a copy of the distance matrix to be used\n dm = copy.deepcopy(distance_matrix)\n tot_len = len(distance_matrix) # for progress bar\n\n # init terminal clades\n clades = [BaseTree.Clade(None, name) for name in dm.names]\n\n # init minimum index\n min_i = 0\n min_j = 0\n inner_count = 0\n # special cases for Minimum Alignment Matrices\n if len(dm) == 1:\n root = clades[0]\n\n return BaseTree.Tree(root, rooted=False)\n elif len(dm) == 2:\n # minimum distance will always be [1,0]\n min_i = 1\n min_j = 0\n clade1 = clades[min_i]\n clade2 = clades[min_j]\n clade1.branch_length = dm[min_i, min_j] / 2.0\n clade2.branch_length = dm[min_i, min_j] - clade1.branch_length\n inner_clade = BaseTree.Clade(None, \"Inner\")\n inner_clade.clades.append(clade1)\n inner_clade.clades.append(clade2)\n clades[0] = inner_clade\n root = clades[0]\n\n return BaseTree.Tree(root, rooted=False)\n\n # Initialize the progress bar at 0%\n self.__printProgressBar(0, tot_len, 
'Joining:', 'Complete', 50)\n while len(dm) > 2:\n # Progress bar update\n current_pos = tot_len - len(dm)\n self.__printProgressBar(current_pos+3, tot_len, 'Joining:',\n f'Complete ({current_pos+3}/{tot_len})',\n 50)\n\n # calculate prerequisites for neighbor-joining matrix\n SH = mapvsum(dm.matrix)\n SV = SH.transpose()\n\n # Build the neighbor-joining matrix M\n Id = np.identity(len(dm.matrix))\n M = dm.matrix + (np.multiply(Id, SH + SV) - SH - SV)\n\n # Find minimum distance pair\n min_i, min_j = idxmin(M+Id) # +Id to prevent min_i == min_j\n\n # create clades with the minimum distance pair found\n clade1 = clades[min_i]\n clade2 = clades[min_j]\n inner_count += 1\n inner_clade = BaseTree.Clade(None, \"Inner\" + str(inner_count))\n inner_clade.clades.append(clade1)\n inner_clade.clades.append(clade2)\n\n # assign branch lengths\n clade1.branch_length = (\n dm[min_i, min_j] + SH[min_i, min_j] - SV[min_i, min_j]\n ) / 2.0\n clade2.branch_length = dm[min_i, min_j] - clade1.branch_length\n\n # update clades list with new clade pair\n clades[min_j] = inner_clade\n del clades[min_i]\n\n # rebuild distance matrix,\n # set the distances of new clade at the index of min_j\n u = [(dm[min_i, k] + dm[min_j, k] - dm[min_i, min_j]) / 2 for k in\n range(len(dm))]\n dm.matrix[min_j] = u\n dm.matrix[:, min_j] = np.matrix(u).transpose()\n dm.names[min_j] = \"Inner\" + str(inner_count)\n del dm[min_i]\n\n # set the last clade as one of the child of the inner_clade\n root = None\n if clades[0] == inner_clade:\n clades[0].branch_length = 0\n clades[1].branch_length = dm[1, 0]\n clades[0].clades.append(clades[1])\n root = clades[0]\n else:\n clades[0].branch_length = dm[1, 0]\n clades[1].branch_length = 0\n clades[1].clades.append(clades[0])\n root = clades[1]\n\n return BaseTree.Tree(root, rooted=False)\n\n def alleleFrequency(self):\n \"\"\"Print an allele frequency table.\"\"\"\n for pop in self.excelData:\n for locus in pop.loci:\n for allele, freq in pop.frequency(locus).items():\n print(pop.name, locus, allele, round(freq, ndigits=4))\n\n def executeCommand(self):\n \"\"\"Excute the workflow.\"\"\"\n try:\n # Load an excel file contain VNTR data\n self.loadExcelData()\n query = input('\\nIs the displayed information correct? 
[y/n] ')\n if query.lower() != 'y':\n raise CancelException(\n \"VNTR information considered incorrect by user.\")\n else:\n # Build a phylogenetic tree\n # self.buildTree(formula='Nei')\n self.buildTree(formula='Cavalli')\n print('\\nNeighbor-Joining tree constructed.')\n # Save the tree in specified file\n self.saveTreeFile()\n input('\\nPress Enter to exit.')\n except CancelException as e:\n print(f'\\n***{e.message}***')\n input('Press Enter to exit.')\n except Exception as e:\n print(f'\\n***Unexpected error: {e}***')\n input('Press Enter to exit.')\n\n\nclass Pop():\n \"\"\"\n Representation of a single sample.\n\n Attributes\n ----------\n name: Name of sample (str)\n \"\"\"\n\n def __init__(self, name):\n\n self.name = name\n\n # loci: a dict of each locus name containing alleles values.\n # {str_loci_name : {int_allele_value : int_count, }, }\n self.loci = {}\n\n def addLocus(self, locus: str, allele: int):\n \"\"\"\n Add a locus and its allele value in the loci dict.\n\n If the locus is already present, the new allele is added to the dict\n If the allele is already present for that loci, +1 to count.\n \"\"\"\n if locus not in self.loci:\n self.loci[locus] = {allele: 1}\n elif allele in self.loci[locus]:\n self.loci[locus][allele] += 1\n else:\n self.loci[locus][allele] = 1\n\n def frequency(self, locus):\n \"\"\"\n Return the allelic frequency.\n\n Return a dict {allele:frequency} of the frequency of each allele\n for the specified locus.\n\n Ex.: for 3 alleles {650:2, 720:1} => {650:0.6666, 720:0.3333}\n (Divides the count of each allele by the total number of allele for\n the specified loci.)\n \"\"\"\n nb_allele = 0\n for allele in self.loci[locus]:\n nb_allele += self.loci[locus][allele]\n return {i: (self.loci[locus][i]/nb_allele) for i in self.loci[locus]}\n\n def __str__(self):\n \"\"\"Return the name of the pop.\"\"\"\n return self.name\n\n\nclass CancelException(BaseException):\n \"\"\"Exception for dealing with the cancel command from user.\"\"\"\n\n def __init__(self, message):\n self.message = message\n\n\nif __name__ == '__main__':\n\n njtree = NJTreeConstructor()\n njtree.executeCommand()\n","repo_name":"Desgaplu/VNTR_NJtree","sub_path":"src/VNTR_NJtree.py","file_name":"VNTR_NJtree.py","file_ext":"py","file_size_in_byte":26133,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"8082385863","text":"import json\nimport os\nimport pytest\nfrom pathlib import Path\nfrom . import ColumnExtractTestData\n\n\n# This conftest.py file effectively configures pytest before any tests are run.\n#\n# We need to tell pytest to generate test cases for each of the subdirectories\n# inside the 'column-extractor-test-cases' directory. First, find that\n# directory relative to this file:\ncolumn_extractor_test_cases_directory = Path(os.path.dirname(os.path.realpath(__file__))) / 'column-extractor-test-cases'\n\n# ... then find each subdirectory of the column_extractor_test_cases directory:\ncolumn_extractor_test_case_paths = [dir for dir in column_extractor_test_cases_directory.iterdir() if dir.is_dir()]\n\n# ... and lastly, find the names of each of those directories. These will be\n# used to name the tests. That way, when a test fails, we'll know exactly which\n# one.\ncolumn_extractor_test_case_names = [path.name for path in column_extractor_test_case_paths]\n\n\n# Create a fixture to give to test functions (like the test_column_extract()\n# function). 
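A hypothetical consumer (assumed\n# names, not from this repo) might look like:\n#\n#     def test_column_extract(test_column_extract_data):\n#         actual = extract_columns(test_column_extract_data.sql)\n#         assert actual == test_column_extract_data.columns\n#\n# 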
The fixture can be parametrized - meaning that a test case is\n# generated for each element of `params`.\n#\n# Note that this function takes an argument of type FixtureRequest. This is\n# provided by pytest when it is setting up the fixture. Notably, the\n# FixtureRequest object has an attribute called `param`. This attribute points\n# to an element in the `params` argument provided in the decorator.\n@pytest.fixture(\n    # Generate a test case for each of the elements inside of this argument:\n    params=column_extractor_test_case_paths,\n\n    # ... and the name of each test case is determined by this argument. For\n    # example, the first element in the previous argument will be named with\n    # the first element in this argument, and so on.\n    ids=column_extractor_test_case_names\n)\ndef test_column_extract_data(request: pytest.FixtureRequest) -> ColumnExtractTestData:\n    # The param of this request is given by the `params` argument in the\n    # decorator. We know the type of this is Path - in fact, we know it is an\n    # element of the column_extractor_test_case_paths list defined above.\n    test_case_dir: Path = request.param\n\n    # By convention, each test case holds the SQL 'CREATE TABLE' script in a\n    # file called 'given.sql'.\n    given_sql_file = test_case_dir / 'given.sql'\n\n    # Likewise, by convention, each test case holds the list of columns present\n    # in the 'CREATE TABLE' script in the 'expected-columns.json' file.\n    expected_columns_file = test_case_dir / 'expected-columns.json'\n\n    # Read in the two files and return their contents in a\n    # ColumnExtractTestData object. This object is what is given to the\n    # test_column_extract() test function.\n    given_sql = given_sql_file.read_text()\n    with open(expected_columns_file, 'r') as ecf:\n        expected_columns = set(json.load(ecf))\n\n    return ColumnExtractTestData(sql=given_sql, columns=expected_columns)\n","repo_name":"DavidPratt512/pytest-with-data-files","sub_path":"column-extractor/test/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2943,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}{"seq_id":"106447154","text":"\n\n# File handling / working with text files:\n## Example 1: Writing data into a text file\n\nfile = open(\"D:\PYTHON-PROJECTS\demofile\demo.txt\", 'w') # open the file in write mode\nfile.write(\"This is my first statement....\\n\") # write one line into the text file\nfile.write(\"This is my second statement....\\n\")\nfile.write(\"This is my third statement....\\n\")\nfile.write(\"This is my fourth statement....\\n\")\nfile.write(\"This is my fifth statement....\\n\")\nfile.write(\"This is my sixth statement....\\n\")\nfile.close() # whenever we open a file we need to close it\nprint(\"program completed\")\n\n## Reading data from the text file\n\nfile = open(\"D:\PYTHON-PROJECTS\demofile\demo.txt\", 'r') # first we open the file in read mode with 'r'\n#print(file.read()) # this would print the file's entire contents\nprint(\"Using readline method\")\n#print(file.readline()) # this would print only the first line of the file\nprint(file.readlines()) # this prints all lines of the file as a list\nfile.close()\n\n## Appending new data to the text file\nfile = open(\"D:\PYTHON-PROJECTS\demofile\demo.txt\", 'a')\nfile.write(\"This is my seventh line....\\n\")\nfile.write(\"This is my eighth line....\\n\")\nfile.close() # closing the file also saves every 
change.\nprint(\"program is completed\")\n\n\n","repo_name":"hsrajput311621/AdobeGit","sub_path":"Python-Projects/Modules&Packages/pack1/package_stu/File_Handling.py","file_name":"File_Handling.py","file_ext":"py","file_size_in_byte":1364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35094349040","text":"# -*- coding: utf-8 -*-\n# @Author: Yihao Chen\n# @Date: 2023-06-07 21:02:55\n# @Last Modified by: Yihao Chen\n# @Last Modified time: 2023-06-07 21:50:06\n\nfrom .gather import Gather\n\n__all__ = [\n \"Gather\"\n]","repo_name":"IDEA-Research/DisCo-CLIP","sub_path":"disco/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":209,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"48"} +{"seq_id":"25533682909","text":"import datetime\nimport time\nimport unittest\nfrom dataclasses import dataclass\n\nimport numpy as np\nimport pandas as pd\n\nfrom deephaven import DHError, dtypes, new_table, time as dhtime\nfrom deephaven import empty_table\nfrom deephaven.column import byte_col, char_col, short_col, bool_col, int_col, long_col, float_col, double_col, \\\n string_col, datetime_col, jobj_col, ColumnType\nfrom deephaven.constants import MAX_BYTE, MAX_SHORT, MAX_INT, MAX_LONG\nfrom deephaven.jcompat import j_array_list\nfrom tests.testbase import BaseTestCase\n\n\nclass ColumnTestCase(BaseTestCase):\n\n def test_column_type(self):\n normal_type = ColumnType.NORMAL.value\n self.assertEqual(ColumnType.NORMAL, ColumnType(normal_type))\n\n def test_column_error(self):\n jobj = j_array_list([1, -1])\n with self.assertRaises(DHError) as cm:\n bool_input_col = bool_col(name=\"Boolean\", data=[True, 'abc'])\n\n self.assertNotIn(\"bool_input_col\", dir())\n\n with self.assertRaises(DHError) as cm:\n _ = byte_col(name=\"Byte\", data=[1, 'abc'])\n\n with self.assertRaises(DHError) as cm:\n _ = char_col(name=\"Char\", data=[jobj])\n\n with self.assertRaises(DHError) as cm:\n _ = short_col(name=\"Short\", data=[1, 'abc'])\n\n with self.assertRaises(DHError) as cm:\n _ = int_col(name=\"Int\", data=[1, [1, 2]])\n\n with self.assertRaises(DHError) as cm:\n _ = long_col(name=\"Long\", data=[1, float('inf')])\n\n with self.assertRaises(DHError) as cm:\n _ = float_col(name=\"Float\", data=[1.01, 'NaN'])\n\n with self.assertRaises(DHError) as cm:\n _ = double_col(name=\"Double\", data=[1.01, jobj])\n\n with self.assertRaises(DHError) as cm:\n _ = string_col(name=\"String\", data=[1, -1.01])\n\n with self.assertRaises(TypeError) as cm:\n _ = datetime_col(name=\"Datetime\", data=[round(time.time()), False])\n\n with self.assertRaises(DHError) as cm:\n _ = jobj_col(name=\"JObj\", data=[jobj, CustomClass(-1, \"-1\")])\n\n def test_array_column(self):\n strings = [\"Str1\", \"Str1\", \"Str2\", \"Str2\"]\n doubles = [1.0, 2.0, 4.0, 8.0]\n numbers = [1, 2, 3, 4]\n characters = [65, 66, 67, 68]\n bools = [True, True, False, False]\n test_table = new_table([\n string_col(\"StringColumn\", strings),\n double_col(\"Decimals\", doubles),\n float_col(\"Floats\", doubles),\n byte_col(\"Bytes\", numbers),\n short_col(\"Shorts\", numbers),\n char_col(\"Chars\", characters),\n int_col(\"Ints\", numbers),\n long_col(\"Longs\", numbers),\n bool_col(\"Bools\", bools)\n ]\n )\n\n test_table = test_table.group_by([\"StringColumn\"])\n\n self.assertIsNone(test_table.columns[0].component_type)\n self.assertEqual(test_table.columns[1].component_type, dtypes.double)\n 
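# the remaining grouped columns report their original element type via component_type\n        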
self.assertEqual(test_table.columns[2].component_type, dtypes.float32)\n        self.assertEqual(test_table.columns[3].component_type, dtypes.byte)\n        self.assertEqual(test_table.columns[4].component_type, dtypes.short)\n        self.assertEqual(test_table.columns[5].component_type, dtypes.char)\n        self.assertEqual(test_table.columns[6].component_type, dtypes.int32)\n        self.assertEqual(test_table.columns[7].component_type, dtypes.long)\n        self.assertEqual(test_table.columns[8].component_type, dtypes.bool_)\n\n    def test_vector_column(self):\n        t = empty_table(0).update_view(\"StringColumn=`abc`\").group_by()\n        self.assertTrue(t.columns[0].data_type.j_name.endswith(\"ObjectVector\"))\n        self.assertEqual(t.columns[0].component_type, dtypes.string)\n        self.assertIsNone(t.columns[0].data_type.qst_type)\n\n    def test_numeric_columns(self):\n        x = [MAX_BYTE, MAX_SHORT, MAX_INT, MAX_LONG, 1, 999999]\n        n = len(x)\n\n        def get_x(i) -> int:\n            return x[i]\n\n        t_list = empty_table(n).update([\"X = x[i]\"])\n        t_func = empty_table(n).update([\"X = get_x(i)\"])\n        # We want to test that casting on both PyObject and JObject works as expected.\n        self.assertEqual(t_list.columns[0].data_type, dtypes.PyObject)\n        self.assertEqual(t_func.columns[0].data_type, dtypes.int_)\n        t_func_str = t_func.to_string()\n        for v in x:\n            self.assertIn(str(int(v)), t_func_str)\n\n        t_list_integers = t_list.update(\n            [\"A = (byte)X\", \"B = (short)X\", \"C = (int)X\", \"D = (long)X\", \"E = (float)X\", \"F = (double)X\"])\n        self.assertEqual(t_list_integers.columns[1].data_type, dtypes.byte)\n        self.assertEqual(t_list_integers.columns[2].data_type, dtypes.short)\n        self.assertEqual(t_list_integers.columns[3].data_type, dtypes.int32)\n        self.assertEqual(t_list_integers.columns[4].data_type, dtypes.long)\n        self.assertEqual(t_list_integers.columns[5].data_type, dtypes.float32)\n        self.assertEqual(t_list_integers.columns[6].data_type, dtypes.double)\n\n        t_func_integers = t_func.update(\n            [\"A = (byte)X\", \"B = (short)X\", \"C = (int)X\", \"D = (long)X\", \"E = (float)X\", \"F = (double)X\"])\n        self.assertEqual(t_func_integers.columns[1].data_type, dtypes.byte)\n        self.assertEqual(t_func_integers.columns[2].data_type, dtypes.short)\n        self.assertEqual(t_func_integers.columns[3].data_type, dtypes.int32)\n        self.assertEqual(t_func_integers.columns[4].data_type, dtypes.long)\n        self.assertEqual(t_func_integers.columns[5].data_type, dtypes.float32)\n        self.assertEqual(t_func_integers.columns[6].data_type, dtypes.double)\n\n    def test_datetime_col(self):\n        inst = dhtime.to_j_instant(round(time.time()))\n        dt = datetime.datetime.now()\n        _ = datetime_col(name=\"Datetime\", data=[inst, dt, None])\n        self.assertEqual(_.data_type, dtypes.Instant)\n\n        ts = pd.Timestamp(dt)\n        np_dt = np.datetime64(dt)\n        data = [ts, np_dt, dt]\n        # test if we can convert to numpy datetime64 array\n        np.array([pd.Timestamp(dt).to_numpy() for dt in data], dtype=np.datetime64)\n        _ = datetime_col(name=\"Datetime\", data=data)\n        self.assertEqual(_.data_type, dtypes.Instant)\n\n        data = np.array(['1970-01-01T00:00:00.000-07:00', '2020-01-01T01:00:00.000+07:00'])\n        np.array([pd.Timestamp(str(dt)).to_numpy() for dt in data], dtype=np.datetime64)\n        _ = datetime_col(name=\"Datetime\", data=data)\n        self.assertEqual(_.data_type, dtypes.Instant)\n\n        data = np.array([1, -1])\n        data = data.astype(np.int64)\n        _ = datetime_col(name=\"Datetime\", data=data)\n        self.assertEqual(_.data_type, dtypes.Instant)\n\n@dataclass\nclass CustomClass:\n    f1: int\n    f2: str\n\n\nif __name__ == '__main__':\n    
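# run every test in this module when the file is executed directly\n    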
unittest.main()\n","repo_name":"deephaven/deephaven-core","sub_path":"py/server/tests/test_column.py","file_name":"test_column.py","file_ext":"py","file_size_in_byte":6790,"program_lang":"python","lang":"en","doc_type":"code","stars":210,"dataset":"github-code","pt":"48"}{"seq_id":"30880743867","text":"\n\nclass interface:\n    def __init__(self):\n        self.itemFile = open('myfile.txt','r')\n        self.managerFile = open('managerDetails.txt','r')\n\n    def MainMenu(self):\n        print('*** Welcome to our Online store ***')\n        print('Print 1 for manager and 2 for customer.')\n    def managerMenu(self):\n        print('hello manager!')\n    def inputInteger(self):\n        loop = True\n        while loop:\n            try:\n                self.value1 = int(input())\n                loop = False\n            except ValueError:\n                print('Enter an integer value only!')\n            else:\n                return self.value1\n\n    def inputString(self):\n        special_chars = '@_!#$%^&*()<>?/\\|}{~:'\n        loop = True\n        while loop:\n            self.value2 = input()\n            # reject purely numeric input and any special character\n            if self.value2.isdigit() or any(ch in special_chars for ch in self.value2):\n                print('Invalid entry!')\n                continue\n            elif self.value2 == '':\n                print('Write something!')\n            else:\n                loop = False\n\n        return self.value2\n\n\nclass check(interface):\n    def checker(self):\n        s = super().inputInteger()\n        print(s)\n\n\na=interface()\nc= check()\n\n","repo_name":"TaimoorJawaid/shoppingTrolly","sub_path":"interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"20800243243","text":"import pandas as pd\nimport numpy as np\n# import json\nimport os\n\nclass preprocessss:\n    @staticmethod\n    def exceltojson(excelfile):\n        # print(excelfile)\n        df = pd.read_excel(excelfile, na_filter=False, engine='openpyxl')\n        df.columns = (df.columns + \" \" + df.iloc[0])\n        df = df.drop(0)\n        df2 = df.drop_duplicates(keep='first')\n        df2.columns = df2.columns.str.strip()\n        name = excelfile.split('/')[2].split('.')[0]\n        # print(name)\n        # print(os.listdir())\n        df2.to_json(path_or_buf = os.path.join('json_file',name+'.json'), orient ='records')\n\n# preprocessss.exceltojson('xlsx_file\\\\Scan_10.xlsx')","repo_name":"ron4u1998/hardwork","sub_path":"lasan/excel_to_json.py","file_name":"excel_to_json.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"29393940446","text":"from collections import deque \n# number of vertices and edges\nN, M = map(int, input().split())\n\n# read the graph input (an undirected graph is assumed)\nG = [[] for _ in range(N)]\nfor i in range(M):\n    a, b = map(int, input().split())\n    a-=1\n    b-=1\n    G[a].append(b)\n    G[b].append(a)\n\n# data structures for BFS\ndist = [False]*N # initialize every vertex as \"unvisited\"\nque = deque()\n\n# initial condition (vertex 0 is the start node)\ndist[0] = True\nque.append(0) # schedule vertex 0 for visiting\nans = [0]*N\n\n# start BFS (search until the queue is empty)\nwhile len(que)!=0:\n    v = que.popleft() # take the front vertex out of the queue\n\n    # examine every vertex reachable from v\n    for nv in G[v]:\n        # do not revisit vertices that are already discovered\n        if dist[nv]:\n            continue \n\n        # for the newly discovered vertex nv, record its predecessor and push it onto the queue\n        dist[nv] = True\n        ans[nv] = v \n        que.append(nv)\n\n    # output the result (the predecessor of each vertex on the path toward vertex 0)\nif all(dist):\n    print(\"Yes\")\n    for v in range(1,N):\n        print(ans[v]+1)\nelse:\n    print(\"No\")\n\n","repo_name":"nasama/procon","sub_path":"atcoder.jp/abc168/abc168_d/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"5287284866","text":"import random as rd\r\n\r\ndef gen_ligne(nbr_lignes):\r\n    aps = []\r\n    apcs = []\r\n    rayonAPC = 45\r\n    rayonAP = 30\r\n    for i in range(1, nbr_lignes+1):\r\n        rayon = 
rd.choice([rayonAP, rayonAPC])\r\n if rayon == rayonAPC and len(apcs) < nbr_lignes//10:\r\n apcs.append(i)\r\n else:\r\n rayon = rayonAP\r\n ligne = f\"{i} ({rd.randint(-100, 100)}, {rd.randint(-100, 100)}) {rayon}\"\r\n aps.append(ligne)\r\n return aps, apcs\r\n\r\ndef gen_fichier(longeur):\r\n aps, apcs = gen_ligne(longeur)\r\n with open(\"Test/test_AP.txt\", 'w') as f:\r\n for ap in aps:\r\n f.write(f'{ap}\\n')\r\n\r\n with open(\"Test/test_APC.txt\", 'w') as f:\r\n for apc in apcs:\r\n f.write(f\"{apc} \")\r\n\r\n\r\ngen_fichier(30)\r\n\r\n","repo_name":"AlexandroAR/SAE3.02","sub_path":"Test/generateur.py","file_name":"generateur.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"6742268275","text":"import cv2\nimport numpy as np\nfrom Cell import Cell\nfrom imutils import grab_contours\nfrom matplotlib import pyplot as plt\nfrom Transformation import Transformation\nfrom tensorflow.python.keras.saving.save import load_model\nfrom skimage.segmentation import clear_border\nfrom imutils.perspective import four_point_transform\nimport os\nfrom tensorflow import logging\nlogging.set_verbosity(logging.ERROR)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\n# image files\nfiles = [\n 'easy/sudoku1.png',\n 'easy/sudoku2.png',\n 'easy/sudoku3.png',\n 'easy/sudoku4.png',\n 'easy/sudoku5.png',\n 'easy/sudoku6.png',\n 'easy/sudoku7.png',\n 'medium/sudoku1.png',\n 'medium/sudoku2.png',\n 'medium/sudoku3.png',\n 'medium/sudoku4.png',\n 'medium/sudoku5.jpg',\n 'medium/sudoku6.jpg',\n 'medium/sudoku7.jpg',\n 'hard/sudoku1.jpg',\n 'hard/sudoku2.png',\n 'hard/sudoku3.png',\n 'hard/sudoku4.png',\n 'hard/sudoku5.png',\n 'hard/sudoku6.png',\n]\n\n\nmodel = load_model('digits.h5')\n\n\ndef predict_digit(img):\n \"\"\"Determine a digit from an image using CNN model\"\"\"\n\n resized_img = cv2.resize(img, (28, 28))\n np_img = np.array(resized_img)\n np_img = np_img.reshape(1, 28, 28, 1)\n np_img = np_img/255.0\n res = model.predict([np_img])[0]\n res = list(zip(range(0, 10), res))\n res.sort(key=lambda x: x[1])\n return res[-1][0] if res[-1][0] != 0 else res[-2][0]\n\n\ndef save_transformations(file_name, transformations, axes):\n \"\"\"Save prepared transformations to result directory\"\"\"\n\n # plot all transformations\n for ax, transformation in zip(axes, transformations.values()):\n transformation.plot(ax)\n\n # save transformation steps\n plt.savefig(f'./results/{file_name}', dpi=400)\n plt.close()\n\n\nfor file in files:\n print(f'Starting: ./images/{file}')\n # load image\n bgr_image = cv2.imread(f'./images/{file}')\n\n # color conversions\n rgb_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2RGB)\n grayscale_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)\n\n # transformations\n median = cv2.medianBlur(grayscale_image, 3)\n\n canny = cv2.Canny(median, 50, 220)\n\n kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))\n closing = cv2.morphologyEx(canny, cv2.MORPH_CLOSE, kernel)\n\n # find contours\n cv2_contours = cv2.findContours(\n closing, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n imutils_contours = sorted(grab_contours(cv2_contours),\n key=cv2.contourArea, reverse=True)\n\n # check contour shape\n approximated_contours = [cv2.approxPolyDP(\n contour, 0.02 * cv2.arcLength(contour, True), True) for contour in imutils_contours]\n four_pointed_contours = [\n contour for contour in approximated_contours if len(contour) == 4]\n\n # if no rectangle found then algorithm failed\n if not 
len(four_pointed_contours):\n\n # image transformations for plotting\n transformations = {}\n\n # matplotlib plots\n px = 1 / plt.rcParams['figure.dpi']\n fig, axes = plt.subplots(\n figsize=(1920 * px, 1080 * px), nrows=2, ncols=3)\n axes = list(np.array(axes).flat)\n\n # load error image\n error = cv2.imread(\n f'./images/others/no_solution.png', cv2.IMREAD_GRAYSCALE)\n\n # prepare for plotting picture transformations\n transformations['rgb'] = Transformation(\n rgb_image, 'Original Image', False)\n transformations['grayscale'] = Transformation(\n grayscale_image, 'Grayscale', True)\n transformations['median'] = Transformation(median, 'Median Blur', True)\n transformations['canny'] = Transformation(\n canny, 'Canny Edge Detection', True)\n transformations['closing'] = Transformation(\n closing, 'Dilation followed by Erosion', True)\n transformations['error'] = Transformation(\n error, 'Status', True)\n\n save_transformations(file, transformations, axes)\n # if rectangle found then find solution\n else:\n # assume that the biggest rectangle is the puzzle border\n sudoku_border = four_pointed_contours[0]\n\n # display border on original image\n outlined = rgb_image.copy()\n cv2.drawContours(outlined, [sudoku_border], -1, (0, 255, 0), 2)\n\n # crop the original image using found border\n cropped = four_point_transform(\n rgb_image, sudoku_border.reshape(4, 2))\n\n # calculate the size of puzzle\n cropped_height, cropped_width, _cropped_channels = cropped.shape\n cell_height = cropped_height // 9\n cell_width = cropped_width // 9\n\n # divide puzzle into cells\n cells = [[Cell({'original': cropped[y * cell_height: (y + 1) * cell_height, x *\n cell_width: (x + 1) * cell_width]}, None, None) for x in range(9)] for y in range(9)]\n\n # find digit for each cell\n for row in cells:\n for cell in row:\n # transformations\n cell.image['grayscale'] = cv2.cvtColor(\n cell.image['original'], cv2.COLOR_BGR2GRAY)\n cell.image['otsu'] = cv2.threshold(\n cell.image['grayscale'], 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n cell.image['inverted'] = cv2.bitwise_not(cell.image['otsu'])\n cell.image['no_borders'] = clear_border(cell.image['inverted'])\n cell.image['with_digit'] = np.zeros(\n (cell_height, cell_width, 3), np.uint8)\n if cv2.countNonZero(cell.image['no_borders']) / float(cell_height * cell_width) >= 1 / 30:\n # find the digit\n cell.contains_digit = True\n cell.digit = predict_digit(cell.image['no_borders'])\n # draw the digit\n x = int(cell_width * 0.33)\n y = int(cell_height * 0.8)\n cv2.putText(cell.image['with_digit'], str(\n cell.digit), (x, y), cv2.FONT_HERSHEY_COMPLEX, min(cell_width, cell_height) / (25 / 0.5), (0, 255, 0), 2)\n else:\n # no digit exists in the given cell\n cell.contains_digit = False\n cell.digit = 0\n\n # merge cells without borders\n merged = np.concatenate([np.concatenate(\n [cell.image['no_borders'] for cell in row], axis=1) for row in cells], axis=0)\n\n # board with found digits\n with_digits = np.concatenate([np.concatenate(\n [cell.image['with_digit'] for cell in row], axis=1) for row in cells], axis=0)\n\n # image transformations for plotting\n transformations = {}\n\n # matplotlib plots\n px = 1 / plt.rcParams['figure.dpi']\n fig, axes = plt.subplots(\n figsize=(1920 * px, 1080 * px), nrows=2, ncols=7)\n axes = list(np.array(axes).flat)\n\n # prepare for plotting picture transformations\n transformations['rgb'] = Transformation(\n rgb_image, 'Original Image', False)\n transformations['grayscale'] = Transformation(\n grayscale_image, 'Grayscale', True)\n 
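# record each intermediate image so save_transformations() plots the whole pipeline\n        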
transformations['median'] = Transformation(median, 'Median Blur', True)\n transformations['canny'] = Transformation(\n canny, 'Canny Edge Detection', True)\n transformations['closing'] = Transformation(\n closing, 'Dilation followed by Erosion', True)\n transformations['outlined'] = Transformation(\n outlined, 'Found Contour', False)\n transformations['cropped'] = Transformation(\n cropped, 'Cropped Image', False)\n\n # find cell with digit\n cell_position = [0, 0]\n\n for row_index, row in enumerate(cells):\n for cell_index, cell in enumerate(row):\n if cell.digit:\n cell_position = [cell_index, row_index]\n\n # prepare for plotting cell transformation\n transformations['original_cell'] = Transformation(\n cells[cell_position[1]][cell_position[0]].image['original'], 'Extracted Cell', True)\n transformations['grayscale_cell'] = Transformation(\n cells[cell_position[1]][cell_position[0]].image['grayscale'], 'Grayscale', True)\n transformations['otsu_cell'] = Transformation(\n cells[cell_position[1]][cell_position[0]].image['otsu'], 'Otsu\\'s Thresholding', True)\n transformations['inverted_cell'] = Transformation(\n cells[cell_position[1]][cell_position[0]].image['inverted'], 'Inverted', True)\n transformations['no_borders_cell'] = Transformation(\n cells[cell_position[1]][cell_position[0]].image['no_borders'], 'Without Borders', True)\n\n # prepare for plotting the end result\n transformations['concatenated_cells'] = Transformation(\n merged, 'Merged cells', True)\n transformations['with_numbers'] = Transformation(\n with_digits, 'End result', False)\n\n save_transformations(file, transformations, axes)\n print(f'Completed: ./images/{file}')\n","repo_name":"ryszard-put/kck-lab5-sudoku-reader","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":9128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21479212695","text":"import behave\nimport time\n\n\n@behave.then(u'Service was triggered on \"{resource_type}\" again')\ndef step_impl(context, resource_type):\n context.trigger_type = resource_type\n num_try = 60\n interval = 10\n triggered = False\n\n for i in range(num_try):\n time.sleep(interval)\n if resource_type == 'item':\n item = context.dataset.items.get(item_id=context.uploaded_item_with_trigger.id)\n if item.resource_executions.list().items_count == 3:\n triggered = True\n break\n elif resource_type == 'annotation':\n item = context.annotation.item.annotations.get(annotation_id=context.annotation.id)\n if item.label == \"Edited\":\n triggered = True\n break\n elif resource_type == 'dataset':\n if context.service.executions.list().items_count == 2:\n execution = context.service.executions.list()[0][0]\n assert resource_type in execution.input.keys()\n execution = context.service.executions.list()[0][1]\n assert resource_type in execution.input.keys()\n triggered = True\n break\n\n assert triggered\n","repo_name":"oruen/dtlpy","sub_path":"tests/features/steps/triggers_repo/test_triggers_item_update.py","file_name":"test_triggers_item_update.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"41278457348","text":"import pandas as pd\nimport re\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud, STOPWORDS\n\n\n# all lyrics\nTS_lyrics = pd.read_csv('/my_path/song_lyrics.csv')\nTS_lyrics = TS_lyrics.rename(columns={'Song ': 'song', 'Artist(s) ': 'artists', 'Writer(s) ': 'writers', 'Album ': 
'album', 'Year ': 'year', 'Ref. ': 'ref'})\nTS_lyrics['year'] = TS_lyrics['year'].astype(int)\nTS_lyrics['lyrics'] = TS_lyrics['lyrics'].astype('string')\n\nTS_lyrics['word_count'] = TS_lyrics['lyrics'].apply(lambda x: x.split(' ')).apply(len)\n\nwords_per_year = TS_lyrics.groupby('year')['word_count'].mean()\nwords_per_year.plot.bar(x='year', y='word_count', title='Average number of words per song in each year')\n# save before show(): show() clears the active figure, so saving afterwards writes a blank image\nplt.savefig('/my_path/all_length_year.png')\nplt.show()\n\n\nwords_per_album = TS_lyrics.groupby('album').mean().sort_values('year', ascending=True)['word_count']\nwords_per_album.plot.bar(x='year', y='word_count', title='Average number of words per song in each album')\nplt.xticks(fontsize=8)\nplt.tight_layout()\nplt.savefig('/my_path/all_length_album.png')\nplt.show()\n\n# word cloud for the song lyrics\nall_lyrics = ' '.join(TS_lyrics.lyrics.tolist())\nstopwords = set(STOPWORDS)\nwordcloud_all_lyrics = WordCloud(stopwords=stopwords,background_color='white').generate(all_lyrics)\nplt.figure()\nplt.imshow(wordcloud_all_lyrics, interpolation='bilinear')\nplt.axis('off')\nplt.show()\nwordcloud_all_lyrics.to_file('/my_path/all_lyrics.png')\n\n\n\n# regular_lyrics: only contains songs that are from regular albums or singles\nregular_albums = {'Taylor Swift':2006, 'Fearless':2008, 'Speak Now':2010, 'Red':2012, '1989':2014, 'Reputation':2017, 'Lover':2019, 'Folklore':2020}\n\nregular = []\nstem = []\nrelease = []\nfor index, row in TS_lyrics.iterrows():\n    album_stem = re.sub(r'\\([^)]*\\)', '', row.album)\n    stem.append(album_stem)\n    if 'single' in album_stem:\n        regular.append(True)\n        release.append('single')\n    elif album_stem.rstrip() in regular_albums.keys():\n        regular.append(True)\n        release.append(album_stem.rstrip())\n    else:\n        regular.append(False)\n        release.append('other')\n\nTS_lyrics['regular'] = regular\nTS_lyrics['release'] = release\n\nTS_regular_lyrics = TS_lyrics[TS_lyrics['regular']==True]\n\nwords_per_year = TS_regular_lyrics.groupby('year')['word_count'].mean()\nwords_per_year.plot.bar(x='year', y='word_count', title='Average number of words per song in each year')\nplt.savefig('/my_path/regular_length_year.png')\nplt.show()\n\n\nwords_per_album = TS_regular_lyrics.groupby('release').mean().sort_values('year', ascending=True)['word_count']\nwords_per_album.plot.bar(x='year', y='word_count', title='Average number of words per song in each album (regular album & singles)')\nplt.xticks(fontsize=8)\nplt.tight_layout()\nplt.savefig('/my_path/regular_length_album.png')\nplt.show()\n\n# word cloud for the song lyrics\nall_lyrics = ' '.join(TS_regular_lyrics.lyrics.tolist())\nstopwords = set(STOPWORDS)\nwordcloud_regular_lyrics = WordCloud(stopwords=stopwords,background_color='white').generate(all_lyrics)\nplt.figure()\nplt.imshow(wordcloud_regular_lyrics, interpolation='bilinear')\nplt.axis('off')\nplt.show()\nwordcloud_regular_lyrics.to_file('/my_path/regular_lyrics.png')\n# Woo, a lot of (sad) love stories: never know love\n\n\ndef draw_wordcloud(text):\n    stopwords = set(STOPWORDS)\n    wordcloud_album_lyrics = WordCloud(stopwords=stopwords,background_color='white').generate(text)\n    plt.figure()\n    plt.imshow(wordcloud_album_lyrics, interpolation='bilinear')\n    plt.axis('off')\n    plt.show()\n    return wordcloud_album_lyrics\n\n\nfor album in set(TS_regular_lyrics.release):\n    text = ' '.join(TS_regular_lyrics[TS_regular_lyrics['release'] == album].lyrics.tolist())\n    wordcloud_album_lyrics = draw_wordcloud(text)\n    wordcloud_album_lyrics.to_file('/my_path/{}_lyrics.png'.format(album))\n    
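# clear the current figure so the next album's word cloud starts from a blank canvas\n    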
plt.clf()\n\n","repo_name":"tuotadiwang/Song_lyrics_TS","sub_path":"explore_lyrics.py","file_name":"explore_lyrics.py","file_ext":"py","file_size_in_byte":3845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"37982341253","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n\"\"\"\n@File: webdriver.py\n@Desc: None\n\"\"\"\nimport os\nimport time\n\nimport ddddocr\nimport requests\nfrom rich import print\nfrom rich.progress import track\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom webdriver_manager.chrome import ChromeDriverManager\n\nfrom src.spider import Spider\n\n\nclass WebDriver(object):\n    def __init__(self):\n        self.driver = self.__generateDriver()\n\n    def fetchCustoms(self, hs: str, criteria: str):\n        print(\"[bold green]Working on it, please wait patiently; if it fails, try a few more times.[/bold green]:smiley:\")\n        url = self.__generateUrl(hs, criteria)\n        self.driver.get(url)\n        time.sleep(1)\n        self.__handleLogin()\n        time.sleep(5)\n        self.driver.implicitly_wait(5)\n        self.driver.get(url)\n        time.sleep(2)\n\n        sp = Spider()\n        time.sleep(1)\n        sp.generateDom(self.driver.page_source)\n        time.sleep(1)\n        pageTotal = sp.countPage()\n\n        datas = []\n        datas.extend(sp.crawlData())\n        for p in track(range(1, pageTotal), description=\"crawl progress\"):\n            self.driver.execute_script(f\"__doPostBack('ctl00$PageContent$MyGridView1','Page${p + 1}')\")\n            time.sleep(1)\n            sp.generateDom(self.driver.page_source)\n            datas.extend(sp.crawlData())\n\n        self.driver.close()\n\n        return datas\n\n    def __handleLogin(self):\n        \"\"\"\n        Dispatch the login flow.\n\n        :return: None\n        \"\"\"\n        self.driver.execute_script(\"Login()\")\n        time.sleep(1)\n        # find the form elements, then fill in the user credentials.\n        self.driver.find_element(By.ID, \"Username\").send_keys(\"royal872@163.com\")\n        time.sleep(1)\n        self.driver.find_element(By.ID, \"Password\").send_keys(\"jngj8sl\")\n        time.sleep(1)\n        self.driver.find_element(By.CLASS_NAME, \"mb-1\").click()\n        time.sleep(1)\n\n        # If the current url is the login page, solve the captcha\n        if self.driver.current_url == \"https://www.trademap.org/stCaptcha.aspx\":\n            codeImgUrl = self.driver.find_element(By.XPATH, \"//img[@width=200]\").get_attribute(\"src\")\n            time.sleep(1)\n            code = self.__verifyCode(codeImgUrl)\n            time.sleep(1)\n            self.driver.find_element(By.ID, \"ctl00_PageContent_CaptchaAnswer\").send_keys(code)\n            time.sleep(1)\n            el = self.driver.find_element(By.NAME, \"ctl00$PageContent$ButtonvalidateCaptcha\")\n            el.click()\n\n    def __verifyCode(self, codeImgUrl: str):\n        imgPath = self.__generateCodeImg(codeImgUrl)\n        orc = ddddocr.DdddOcr(show_ad=False)\n        with open(imgPath, 'rb') as f:\n            image = f.read()\n        code = orc.classification(image)\n\n        os.remove(imgPath)\n\n        return code\n\n    def __generateDriver(self):\n        options = webdriver.ChromeOptions()\n        options.add_argument(\"--headless\") # headless mode\n        options.add_argument(\"--disable-gpu\") # disable GPU acceleration\n        options.add_argument('--incognito') # incognito mode\n        options.add_experimental_option(\"excludeSwitches\", ['enable-logging', \"enable-automation\"]) # suppress log output and evade automation detection\n        options.add_argument('blink-settings=imagesEnabled=false') # disable image loading\n        # options.add_argument(\"start-maximized\")\n        options.add_experimental_option('useAutomationExtension', False)\n        options.add_argument(\"--log-level=3\") # turn off log printing\n\n        driver = webdriver.Chrome(executable_path=ChromeDriverManager(log_level=40).install(),\n                                  options=options)\n        with open(f'{os.path.dirname(__file__)}/../stealth.min.js') as f:\n            js = f.read()\n        driver.execute_cdp_cmd(\"Page.addScriptToEvaluateOnNewDocument\", {\n            \"source\": 
js\n        })\n\n        return driver\n\n    def __generateCodeImg(self, imgUrl: str):\n        \"\"\"\n        Download the remote captcha image.\n\n        :param imgUrl: image url.\n        :return: local image path.\n        \"\"\"\n        imgPath = f'{os.path.dirname(__file__)}/../cache/tmp.jpg'\n        headers = {\n            \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36\",\n        }\n        img = requests.get(imgUrl, verify=False, headers=headers).content\n        tmpFile = open(imgPath, 'wb')\n        tmpFile.write(img)\n        tmpFile.close()\n\n        return imgPath\n\n    def __generateUrl(self, hs: str, criteria: str):\n        criteriaList = {\n            \"exports\": f\"https://www.trademap.org/Country_SelProduct.aspx?nvpm=1%7c%7c%7c%7c%7c{hs}%7c%7c%7c2%7c1%7c1%7c2%7c1%7c%7c2%7c1%7c1%7c1\",\n            \"imports\": f\"https://www.trademap.org/Country_SelProduct.aspx?nvpm=1%7c%7c%7c%7c%7c{hs}%7c%7c%7c2%7c1%7c1%7c1%7c1%7c%7c2%7c1%7c1%7c1\",\n        }\n\n        return criteriaList[criteria]\n","repo_name":"kristianhuang/customs-spider","sub_path":"src/webdriver.py","file_name":"webdriver.py","file_ext":"py","file_size_in_byte":4850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}{"seq_id":"9728127857","text":"parenteses_abertos = list()\r\nparenteses_fechados = list()\r\n\r\nexpressao = str(input('Enter a mathematical expression: '))\r\n\r\nfor i in expressao:\r\n    \r\n    if i == '(':\r\n        parenteses_abertos.append(i)\r\n    \r\n    if i == ')':\r\n        parenteses_fechados.append(i)\r\n    \r\nif len(parenteses_abertos) == len(parenteses_fechados):\r\n    print('Your expression is valid.')\r\nelse:\r\n    print('Your expression is not valid.')\r\n","repo_name":"joaobosco2011/Python-Learning","sub_path":"ex083.py","file_name":"ex083.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"7440097253","text":"\"\"\"Polygon classes with area property\n\nSome Useful Information:\n1. Each class has a property \"area\". The value of area is determined by other attributes such as\n   base and height. If other attributes change, area will also change accordingly, and area\n   cannot be set to a value directly.\n2. When a property is defined inside a class, the \"@property\" decorator is added before a function\n   of the same name. Properties are accessed the same way as other attributes, but accessing a\n   property is actually invoking a function internally. 
So, more can be done when accessing\n   properties compared to attributes.\n\"\"\"\n\n\nclass Parallelogram:\n    \"\"\"Parallelogram Class\n    \n    Attributes\n    ----------\n    base: base of parallelogram.\n    height: height of parallelogram.\n\n    Properties\n    ----------\n    area: area of parallelogram.\n    \"\"\"\n    def __init__(self, base=1, height=1):\n        self.base = base\n        self.height = height\n\n    @property\n    def area(self):\n        return self.base * self.height\n\n    def __repr__(self):\n        return ('Parallelogram\\n'\n                + f'base: {self.base}\\n'\n                + f'height: {self.height}\\n'\n                + f'area: {self.area}\\n')\n\n\nclass Triangle:\n    \"\"\"Triangle Class\n    \n    Attributes\n    ----------\n    base: base of triangle.\n    height: height of triangle.\n\n    Properties\n    ----------\n    area: area of triangle.\n    \"\"\"\n    def __init__(self, base=1, height=1):\n        self.base = base\n        self.height = height\n\n    @property\n    def area(self):\n        result = self.base * self.height / 2\n        return int(result) if int(result) == result else result\n\n    def __repr__(self):\n        return ('Triangle\\n'\n                + f'base: {self.base}\\n'\n                + f'height: {self.height}\\n'\n                + f'area: {self.area}\\n')\n\n\nclass Trapezoid:\n    \"\"\"Trapezoid Class\n    \n    Attributes\n    ----------\n    base1/base2: two bases of trapezoid.\n    height: height of trapezoid.\n\n    Properties\n    ----------\n    area: area of trapezoid.\n    \"\"\"\n    def __init__(self, base1=1, base2=2, height=1):\n        self.base1 = base1\n        self.base2 = base2\n        self.height = height\n\n    @property\n    def area(self):\n        result = (self.base1 + self.base2) * self.height / 2\n        return int(result) if int(result) == result else result\n\n    def __repr__(self):\n        shape = 'Trapezoid'\n        if self.base1 == self.base2:\n            shape = 'Parallelogram'\n        return (f'{shape}\\n'\n                + f'base1: {self.base1}\\n'\n                + f'base2: {self.base2}\\n'\n                + f'height: {self.height}\\n'\n                + f'area: {self.area}\\n')\n\n\nparallelogram = Parallelogram(4, 3)\nprint(parallelogram.area)\n# parallelogram.area = 10 # AttributeError: can't set attribute 'area'\nparallelogram.base = 5\nprint(parallelogram)\n\n# triangle = Triangle(4, 3)\n# print(triangle)\n\n# trapezoid = Trapezoid(3, 5, 4)\n# print(trapezoid)\n","repo_name":"feli10/math-coding","sub_path":"_en/g516_polygon_area/polygon_classes.py","file_name":"polygon_classes.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"74552965906","text":"#this will test the turtleplus module\n\nimport turtleplus as tp\n\n\n#create a turtle\nt = tp.t.Turtle()\n\ntp.t.title(\"TurtlePlus Python Graphics\")\n\nprint(t)\n\n#draw a square\ntp.square(t, 100)\n\n#draw a triangle\ntp.triangle(t, 150)\n\n#create 2 turtles with the create_turtles command that will both draw different shapes\nturtles = tp.create_turtles(2)\n\n#turtle 1 will be blue and draw a hexagon\nturtles[0].color(\"blue\")\ntp.hexagon(turtles[0], 100)\n\n#turtle 2 will be green and draw a pentagon\nturtles[1].color(\"green\")\ntp.pentagon(turtles[1], 100)\n\n#draw a nonagon\ntp.regular_polygon(t, 9, 100)\n\n#now we use a regular turtle command\ntp.t.rt(34)\ntp.t.fd(100)\n\n#that is the end of the test\ntp.t.mainloop()","repo_name":"LeWolfYT/turtleplus","sub_path":"turtleplustest.py","file_name":"turtleplustest.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"29414451843","text":"# import tensorflow.keras.models.Sequential as tf\n\nfrom keras.callbacks import TensorBoard\nfrom keras.layers import 
Conv2D\nfrom keras.layers import Dense\nfrom keras.layers import Flatten\nfrom keras.layers import MaxPooling2D\nfrom keras.models import Sequential\n\n# tf.keras.models.Sequential\n# initialise the sequential CNN model\nmyclassifier = Sequential()\n# step 1: convolution layer\n\nmyclassifier.add(\n    Conv2D(filters=32, kernel_size=(5, 5), strides=(1, 1), padding=\"Same\", activation='relu', input_shape=(64, 64, 3)))\nmyclassifier.add(MaxPooling2D(pool_size=(2, 2)))\n\nmyclassifier.add(Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding=\"Same\", activation='relu'))\nmyclassifier.add(MaxPooling2D(pool_size=(2, 2)))\n# flatten the feature maps into a single vector\nmyclassifier.add(Flatten())\n\n# fully connected layer\nmyclassifier.add(Dense(output_dim=128, activation=\"relu\"))\n\n# output layer (single sigmoid unit for binary classification)\nmyclassifier.add(Dense(output_dim=1, activation='sigmoid'))\n\nmyclassifier.compile(optimizer=\"adam\", loss='binary_crossentropy', metrics=['accuracy'])\n\nprint(myclassifier.summary())\n\n# image augmentation\nfrom keras.preprocessing.image import ImageDataGenerator\n\ntrain_datagen = ImageDataGenerator(rescale=1. / 255,\n                                   shear_range=0.2,\n                                   zoom_range=0.2,\n                                   horizontal_flip=True)\n\ntest_datagen = ImageDataGenerator(rescale=1. / 255)\n# this is for a Windows path\n'''training_data = train_datagen.flow_from_directory(directory=\"C:/Users/mayank/Documents/Datasets/Cat_dogs/train\",\n                                                  target_size=(64, 64),\n                                                  batch_size=32,\n                                                  class_mode='binary')'''\ntraining_data = train_datagen.flow_from_directory(directory=\"../../../../Datasets/cat_dogs/train\",\n                                                  target_size=(64, 64),\n                                                  batch_size=32,\n                                                  class_mode='binary')\n\n# this is for a Windows path\n'''test_validation = test_datagen.flow_from_directory(\"C:/Users/mayank/Documents/Datasets/Cat_dogs/test\",\n                                                   target_size=(64, 64),\n                                                   batch_size=32,\n                                                   class_mode='binary')'''\ntest_validation = test_datagen.flow_from_directory(\"../../../../Datasets/cat_dogs/test\",\n                                                   target_size=(64, 64),\n                                                   batch_size=32,\n                                                   class_mode='binary')\n\n# keras_backend.set_session(tf_debug.TensorBoardDebugWrapperSession(tf.Session(),\"localhost:7000\"))\n\ncb = TensorBoard(log_dir=(\n    \"/home/mayank-s/PycharmProjects/Data_Science/output_graph/try4\"))  # ,histogram_freq = 1, batch_size = 32,write_graph =\"TRUE\" )\n\nmyclassifier.fit_generator(generator=training_data,\n                           steps_per_epoch=30,\n                           epochs=10,\n                           validation_data=test_validation,\n                           validation_steps=200, callbacks=[cb])\n\n# make a new prediction\n\nimport numpy as np\nfrom keras.preprocessing import image\n\n# test_image=image.load_img(path=\"C:/Users/mayank/Documents/Datasets/Cat_dogs/test1/509.jpg\",target_size=(64,64))\n# Linux path\ntest_image = image.load_img(path=\"../../Datasets/cat_dogs/test1/2.jpg\", target_size=(64, 64))\ntest_image = image.img_to_array(test_image)\ntest_image = np.expand_dims(test_image, axis=0)\nresult = myclassifier.predict(test_image)\nprint(training_data.class_indices)\nprint((int(result)))\n\n# import tensorflow as tf\n# from keras import backend as keras_backend\n# from tensorflow.python import debug as tf_debug\n\n# keras_backend.set_session(tf_debug.LocalCLIDebugWrapperSession(tf.Session()),\"localhost:7000\")\n# keras_backend.set_session(tf_debug.TensorBoardDebugWrapperSession(tf.Session(),\"localhost:7000\"))\n\n# Define your keras model, called \"model\".\n# myclassifier.fit(...) 
# This will break into the TFDBG CLI.\n","repo_name":"mayanks888/AI","sub_path":"Deep learning/TensorFlow/keras practise/keras_1.py","file_name":"keras_1.py","file_ext":"py","file_size_in_byte":4204,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}{"seq_id":"30077975315","text":"\nfrom numpy import *\nimport matplotlib.pyplot as plt\nimport csv\nfrom sklearn import metrics\nfrom sklearn.metrics import auc\nimport numpy as np\n\n# load data from the file: features X and the label column\ndef loadDataSet(fileName):\n    dataMatrix=[]\n    dataLabel=[]\n\n\n    with open(fileName, 'r') as f:\n        reader = csv.reader(f)\n        m = 23 # number of features\n        print(m)\n        average_data = zeros(m) # per-feature mean: accumulate first, divide by the count afterwards\n        valid_num = zeros(m)\n        for line in reader:\n            # print(line)\n            oneline=[]\n            for i in range(len(line)-1):\n                if (line[i]!=''):\n                    oneline.append(float(line[i]))\n                    average_data[i] += float(line[i])\n                    valid_num[i] += 1\n                else:\n                    oneline.append(-1)\n            dataMatrix.append(oneline)\n            dataLabel.append(int(line[23]))\n\n    #calculate average\n    for i in range(m):\n        average_data[i] = float(average_data[i]) / valid_num[i]\n    print(\"average_data\")\n    print((average_data))\n\n    # fill in the missing data with the feature means\n    for i in range(len(dataMatrix)):\n        for j in range(len(dataMatrix[0])):\n            if dataMatrix[i][j]==-1:\n                dataMatrix[i][j] = average_data[j]\n\n    #print\n    for i in range(len(dataMatrix)):\n        print(dataMatrix[i])\n    print(dataLabel)\n    print(mat(dataLabel).transpose())\n    matLabel=mat(dataLabel).transpose()\n\n    return dataMatrix,matLabel\n\n# logistic regression uses the sigmoid function\ndef sigmoid(inX):\n    return 1/(1+exp(-inX))\n\n# These functions convert lists into matrices with mat()\n# and transpose matrices with transpose();\n# they also use the shape of lists and arrays.\n# When multiplying matrices, make sure the dimensions match.\n\n# graAscent implements gradient ascent (the underlying math is involved).\n# Plain gradient ascent scans the entire dataset on every parameter update.\ndef graAscent(dataMatrix,matLabel):\n    m,n=shape(dataMatrix)\n    matMatrix=mat(dataMatrix)\n\n    w=ones((n,1))\n    alpha=0.001\n    num=500\n    for i in range(num):\n        error=sigmoid(matMatrix*w)-matLabel\n        w=w-alpha*matMatrix.transpose()*error\n    return w\n\n\n# Stochastic gradient ascent: far cheaper on large datasets, but classifies worse.\n# Each parameter update uses a single sample.\n# m samples, n features\ndef stocGraAscent(dataMatrix,matLabel):\n    m,n=shape(dataMatrix)\n    matMatrix=mat(dataMatrix)\n\n    w=ones((n,1))\n    alpha=0.001\n    num=20 # this iteration count strongly affects accuracy; very small values classify poorly\n    for i in range(num):\n        for j in range(m):\n            error=sigmoid(matMatrix[j]*w)-matLabel[j]\n            w=w-alpha*matMatrix[j].transpose()*error\n    return w\n\n# Improved stochastic gradient ascent; two changes raise the accuracy considerably:\n# 1. the learning rate alpha decays nonlinearly, so it differs on every step\n# 2. each update still uses a single sample, but samples are chosen at random and are not reselected\ndef stocGraAscent1(dataMatrix,matLabel):\n    m,n=shape(dataMatrix)\n    matMatrix=mat(dataMatrix)\n\n    w=ones((n,1))\n    num=200 # this iteration count strongly affects accuracy; very small values classify poorly\n    setIndex=set([])\n    for i in range(num):\n        for j in range(m):\n            alpha=4/(1+i+j)+0.01\n\n            dataIndex=random.randint(0,100)\n            while dataIndex in setIndex:\n                setIndex.add(dataIndex)\n                dataIndex=random.randint(0,100)\n            error=sigmoid(matMatrix[dataIndex]*w)-matLabel[dataIndex]\n            w=w-alpha*matMatrix[dataIndex].transpose()*error\n    return w\n\ndef LR_predict(weight,testDataMatrix, testMatLabel):\n    prediction = []\n    TP=0\n    TN=0\n    FP=0\n    FN=0\n    for i in range(len(testDataMatrix)):\n        # apply the sigmoid so the score is a probability in [0, 1] before thresholding\n        tempPre = float(sigmoid(mat(testDataMatrix[i])*weight))\n        tempLabel = float(testMatLabel[i])\n        prediction.append(tempPre)\n        tempPre = round(tempPre)\n        print(tempPre,tempLabel)\n        if tempPre==1 and tempLabel==1:\n            TP +=1\n        elif tempPre==0 and tempLabel==0:\n            TN +=1\n        elif tempPre==1 and tempLabel==0:\n            FP +=1\n        elif tempPre==0 and tempLabel==1:\n            FN +=1\n    print(\"TP:\",TP)\n    print(\"TN:\", TN)\n    print(\"FP:\", FP)\n    print(\"FN:\", FN)\n    
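# accuracy: fraction of correct predictions over all test samples\n    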
trueRate = (TP+TN)/(TP+TN+FP+FN)\n\n    print(\"trueRate:\")\n    print(trueRate)\n\n    label = np.array(testMatLabel)\n    prediction = np.array(prediction)\n    fpr, tpr, thresholds = metrics.roc_curve(label,prediction, pos_label=1)\n    auc = metrics.auc(fpr, tpr)\n    print(\"fpr\")\n    print(fpr)\n    print(\"tpr\")\n    print(tpr)\n    print(\"thresholds\")\n    print(thresholds)\n    print(\"auc\")\n    print(auc)\n\n    return prediction,auc\n\n\n\n\n\n# plot the data points and the fitted decision boundary\ndef draw(weight):\n    x0List=[];y0List=[];\n    x1List=[];y1List=[];\n    f=open('testSet.txt','r')\n    for line in f.readlines():\n        lineList=line.strip().split()\n        if lineList[2]=='0':\n            x0List.append(float(lineList[0]))\n            y0List.append(float(lineList[1]))\n        else:\n            x1List.append(float(lineList[0]))\n            y1List.append(float(lineList[1]))\n\n    fig=plt.figure()\n    ax=fig.add_subplot(111)\n    ax.scatter(x0List,y0List,s=10,c='red')\n    ax.scatter(x1List,y1List,s=10,c='green')\n\n    xList=[];yList=[]\n    x=arange(-3,3,0.1)\n    for i in arange(len(x)):\n        xList.append(x[i])\n\n    y=(-weight[0]-weight[1]*x)/weight[2]\n    for j in arange(y.shape[1]):\n        yList.append(y[0,j])\n\n    ax.plot(xList,yList)\n    plt.xlabel('x1');plt.ylabel('x2')\n    plt.show()\n\n\nif __name__ == '__main__':\n    # dataMatrix,matLabel=loadDataSet('D:/code/SummerProject/shallowClassifier/data/train01.csv')\n    # trainDataMatrix = dataMatrix[0:-1000]\n    # trainMatLabel = matLabel[0:-1000]\n    # testDataMatrix = dataMatrix[-1000:]\n    # testMatLabel = matLabel[-1000:]\n    trainDataMatrix, trainMatLabel = loadDataSet('D:/code/SummerProject/shallowClassifier/data/train01.csv')\n    testDataMatrix, testMatLabel = loadDataSet('D:/code/SummerProject/shallowClassifier/data/test01.csv')\n\n    weight = stocGraAscent1(trainDataMatrix, trainMatLabel)\n    print(\"weight\")\n    print(weight)\n\n    prediction, auc = LR_predict(weight, testDataMatrix, testMatLabel)\n\n    # draw(weight)\n\n\n\n    # weight=graAscent(dataMatrix,matLabel)\n    # weight=stocGraAscent1(dataMatrix,matLabel)\n    # print(weight)\n    # draw(weight)","repo_name":"seekelvis/SummerProject","sub_path":"shallowClassifier/LR01.py","file_name":"LR01.py","file_ext":"py","file_size_in_byte":6527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"28559946367","text":"import netaddr\nimport webob\nfrom webob import exc\n\nfrom nova.api.openstack import extensions\nfrom nova.api.openstack import wsgi\nfrom nova import context as nova_context\nfrom nova import exception\nfrom nova.i18n import _\nfrom nova import network\nfrom nova.objects import base as base_obj\nfrom nova.objects import fields as obj_fields\n\nauthorize = extensions.extension_authorizer('compute', 'networks')\nauthorize_view = extensions.extension_authorizer('compute',\n                                                 'networks:view')\nextended_fields = ('mtu', 'dhcp_server', 'enable_dhcp', 'share_address')\n\n\ndef network_dict(context, network, extended):\n    fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2',\n              'cidr_v6', 'gateway_v6', 'label', 'netmask_v6')\n    admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted',\n                    'injected', 'bridge', 'vlan', 'vpn_public_address',\n                    'vpn_public_port', 'vpn_private_address', 'dhcp_start',\n                    'project_id', 'host', 'bridge_interface', 'multi_host',\n                    'priority', 'rxtx_base')\n    if network:\n        # NOTE(mnaser): We display a limited set of fields so users can know\n        #               what networks are available, extra system-only fields\n        #               are only visible if they are an admin.\n        if context.is_admin:\n            fields += admin_fields\n        if extended:\n            fields += extended_fields\n        # TODO(mriedem): Remove 
the NovaObject type check once the\n # network.create API is returning objects.\n is_obj = isinstance(network, base_obj.NovaObject)\n result = {}\n for field in fields:\n # NOTE(mriedem): If network is an object, IPAddress fields need to\n # be cast to a string so they look the same in the response as\n # before the objects conversion.\n if is_obj and isinstance(network.fields[field].AUTO_TYPE,\n obj_fields.IPAddress):\n # NOTE(danms): Here, network should be an object, which could\n # have come from neutron and thus be missing most of the\n # attributes. Providing a default to get() avoids trying to\n # lazy-load missing attributes.\n val = network.get(field, None)\n if val is not None:\n result[field] = str(val)\n else:\n result[field] = val\n else:\n # It's either not an object or it's not an IPAddress field.\n result[field] = network.get(field, None)\n uuid = network.get('uuid')\n if uuid:\n result['id'] = uuid\n return result\n else:\n return {}\n\n\nclass NetworkController(wsgi.Controller):\n\n def __init__(self, network_api=None, ext_mgr=None):\n self.network_api = network_api or network.API()\n if ext_mgr:\n self.extended = ext_mgr.is_loaded('os-extended-networks')\n else:\n self.extended = False\n\n def index(self, req):\n context = req.environ['nova.context']\n authorize_view(context)\n networks = self.network_api.get_all(context)\n result = [network_dict(context, net_ref, self.extended)\n for net_ref in networks]\n return {'networks': result}\n\n @wsgi.action(\"disassociate\")\n def _disassociate_host_and_project(self, req, id, body):\n context = req.environ['nova.context']\n authorize(context)\n # NOTE(shaohe-feng): back-compatible with db layer hard-code\n # admin permission checks. call db API objects.Network.associate\n nova_context.require_admin_context(context)\n\n try:\n self.network_api.associate(context, id, host=None, project=None)\n except exception.NetworkNotFound:\n msg = _(\"Network not found\")\n raise exc.HTTPNotFound(explanation=msg)\n except NotImplementedError:\n msg = _('Disassociate network is not implemented by the '\n 'configured Network API')\n raise exc.HTTPNotImplemented(explanation=msg)\n return webob.Response(status_int=202)\n\n def show(self, req, id):\n context = req.environ['nova.context']\n authorize_view(context)\n\n try:\n network = self.network_api.get(context, id)\n except exception.NetworkNotFound:\n msg = _(\"Network not found\")\n raise exc.HTTPNotFound(explanation=msg)\n return {'network': network_dict(context, network, self.extended)}\n\n def delete(self, req, id):\n context = req.environ['nova.context']\n authorize(context)\n try:\n self.network_api.delete(context, id)\n except exception.NetworkInUse as e:\n raise exc.HTTPConflict(explanation=e.format_message())\n except exception.NetworkNotFound:\n msg = _(\"Network not found\")\n raise exc.HTTPNotFound(explanation=msg)\n return webob.Response(status_int=202)\n\n def create(self, req, body):\n context = req.environ['nova.context']\n authorize(context)\n # NOTE(shaohe-feng): back-compatible with db layer hard-code\n # admin permission checks. 
call db API objects.Network.create\n        nova_context.require_admin_context(context)\n\n        def bad(e):\n            return exc.HTTPBadRequest(explanation=e)\n\n        if not (body and body.get(\"network\")):\n            raise bad(_(\"Missing network in body\"))\n\n        params = body[\"network\"]\n        if not params.get(\"label\"):\n            raise bad(_(\"Network label is required\"))\n\n        cidr = params.get(\"cidr\") or params.get(\"cidr_v6\")\n        if not cidr:\n            raise bad(_(\"Network cidr or cidr_v6 is required\"))\n\n        if params.get(\"project_id\") == \"\":\n            params[\"project_id\"] = None\n\n        params[\"num_networks\"] = 1\n        try:\n            params[\"network_size\"] = netaddr.IPNetwork(cidr).size\n        except netaddr.AddrFormatError:\n            msg = _('%s is not a valid IP network') % cidr\n            raise exc.HTTPBadRequest(explanation=msg)\n\n        if not self.extended:\n            create_params = ('allowed_start', 'allowed_end')\n            for field in extended_fields + create_params:\n                if field in params:\n                    del params[field]\n\n        try:\n            network = self.network_api.create(context, **params)[0]\n        except (exception.InvalidCidr,\n                exception.InvalidIntValue,\n                exception.InvalidAddress,\n                exception.NetworkNotCreated) as ex:\n            raise exc.HTTPBadRequest(explanation=ex.format_message())\n        except exception.CidrConflict as ex:\n            raise exc.HTTPConflict(explanation=ex.format_message())\n        return {\"network\": network_dict(context, network, self.extended)}\n\n    def add(self, req, body):\n        context = req.environ['nova.context']\n        authorize(context)\n        # NOTE(shaohe-feng): back-compatible with db layer hard-code\n        # admin permission checks. call db API objects.Network.associate\n        nova_context.require_admin_context(context)\n        if not body:\n            raise exc.HTTPUnprocessableEntity()\n\n        network_id = body.get('id', None)\n        project_id = context.project_id\n\n        try:\n            self.network_api.add_network_to_project(\n                context, project_id, network_id)\n        except NotImplementedError:\n            msg = (_(\"VLAN support must be enabled\"))\n            raise exc.HTTPNotImplemented(explanation=msg)\n        except (exception.NoMoreNetworks,\n                exception.NetworkNotFoundForUUID) as e:\n            raise exc.HTTPBadRequest(explanation=e.format_message())\n\n        return webob.Response(status_int=202)\n\n\nclass Os_networks(extensions.ExtensionDescriptor):\n    \"\"\"Admin-only Network Management Extension.\"\"\"\n\n    name = \"Networks\"\n    alias = \"os-networks\"\n    namespace = (\"http://docs.openstack.org/compute/\"\n                 \"ext/os-networks/api/v1.1\")\n    updated = \"2011-12-23T00:00:00Z\"\n\n    def get_resources(self):\n        member_actions = {'action': 'POST'}\n        collection_actions = {'add': 'POST'}\n        res = extensions.ResourceExtension(\n            'os-networks',\n            NetworkController(ext_mgr=self.ext_mgr),\n            member_actions=member_actions,\n            collection_actions=collection_actions)\n        return [res]\n","repo_name":"BU-NU-CLOUD-SP16/Trusted-Platform-Module-nova","sub_path":"nova/api/openstack/compute/legacy_v2/contrib/os_networks.py","file_name":"os_networks.py","file_ext":"py","file_size_in_byte":8495,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"}{"seq_id":"33040854296","text":"# check that functions are called with the right arities,\n# including data constructors (in arguments and in definitions).\n# This pass also builds the arity tables that the executor uses.\n\nfrom functools import reduce\nfrom sys import exit\nfrom Stdlib import stdlib_args\n\ndata_arities = {} # data arities are a global dict because they must persist across file loads\nfunction_arities = {} # same here\n\ndef get_data_arities(): # called in the executor. 
used to avoid double computation; this technique should be used more often.\n    return data_arities # the global keyword is unnecessary here\n\ndef construct_data_arities(data):\n    for n in reduce(lambda n1,n2: n1+n2, map(lambda n: n[2], data), []): # only look at the constructors, ignoring the data type's name\n        if type(n) is list: # due to an oddity in the parser\n            if n[0] in data_arities:\n                print(\"error: redefinition of \\\"\".upper(), n[0], \"\\\"\")\n                exit()\n            data_arities[n[0]] = 0\n        elif type(n) is tuple:\n            if n[1] in data_arities:\n                print(\"error: redefinition of \\\"\".upper(), n[1], \"\\\"\")\n                exit()\n            data_arities[n[1]] = len(n[2])\n        else:\n            raise Exception(\"unknown data type passed to construct data arities\")\n\ndef construct_function_arities(functions):\n    for n in list(map(lambda n: n[1], functions)):\n        if n[1] in function_arities and function_arities[n[1]] != len(n[2]): # second case not matching\n            print(\"error: function \\\"\".upper(), n[1], \"\\\" has cases with different arities\".upper())\n            exit()\n        else:\n            function_arities[n[1]] = len(n[2])\n\n# entry function\ndef Arity_Checker(functions, data):\n    construct_data_arities(data)\n    construct_function_arities(functions)\n    for n in functions:\n        tmp = check_arity(n) # global data_arities used\n        if tmp != True:\n            print(\"error: function \\\"\".upper(), n[1][1], \"\\\" calls function \\\"\".upper(), tmp, \"\\\" with an incorrect number of arguments\")\n            exit()\n    return True\n\ndef check_arity(function):\n    return arity_rec(function[2])\n\ndef arity_rec(func_call):\n    if type(func_call) is list: return True\n    am = -1\n    if func_call[1] in stdlib_args:\n        am = stdlib_args[func_call[1]]\n    elif func_call[0] == \"data-constructor\":\n        if func_call[1] not in data_arities:\n            print(\"error: call to non-existent data-constructor\".upper(), func_call[1])\n            exit()\n        am = data_arities[func_call[1]]\n    else:\n        if func_call[1] not in function_arities:\n            print(\"error: call to non-existent function\".upper(), func_call[1])\n            exit()\n        am = function_arities[func_call[1]]\n    if len(func_call[2]) != am and func_call[1] not in stdlib_args: # check whether 0-ary constructors follow this law\n        return func_call[1]\n    for n in func_call[2]:\n        tmp = arity_rec(n)\n        if tmp != True:\n            return tmp\n    return True\n","repo_name":"Glubs9/total_functional_programming_language","sub_path":"ArityChecker.py","file_name":"ArityChecker.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}{"seq_id":"14010309414","text":"import json\nimport os\nfrom IamDB_A.models import movie\nfrom django.http import HttpResponse\n\ndef def_loader(request):\n    with open(os.getcwd()+\"/DB_Loader/sample_data.json\",\"r\") as json_file:\n        jsn = json.load(json_file)\n        for i in jsn:\n            _99popularity = i[\"99popularity\"]\n            director = i[\"director\"]\n            score = i[\"imdb_score\"]\n            name = i[\"name\"]\n            genre = list()\n            # use a separate loop variable so the record variable i is not shadowed\n            for g in i['genre']:\n                genre.append(g)\n\n            data_obj = movie()\n            data_obj.name = name\n            data_obj._99popularity = _99popularity\n            data_obj.score = score\n            data_obj.director = director\n            data_obj.genre = genre\n            data_obj.save()\n\n    return HttpResponse(\"Sample Database Creation Successful\")","repo_name":"Workspace-SaurabhMishra/IamDB","sub_path":"DB_Loader/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}{"seq_id":"9013034544","text":"from flask import Flask, render_template\nimport requests\nimport datetime\nfrom 
post import Post\n\n\nresponse = requests.get(\"https://api.npoint.io/c790b4d5cab58020d391\")\nposts_data = response.json()\n\nposts = []\nfor post in posts_data:\n posts.append(Post(post))\n\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef index():\n year_today = datetime.date.today().year\n return render_template(\"index.html\", year_today=year_today)\n\n\n@app.route(\"/blogs\")\ndef blogs():\n return render_template(\"blogs.html\", posts=posts)\n\n\n@app.route(\"/blog/\")\ndef single_blog(post_id):\n requested_post = [post for post in posts if str(post.id) == str(post_id)][0]\n print(requested_post)\n return render_template(\"single-blog.html\", post=requested_post)\n\n\n@app.route(\"/guess/\")\n@app.route(\"/guess/\")\ndef guess(name):\n genderize_response = requests.get(\n f\"https://api.genderize.io?name={name}\", timeout=130\n )\n genderize_data = genderize_response.json()\n gender = genderize_data[\"gender\"]\n\n agify_response = requests.get(f\"https://api.agify.io?name={name}\", timeout=130)\n agify_data = agify_response.json()\n age = agify_data[\"age\"]\n\n return render_template(\"guess.html\", name=name, gender=gender, age=age)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"rvitality/100-days-of-python","sub_path":"day-57/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33937428600","text":"import webapp2\n\n\n\npage_header = \"\"\"\n\n\n\n Flicklist\n\n\n\n\"\"\"\n\npage_footer = \"\"\"\n\n\n\"\"\"\n\nclass Index(webapp2.RequestHandler):\n\n def get(self):\n add_form = \"\"\"\n
    \n    \n        \n        \"\"\"\n\n        response = page_header + add_form + page_footer\n        self.response.write(response)\n\nclass AddMovie(webapp2.RequestHandler):\n\n    def post(self):\n\n        added_movie = self.request.get(\"added_movie\")\n        movie_element = \"\" + added_movie + \"\"\n        confirmation = \"    \" + movie_element + \" has been added to your watchlist!\" + \"
    \"\n response = page_header + confirmation + page_footer\n self.response.write(response)\n\napp = webapp2.WSGIApplication([\n ('/', Index),\n ('/add', AddMovie)\n], debug=True)\n","repo_name":"emeznar/PythonPrograms","sub_path":"working_flicklist-python.py","file_name":"working_flicklist-python.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"19153664499","text":"from tkinter import *\nfrom tkinter.colorchooser import *\n\ndef callback():\n global result\n result = askcolor(title=\"Color Chooser\")\n result = result[1]\n\nroot = Tk()\n\ncanvas = Canvas(root, width=500, height=500)\ncolor = 'red'\nresult = '#476042'\n\nB1 = Button(root, text='Choose Color', fg=\"darkgreen\", command=callback)\nB1.pack(side=LEFT, padx=10)\n\nlastx, lasty = 0, 0\n\ndef xy(event):\n global lastx, lasty\n lastx, lasty = event.x, event.y\n\ndef addLine(event):\n global lastx, lasty\n canvas.create_line((lastx, lasty, event.x, event.y), fill=result)\n lastx, lasty = event.x, event.y\n\nroot.columnconfigure(0, weight=1)\nroot.rowconfigure(0, weight=1)\n\ncanvas.pack()\ncanvas.bind(\"\", xy)\ncanvas.bind(\"\", addLine)\n\nroot.mainloop()","repo_name":"Ohgyuchan/cs-study","sub_path":"python/tkinter/tkinter_example.py","file_name":"tkinter_example.py","file_ext":"py","file_size_in_byte":765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"16232335175","text":"import sys\r\ninput=sys.stdin.readline\r\n# N=int(input())\r\n# cnt=0\r\n# for i in range(1,N+1):\r\n# if i%5==0:\r\n# cnt+=1\r\n# if i%25==0:\r\n# cnt+=1\r\n# if i%125==0:\r\n# cnt+=1\r\n# print(cnt)\r\nN=int(input())\r\ndef five_count(n):\r\n cnt=0\r\n while n!=0:\r\n n//=5\r\n cnt+=n\r\n return cnt\r\nprint(five_count(N))","repo_name":"KyunghoonJeon/BaekJoon_Judge","sub_path":"Number_Theory/question1676.py","file_name":"question1676.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9011022036","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Python_worksheet-1\n\n# In[ ]:\n\n\n\n\n\n# In[1]:\n\n\n#Code to find factorial of a number\ndef factorial(x):\n if x==1:\n return 1\n else:\n return (x*factorial(x-1))\n\n\n# In[2]:\n\n\nnum=int(input(\"Enter any number\"))\nresult= factorial(num)\nresult\n\n\n# In[6]:\n\n\n# Code to find a prime and composite number\nn= int (input(\"Enter any value\"))\nif (n==0 or n==1):\n print (n, \"number is neither prime nor composite\")\nelif n>1:\n if (n%2==0):\n print (n, \"number is composite number\")\n else:\n print (n, \"number is prime number\")\nelse:\n print(\"Print positive number only\")\n\n\n# In[25]:\n\n\n#code to find out wheather a string is palindrome or not\n\nStr = \"civic\"\ndef string(Str):\n Str = str(input(\"The Word \"))\n Str= Str.casefold()\n rev_str = reversed(Str)\n\n if list(Str) == list(rev_str):\n print (\"the string is a palindrome\")\n else:\n print (\"the string is not a palindrome\")\n\n\n# In[26]:\n\n\nResult = string(Str)\n\n\n# In[27]:\n\n\nResult1 = string(Str)\n\n\n# In[ ]:\n\n\n\n\n\n# In[30]:\n\n\n# Code to find out third side of right angle triangle from two given side\n\ndef triangle():\n AB = int(input(\"First Side \"))\n BC = int(input(\"Second Side \"))\n AC = ((AB**2)+ (BC**2))**(1/2)\n print (\"The thirld side of triangle is\", AC)\n\n\n# In[31]:\n\n\nresult= triangle()\n\n\n# In[ ]:\n\n\n\n\n\n# In[ 
]:\n\n\n\n\n\n# In[33]:\n\n\nString = \"hhmmtt\"\nstr1= list(String)\n\n\n# In[34]:\n\n\nstr1\n\n\n# In[35]:\n\n\nstrlist=[]\n\n\n# In[36]:\n\n\nstrlist\n\n\n# In[37]:\n\n\nfreq= str1\n\n\n# In[38]:\n\n\nfreq\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Rgarg2310/Internship","sub_path":"Python_Worksheet1.py","file_name":"Python_Worksheet1.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"13291086421","text":"# https://www.py4e.com/html3/09-dictionaries\nfrom pprint import pprint\n\n\ndef read_words():\n w_dict = {}\n infile = open('words.txt')\n # count = 0\n for line in infile:\n words = line.split()\n # print 'Debug:', words\n for word in words:\n if word not in w_dict:\n w_dict.setdefault(word, None)\n pprint(w_dict)\n\n\nif __name__ == '__main__':\n read_words()\n","repo_name":"markvogel/100days","sub_path":"2019/day171_2019.py","file_name":"day171_2019.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"35980381064","text":"class Word():\n\tdef __init__(self, word):\n\t\tself.word = word\n\t\tself.phone = []\n\t\tself.syns = []\n\t\tself.pos = ''\n\n\tdef __str__(self):\n\t\tret = ''\n\t\tret += self.word + ': '\n\t\tret += self.pos + ', '\n\t\tret += ' '.join(self.phone) + ', ' \n\t\tret += '[' + ', '.join(self.syns[:3]) + '...]' \n\t\treturn ret","repo_name":"ryanmp/linguistics","sub_path":"word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23627091302","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom requests.models import HTTPError\nimport spotipy\nfrom spotipy.oauth2 import SpotifyOAuth\nimport pprint\n\nweek = input('Which year do you want to travel to? 
Type the date in this format YYYY-MM-DD:')\n\nURL = \"https://www.billboard.com/charts/hot-100/\"\n\nresponse = requests.get(url = f\"{URL}{week}/\").text\n\nsoup = BeautifulSoup(response, 'html.parser')\n\nsong_row = soup.find_all('h3', class_='a-font-primary-bold-s')\nsong_row = song_row[2:]\nsongs = [song.get_text().strip(\"\\n\") for song in song_row]\n\nscope = \"playlist-modify-private\"\nredirect_uri = \"https://example.com\"\n\nsp = spotipy.Spotify(auth_manager=SpotifyOAuth(scope=scope,\n redirect_uri=redirect_uri))\n\n\nYYYY = week.split('-')[0]\nsong_URI = []\nfor song in songs:\n \n\n try:\n results = sp.search(f\"track:{song} year:{YYYY}\", type='track', limit=1, market='US')\n result_URI = results['tracks']['items'][0]['uri']\n song_URI.append(result_URI)\n\n except IndexError:\n pass\n\nnew_playlist = sp.user_playlist_create(user='', \n name='python playlist',\n public= False,\n description='This is the playlist I created with python')\nnew_playlist_id = new_playlist['id']\nsp.playlist_add_items(playlist_id=new_playlist_id, items=song_URI)\n","repo_name":"gvarg75/UdemyDSCourse","sub_path":"100_days_of_python/day_46/spotify_playlist_maker/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"22626400544","text":"import pygame, sys, random, math\nfrom constants import w_width, w_height,playerSpeed\nimport json\n\nclass vector():\n def __init__(self,x,y):\n self.x = x\n self.y = y\n\n def negateX(self):\n self.x *= -1\n\n def negateY(self):\n self.y *= -1\n\ndef createPoints():\n \n x,y = random.randrange(w_width),random.randrange(w_height)\n\n goodX = (x > 700) or (x < 450)\n goodY = (y > 500) or (y < 250)\n \n while not goodX or not goodY:\n if not goodX:\n x = random.randrange(0,1200)\n if not goodY:\n y = random.randrange(0,700)\n\n goodX = (x > 700) or (x < 450)\n goodY = (y > 500) or (y < 250)\n\n return x,y\n\nclass Enemy(pygame.sprite.Sprite):\n def __init__(self,startingPos,imagePath,speed = 10, inflate = 100,bearDex = 0):\n pygame.sprite.Sprite.__init__(self) # calls the parent class constructor\n\n self.sheet = pygame.image.load(imagePath[0] + imagePath[1]) # load and assign spritesheet\n\n # using with to load clip co-ordinates ensures the files\n # are closed\n with open(imagePath[0] + 'ls.json', \"rb\") as ls,\\\n open(imagePath[0] + 'us.json', \"rb\") as us,\\\n open(imagePath[0] + 'rs.json', \"rb\") as rs,\\\n open(imagePath[0] + 'ds.json', \"rb\") as ds:\n self.left_states = {int(key):tuple(value) for key, value in json.load(ls).items()}\n self.up_states = {int(key):tuple(value) for key, value in json.load(us).items()}\n self.right_states = {int(key):tuple(value) for key, value in json.load(rs).items()}\n self.down_states = {int(key):tuple(value) for key, value in json.load(ds).items()}\n\n clip_area = pygame.Rect(self.left_states[0]) # this is the size of a frame\n self.sheet.set_clip(clip_area)\n\n # create a rectangle that is the player sprite\n self.image = self.sheet.subsurface(self.sheet.get_clip())\n self.rectangle = self.image.get_rect()\n self.rectangle.topleft = startingPos\n\n # used to cycle through frames\n self.frame = 0\n self.speed = speed\n\n # This is hacky but it's to stop the carnage\n self.caughtHim = 0\n\n # for level 2\n self.rampage = 0\n self.stepCounter = 50\n self.headingX = 0\n self.headingY = 0\n\n # for level 3\n self.chasing = False\n\n # for level4\n self.crystals = 2\n self.inInterval = 0\n self.bearID 
= bearDex\n self.bearLims = {0:{'left':150,'right':750,'up':150,'down':650},1:{'left':1700,'right':2300,'up':150,'down':650},2:{'left':1700,'right':2300,'up':1700,'down':2250},3:{'left':150,'right':750,'up':1700,'down':2250}}\n\n\n self.heading = self.createRandomHeading()\n self.direction = 'left'\n self.inflate = inflate\n self.detection = self.rectangle.inflate(self.inflate,self.inflate)\n \n\n def update(self,keith,bg,keys,collision,obstacles,bumped):\n if not collision:\n if keys[pygame.K_a]:\n self.rectangle.x += keith.speed\n if keys[pygame.K_d]:\n self.rectangle.x -= keith.speed\n if keys[pygame.K_w]:\n self.rectangle.y += keith.speed\n if keys[pygame.K_s]:\n self.rectangle.y -= keith.speed\n\n if self.detection.colliderect(keith.rectangle) and not bumped:\n self.chase(keith.rectangle)\n else:\n self.patrol(bg,obstacles)\n self.caughtHim = 0\n self.detection = self.rectangle.inflate(self.inflate,self.inflate)\n\n def patrol(self,bg,obstacles,ghost=False):\n nextXPos = self.rectangle.x + (self.speed*self.heading.x)\n nextYPos = self.rectangle.y + (self.speed*self.heading.y)\n\n fnxt = math.floor(nextXPos)\n fnyt = math.floor(nextYPos)\n\n # collisions\n if not ghost:\n for obstacle in obstacles:\n x = obstacle.rect.x\n y = obstacle.rect.y\n width = obstacle.rect.width\n height = obstacle.rect.height\n if (nextXPos > x-self.rectangle.width) and (nextXPos < x +width) and (nextYPos > y-self.rectangle.height) and (nextYPos < y+height):\n\n if fnxt in range(x-self.rectangle.width-3,x-self.rectangle.width+3):\n # send back east\n newHeading = vector((-1*self.heading.x),self.heading.y)\n self.heading = newHeading\n nextXPos = x-self.rectangle.width\n elif fnxt in range(x + width-5,x+width+5):\n # send back west\n newHeading = vector((-1*self.heading.x),self.heading.y)\n self.heading = newHeading\n nextXPos = x+width\n elif fnyt in range(y-self.rectangle.height-5,y-self.rectangle.height+5):\n # send back north\n newHeading = vector(self.heading.x,(-1*self.heading.y))\n self.heading = newHeading\n nextYPos = y-self.rectangle.height\n elif fnyt in range(y+height-5,y+height+5):\n # send back south\n newHeading = vector(self.heading.x,(-1*self.heading.y))\n self.heading = newHeading\n nextYPos = y+height\n\n if nextXPos - bg.x > (bg.resolution[0]-self.rectangle.width):\n newHeading = vector((-1*self.heading.x),self.heading.y)\n self.heading = newHeading\n nextXPos = (bg.resolution[0]-self.rectangle.width)+bg.x\n if nextXPos - bg.x <= 0:\n newHeading = vector((-1*self.heading.x),self.heading.y)\n self.heading = newHeading\n nextXPos = bg.x\n if nextYPos - bg.y > (bg.resolution[1]-self.rectangle.height):\n newHeading = vector(self.heading.x,(-1*self.heading.y))\n self.heading = newHeading\n nextYPos = (bg.resolution[1]-self.rectangle.width)+bg.y\n if nextYPos -bg.y <= 0:\n newHeading = vector(self.heading.x,(-1*self.heading.y))\n self.heading = newHeading\n nextYPos = bg.y\n\n self.rectangle.x = nextXPos\n self.rectangle.y = nextYPos\n\n if nextYPos > nextXPos: # animate vertical\n if self.heading.y > 0: # moving down\n self.move(self.down_states)\n else:\n self.move(self.up_states)\n else: # animate hroizontal\n if self.heading.x > 0: # moving right\n self.move(self.right_states)\n else:\n self.move(self.left_states)\n\n self.image = self.sheet.subsurface(self.sheet.get_clip())\n\n\n def chase(self,playerRect,ghost=False):\n\n if self.caughtHim == 1:\n self.detection = self.rectangle.inflate(400,300)\n return\n\n # increase detection range\n if not ghost:\n self.detection = 
self.rectangle.inflate(400,300)\n\n x = (playerRect.x - self.rectangle.x)\n y = (playerRect.y - self.rectangle.y)\n\n length = math.sqrt((x*x)+(y*y))\n\n if length == 0:\n length = 1\n\n headingX = float(x/length)\n headingY = float(y/length)\n\n nextXPos = self.rectangle.x + (self.speed*headingX)\n nextYPos = self.rectangle.y + (self.speed*headingY)\n\n self.rectangle.x = nextXPos\n self.rectangle.y = nextYPos\n\n if y > 0: # player below\n self.move(self.down_states)\n elif y < 0: # player above\n self.move(self.up_states)\n elif x > 0: # player to right\n self.move(self.right_states)\n elif x < 0: # player to left\n self.move(self.left_states)\n\n self.image = self.sheet.subsurface(self.sheet.get_clip())\n\n def move(self, movement):\n if type(movement) is dict:\n # regular case, where we call self.move with our dictionary of coordinates\n self.frame += 1 # cycle through\n if self.frame > (len(movement) -1):\n self.frame = 0\n coords = movement[self.frame]\n else:\n # in the case we want to stop moving, and pass in\n # a single stand frame\n coords = movement\n\n new_rect = pygame.Rect(coords)\n\n self.sheet.set_clip(new_rect)\n return movement\n\n def reverseHeading(self,sink):\n x = sink.rect.x\n y = sink.rect.y\n width = sink.rect.width\n height = sink.rect.height\n nextXPos = self.rectangle.x\n nextYPos = self.rectangle.y\n\n if nextXPos in range(x-self.rectangle.width-5,x-self.rectangle.width+5):\n # send back east\n newHeading = vector((-1*self.heading.x),self.heading.y)\n self.heading = newHeading\n elif nextXPos in range(x + width-5,x+width+5):\n # send back west\n newHeading = vector((-1*self.heading.x),self.heading.y)\n self.heading = newHeading\n elif nextYPos in range(y-self.rectangle.height-5,y-self.rectangle.height+5):\n # send back north\n newHeading = vector(self.heading.x,(-1*self.heading.y))\n self.heading = newHeading\n elif nextYPos in range(y+height-5,y+height+5):\n # send back south\n newHeading = vector(self.heading.x,(-1*self.heading.y))\n self.heading = newHeading\n\n\n def createRandomHeading(self):\n angle = random.randint(0,360)\n angle = angle * (3.14159/180)\n\n x = math.cos(angle)\n y = math.sin(angle)\n v = vector(x,y)\n return v\n \n \n","repo_name":"sabrinagannon/space-evaders","sub_path":"current version/levels/enemy.py","file_name":"enemy.py","file_ext":"py","file_size_in_byte":9858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6710485680","text":"\"\"\"\r\nCreated on Wed Aug 28 14:19:06 2019\r\nDC-based TCEP using PTDF\r\n@author: Victor Hinojosa\r\n\"\"\"\r\n\r\nfrom gurobipy import *\r\nfrom scipy.sparse import csr_matrix as sparse\r\nfrom scipy.sparse import identity as sparseI\r\nfrom numpy import pi, append, array, ones, zeros, arange, ix_, r_, flatnonzero as find\r\nfrom numpy.linalg import inv\r\nimport numpy as np\r\nimport time\r\n\r\nt00 = time.time() #formulation time\r\n\r\nif False:\r\n import case3b_TCEP as mpc\r\n sep=mpc.case3b_TCEP()\r\nelse:\r\n import Garver as mpc\r\n sep=mpc.Garver()\r\n #import case6ww as mpc\r\n #sep=mpc.case6ww()\r\n #import case118 as mpc\r\n #sep=mpc.case118()\r\n\r\n# Initializing model\r\nm = Model('TCEP_PTDF')\r\n#m.Params.MIPGap=1e-6\r\nm.Params.OutputFlag=0 #m.setParam('OutputFlag', False)\r\n\r\n# SEP parameters\r\ntE = 8760\r\nM=2*pi*sep['baseMVA']\r\nng = len(sep['gen'])\r\nnb = len(sep['bus'])\r\nnl = len(sep['branch'])\r\n#total\r\nb = 1 / (sep['branch'][:,3] / (sep['branch'][:,13]+sep['branch'][:,15]))\r\nf = sep['branch'][:, 
0]-1\r\nt = sep['branch'][:, 1]-1\r\nI = r_[range(nl), range(nl)]\r\nS = sparse((r_[ones(nl), -ones(nl)], (I, r_[f, t])), (nl, nb))#total\r\nBf = sparse((r_[b, -b], (I, r_[f, t])), (nl,nb))\r\nBbus = S.T * Bf\r\nslack_bus=find(sep['bus'][:,1]==3)\r\nbuses = arange(1, nb)\r\nnoslack = find(arange(nb) != slack_bus)\r\nSF_aux = zeros((nl, nb))\r\nSF_aux[:,noslack]=Bf[:, buses].todense()*inv(Bbus[ix_(noslack, buses)].todense()) \r\nSF = zeros((nl, nb))\r\nfor i in range(nb):\r\n SF[:,i]=SF_aux[:,i]/ (sep['branch'][:,13]+sep['branch'][:,15])\r\nPF_sl=np.dot(SF,sep['bus'][:,2].T) #Slack PF\r\nrhs_1=-sep['branch'][:,5]-PF_sl #FM - Slack PF\r\nrhs_2=-sep['branch'][:,5]+PF_sl\r\n#exiting\r\npos_le = find(sep['branch'][:,13]==1)\r\nnle = len(pos_le)\r\n#new\r\npos_ln_aux = find(sep['branch'][:,15]!=0)\r\nnln_aux = len(pos_ln_aux)\r\npos_ln = []\r\nfor i in range(nln_aux):\r\n for j in range(int(sep['branch'][pos_ln_aux[i],15])):\r\n pos_ln.append(pos_ln_aux[i])\r\nn_var = len(pos_ln) \r\nf = sep['branch'][pos_ln, 0]-1 #new\r\nt = sep['branch'][pos_ln, 1]-1\r\nIn = r_[range(n_var), range(n_var)]\r\nCf = sparse((r_[ones(n_var), -ones(n_var)], (In, r_[f, t])), (n_var, nb)) # S new lines\r\nPTDFe = SF[pos_le] * Cf.T \r\nPTDFn = SF[pos_ln] * Cf.T \r\n\r\n# VARIABLE DEFINITIONS\r\np = m.addVars(range(ng), vtype=GRB.CONTINUOUS, lb=0, name='Pg') #power unit generation\r\nvar_p = [p[i] for i in range(ng)]\r\nn = m.addVars(range(n_var), vtype=GRB.BINARY, name='n') #investment decitions\r\nvar_n = [n[i] for i in range(n_var)]\r\nfv = m.addVars(range(n_var), vtype=GRB.CONTINUOUS, ub=M, lb=-M, name='fv') #investment decitions\r\nvar_fv = [fv[i] for i in range(n_var)]\r\n\r\n# OPTIMIZATION PROBLEM - OF\r\nCOp = tE* quicksum(var_p*sep['gencost'][:,4])\r\nCInv = quicksum(var_n*sep['branch'][pos_ln,14])\r\n\r\nm.setObjective(COp/1e6+CInv, GRB.MINIMIZE)\r\n\r\n# s.t.\r\nm.addConstr(sum(var_p),GRB.EQUAL,sum(sep['bus'][:,2]),'Balance') # Nodal balance\r\n\r\nfor i in range(nle): # existing power flows \r\n expr1 = quicksum(var_p*SF[pos_le[i],sep['gen'][:,0]-1])\r\n expr2 = quicksum(var_fv*PTDFe[i])\r\n tx = str(int(sep['branch'][pos_le[i],0])) + str(int(sep['branch'][pos_le[i],1]))\r\n m.addConstr(-expr1-expr2,GRB.GREATER_EQUAL,rhs_1[pos_le[i]],'fe%sm' % tx)\r\n m.addConstr(expr1+expr2,GRB.GREATER_EQUAL,rhs_2[pos_le[i]],'fe%sM' % tx)\r\n\r\nI_PTDF = sparseI(n_var) - PTDFn\r\nfor i in range(n_var): # future power flows \r\n expr1 = quicksum(var_p*SF[pos_ln[i],sep['gen'][:,0]-1])\r\n aux=array(I_PTDF[i])\r\n expr2 = quicksum(var_fv*aux[0])\r\n tx = str(int(sep['branch'][pos_ln[i],0])) + str(int(sep['branch'][pos_ln[i],1]))\r\n m.addConstr(expr1-expr2+sep['branch'][pos_ln[i],5]*n[i],GRB.GREATER_EQUAL,PF_sl[pos_ln[i]],'fv%sm' % tx)\r\n m.addConstr(-expr1+expr2+sep['branch'][pos_ln[i],5]*n[i],GRB.GREATER_EQUAL,-PF_sl[pos_ln[i]],'fv%sM' % tx)\r\n\r\nfor i in range(n_var): #Fv min & max\r\n m.addConstr(-fv[i]-M*n[i],GRB.GREATER_EQUAL,-M,'fv2_m%s' % i)\r\n m.addConstr(fv[i]-M*n[i],GRB.GREATER_EQUAL,-M,'fv2_M%s' % i)\r\n\r\nfor i in range(ng): #P min & P max\r\n m.addConstr(p[i],GRB.GREATER_EQUAL,sep['gen'][i,9],'Pmin%d' % sep['gen'][i,0])\r\n m.addConstr(-p[i],GRB.GREATER_EQUAL,-sep['gen'][i,8],'Pmax%d' % sep['gen'][i,0])\r\n\r\nt11 = time.time()\r\n\r\n# SOLVER & INFO\r\nt2 = time.time() \r\nm.optimize()\r\nt3 = time.time()\r\n\r\nif True:\r\n m.write('TCEP_PTDF.lp') \r\nstatus = m.Status\r\nif status == GRB.Status.OPTIMAL:\r\n print ('Cost = %.2f ($MM) => CInv = %.2f & COp = %.2f' % 
(m.objVal,CInv.getValue(),COp.getValue()/1e6)) \r\n if nb < 100:\r\n print ('New investment decisions:') \r\n sol_n = m.getAttr('x', n)\r\n for i in range(n_var):\r\n if sol_n[i] != 0:\r\n print('n[%.0f-%.0f] = %.3f' % (sep['branch'][pos_ln[i],0], sep['branch'][pos_ln[i],1], sol_n[i]))\r\n print ('Power generation solution (Pg):') \r\n sol_p = m.getAttr('x', p)\r\n for i in range(ng):\r\n if sol_p[i] != 0:\r\n print('Pg[%.0f] = %.3f (MW)' % (sep['gen'][i,0], sol_p[i]))\r\n sol_fv = m.getAttr('x', fv)\r\n print ('Existing power flows:') \r\n Cg = sparse((ones(ng), (sep['gen'][:,0]-1, range(ng))), (nb, ng)) #conection gen matrix\r\n PF = np.dot(SF[pos_le],Cg*sol_p.values()-sep['bus'][:,2]) + np.dot(PTDFe,sol_fv.values())\r\n for i in range(nle):\r\n print('f[%.0f-%.0f] = %.3f (MW)' % (sep['branch'][pos_le[i],0], sep['branch'][pos_le[i],1], PF[i])) \r\n print ('Investment power flows:') \r\n PF = np.dot(SF[pos_ln],Cg*sol_p.values()-sep['bus'][:,2]) + np.dot(PTDFn,sol_fv.values())\r\n for i in range(n_var):\r\n if sol_n[i] != 0:\r\n print('f[%.0f-%.0f] = %.3f (MW)' % (sep['branch'][pos_ln[i],0], sep['branch'][pos_ln[i],1], PF[i])) \r\n print('=> Formulation time: %.4f (s)'% (t11-t00))\r\n print('=> Solution time: %.4f (s)' % (t3-t2))\r\n print('=> Solver time: %.4f (s)' % (m.Runtime))\r\n# fixed = m.fixed()\r\n# fixed.optimize()\r\n# print ('Lagrange multipliers:')\r\n# for v in fixed.getConstrs():\r\n# if v.pi > 1e-2:\r\n# print('%s = %g ($/MWh)' % (v.ConstrName,v.pi))\r\nelif status == GRB.Status.INF_OR_UNBD or \\\r\n status == GRB.Status.INFEASIBLE or \\\r\n status == GRB.Status.UNBOUNDED:\r\n print('The model cannot be solved because it is infeasible or unbounded => status \"%d\"' % status)\r\n sys.exit(1) #1","repo_name":"juninho-03/python-gurobi","sub_path":"TCEP_PTDF_static.py","file_name":"TCEP_PTDF_static.py","file_ext":"py","file_size_in_byte":6326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"8382369039","text":"from numba.np.ufunc import parallel\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport time\r\nfrom numba import njit, prange, jit, cuda\r\nfrom numba.typed import List, Dict\r\nfrom numba.core import types\r\nfrom numpy.core.numeric import identity\r\n\r\nfloat_2Darray = types.float64[:,:]\r\nimport heapq\r\nimport open3d as o3d\r\nimport math\r\nimport pickle\r\n\r\nimport threading\r\nfrom timeit import repeat\r\n\r\n\r\nimport readData #file for reading/plotting data\r\nimport numbaFunctions as nF #file for all of the numba functions\r\nimport cudaFunctions as cF\r\nimport pointCloudPlotFunctions as cloudPlot\r\nimport pykitti\r\n\r\ndef getKittiOdom(map):\r\n # Change this to the directory where you store KITTI data\r\n basedir = \"D:/UAH/Grad School/Research/KITTIData/dataset\"\r\n\r\n # Specify the dataset to load\r\n sequence = '00'\r\n\r\n # Load the data. 
Optionally, specify the frame range to load.\r\n dataset = pykitti.odometry(basedir, sequence)\r\n dataset = pykitti.odometry(basedir, sequence, frames=range(0, map, 1))\r\n\r\n # dataset.calib: Calibration data are accessible as a named tuple\r\n # dataset.timestamps: Timestamps are parsed into a list of timedelta objects\r\n # dataset.poses: List of ground truth poses T_w_cam0\r\n # dataset.camN: Generator to load individual images from camera N\r\n # dataset.gray: Generator to load monochrome stereo pairs (cam0, cam1)\r\n # dataset.rgb: Generator to load RGB stereo pairs (cam2, cam3)\r\n # dataset.velo: Generator to load velodyne scans as [x,y,z,reflectance]\r\n\r\n # Grab some data\r\n pose0 = dataset.poses[0]\r\n # print(dataset.calib)\r\n pose0_xyz =np.eye(4)\r\n # pose0_xyz[0,:] = pose0[2,:] #- pose0[2,3]\r\n # pose0_xyz[1,:] = -pose0[0,:] #- pose0[0,3]\r\n # pose0_xyz[2,:] = pose0[1,:] #- #z is forward\r\n cameraToLidar = np.eye(4)\r\n th = np.deg2rad(-90) #about z\r\n beta = np.deg2rad(0) #about y\r\n gamma = np.deg2rad(-90) #about x\r\n Rz = np.array([[np.cos(th), -np.sin(th), 0.0],[np.sin(th), np.cos(th), 0.0],[0.0, 0.0, 1.0]])\r\n Ry = np.array([[np.cos(beta), 0, np.sin(beta)],[0, 1, 0],[-np.sin(beta), 0, np.cos(beta)]])\r\n Rx = np.array([[1, 0, 0],[0, np.cos(gamma), -np.sin(gamma)],[0, np.sin(gamma), np.cos(gamma)]]) #https://en.wikipedia.org/wiki/Rotation_matrix\r\n cameraToLidar[0:3,0:3] = np.matmul(np.matmul(Rz,Ry),Rx)\r\n\r\n # cam = np.eye(4)\r\n # print(dataset.calib.K_cam0.shape)\r\n # print(dataset.calib.P_rect_00.shape)\r\n # cam = np.matmul(dataset.calib.K_cam0,dataset.calib.P_rect_00)\r\n # print(cam.shape)\r\n # print(cam)\r\n calibrationTransform = dataset.calib.T_cam0_velo\r\n\r\n poses = np.zeros((len(dataset.poses),3))\r\n for i in range(len(dataset.poses)):\r\n pose = dataset.poses[i]\r\n poses[i,:] = np.matmul(np.linalg.inv(calibrationTransform),pose[:,3])[0:3]\r\n\r\n return poses, pose0\r\n\r\ndef plotPickleData(filename, resolution, LidarFile):\r\n data = pickle.load( open(filename, \"rb\" ) ) #open saved pickle data\r\n poseGraph = data['transforms']\r\n\r\n groundTruth,pose0 = getKittiOdom(len(poseGraph))\r\n \r\n\r\n print(\"%s Maps\" %(len(poseGraph)))\r\n\r\n Xall = []\r\n startMap = 0\r\n if startMap == 0:\r\n endMap = len(poseGraph)\r\n else:\r\n endMap = startMap + len(poseGraph)\r\n scanToGlobal = pose0\r\n for i in range(startMap,endMap): #load LIDAR data\r\n dataFile = LidarFile + \"%06d\"%i + \".bin\"\r\n \r\n scanData = readData.load_velo_scan(dataFile) #(X,Y,Z,I) where I is intensity\r\n X = np.ascontiguousarray(scanData[:,0:3])\r\n SCAN_BOUNDS = np.array([50,20,.5,-2])\r\n X = nF.scanBoundReduction(X,SCAN_BOUNDS)\r\n X = np.vstack((X.T,np.ones(X.shape[0])))\r\n \r\n\r\n localTransform = poseGraph[i-startMap]\r\n if i >startMap:\r\n localTransform[0,3] += -resolution[0] #subtract the resolution to get the correct point location\r\n localTransform[1,3] += -resolution[1]\r\n localTransform[2,3] += -resolution[2]\r\n\r\n\r\n scanToGlobal = np.matmul(scanToGlobal,localTransform)\r\n transform = scanToGlobal\r\n\r\n t=transform[0:3,3]\r\n # print(\"Transformation: X:%s, Y:%s, Z:%s\" %(t[0],t[1],t[2]))\r\n X_Global = np.matmul(scanToGlobal,X)\r\n # print(X_Global.shape)\r\n \r\n # idx = np.where(X_Global[2,:]< -4)\r\n # X_Global = np.delete(X_Global,idx,axis=1)\r\n # idx = np.where(X_Global[2,:] > 2)\r\n # X_Global = np.delete(X_Global,idx,axis=1)\r\n Xall.append(X_Global[0:3,:].T)\r\n\r\n # cloudPlot.pointCloudSeriesPlot([X[0:3,:].T])\r\n 
Xall.append(groundTruth)\r\n # cloudPlot.pointCloudSeriesAnimation(Xall)\r\n cloudPlot.pointCloudSeriesPlot(Xall)\r\n \r\n\r\n\r\ndef main():\r\n FILENAME = \"src/threeDScanMatch/posegraphData_all.txt\" #Path of the map files\r\n LIDAR_DATA_FILE = \"D:/UAH/Grad School/Research/KITTIData/dataset/sequences/00/velodyne/\" #directory with LIDAR Data\r\n resolution = np.array([.2,.2,1],dtype=np.float64)\r\n plotPickleData(FILENAME, resolution, LIDAR_DATA_FILE)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","repo_name":"nadurthi/ResearchCodes","sub_path":"lidarprocessing/3DScanMatching/readPosegraph.py","file_name":"readPosegraph.py","file_ext":"py","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"10029133300","text":"import math\r\n\r\nt = int(input())\r\nfor o in range(t):\r\n start, end, num = map(int, input().split())\r\n answer = end - start + 1\r\n n = num\r\n div = []\r\n\r\n for i in range(2, math.ceil(num ** 0.5) + 1):\r\n if n % i == 0:\r\n div.append(i)\r\n while n % i == 0:\r\n n //= i\r\n if n not in div and n != 1:\r\n div.append(n)\r\n\r\n r = len(div)\r\n\r\n for i in range(1, 2 ** r):\r\n bit = bin(i)[2:].zfill(r)\r\n rep = 1\r\n cnt = 0\r\n\r\n for j in range(r):\r\n if bit[j] == '1':\r\n rep *= div[j]\r\n cnt += 1\r\n\r\n if cnt % 2 == 0:\r\n answer += end // rep - (start - 1) // rep\r\n else:\r\n answer -= end // rep - (start - 1) // rep\r\n\r\n print(f'Case #{o + 1}: {answer}')\r\n","repo_name":"wzrabbit/algorithm-practice","sub_path":"BOJ/♠ 9000~9999/9359_서로소.py","file_name":"9359_서로소.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"40"} +{"seq_id":"3566273803","text":"import pymysql\nimport csv\n\n\ndef connect_db():\n return pymysql.connect(host='localhost',\n port=3306,\n user='root',\n password='1996',\n database='scrm')\n\n\ndef read_csv(path):\n order_csv = csv.reader(open(path, 'r'))\n db = connect_db()\n count = 0\n orders = []\n\n for line in order_csv:\n # print(line)\n orders.append((line[0], line[1], line[2], line[3]))\n\n count += 1\n print(count)\n # if count >= 5: break\n\n insert_order(db, orders)\n\n db.close()\n\n\ndef insert_order(db, orders):\n sqlstr = \"\"\"insert into orders values(%s, %s, %s, %s)\"\"\"\n # db = connect_db()\n cursor = db.cursor()\n\n try:\n # 执行sql语句\n cursor.executemany(sqlstr, orders)\n assert cursor.rowcount == len(orders), '插入数据数量有误,应为%s,实为%s'%(len(orders), cursor.rowcount)\n # 提交到数据库执行\n db.commit()\n print('数据插入完成')\n except Exception as e:\n # 如果发生错误则回滚\n print(e)\n db.rollback()\n\n\nread_csv(\"order/ratings_Sports_and_Outdoors.csv\")\n","repo_name":"EyreYoung/pysql","sub_path":"order csv2sql.py","file_name":"order csv2sql.py","file_ext":"py","file_size_in_byte":1198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38292729356","text":"from django.contrib import admin\nfrom .models import Subscribe, NEWS_Letter\n\n# Register your models here.\ndef send_newsletter(modeladmin, request, queryset):\n for newsletter in queryset:\n newsletter.send(request)\n\nsend_newsletter.short_description = \"Send selected Newsletters to all subscribers\"\n\nclass NEWS_Letter_Admin(admin.ModelAdmin):\n actions = [send_newsletter]\n\nadmin.site.register(Subscribe)\nadmin.site.register(NEWS_Letter, 
NEWS_Letter_Admin)\n","repo_name":"yaswanthsaivendra/Techsnap","sub_path":"Main-Application-main/newsletter/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5175184256","text":"import cv2 as cv #görüntü işleme için \r\nimport numpy as np\r\nfrom PIL import Image #resim işlemleri için\r\nimport pytesseract as pyt\r\nimport pyperclip as pc #metni kopyalama işlemi için \r\n\r\n# Tesseract-Ocr kullanmak için yolu belirtme\r\npyt.pytesseract.tesseract_cmd=\"C:\\\\Program Files (x86)\\\\Tesseract-OCR\\\\tesseract.exe\"\r\n\r\n#parametre olarak görüntüyü alan metin çevirme fonksiyonu\r\ndef metinOku(resim_yolu):\r\n\r\n resim = cv.imread(resim_yolu)#resim yolunu alma\r\n resim = cv.cvtColor(resim, cv.COLOR_BGR2GRAY)#binary formata çevirme(gri yapma)\r\n\r\n #Resimdeki kirliliği temizleme\r\n kernel = np.ones((1,1), np.uint8)\r\n resim = cv.erode(resim, kernel, iterations=1)\r\n resim = cv.dilate(resim, kernel, iterations=1)\r\n blur = cv.GaussianBlur(resim, (5,5), 0)\r\n\r\n #kontrast ayarlama\r\n goruntu = np.int16(blur) \r\n kontrast = 64\r\n parlaklik = 0\r\n goruntu = goruntu*(kontrast/127 + 1) - kontrast + parlaklik\r\n goruntu = np.clip(goruntu, 0, 255) #0-255 değerler\r\n goruntu = np.uint8(goruntu)\r\n \r\n sonuc = pyt.image_to_string(goruntu, lang='tur+eng') #resmi yazıya çevirme(algılama için 1. dil Türkçe 2. dil İngilizce)\r\n metin = sonuc.strip() #strip fonksiyonuyla kelimeler arası fazlalık boşlukları temizleme\r\n #metin = metin.replace(\"\\n\\n\",\" \")#\\n\\n ile olan yere tek boşluk koyma\r\n return metin #resimden okunan metini döndürme\r\n\r\n#parametre olarak çevirilen metni alan, karakterlerine ayırma fonksiyonu.\r\ndef MetniKarakterlerineAyir(metin):\r\n\r\n print(\"\\nMETİN KARAKTERLERE BÖLÜNÜYOR....\\n\")\r\n i = 0\r\n while i < len(metin):\r\n if metin[i] == \" \":\r\n metin[i].replace(\" \",\"\")\r\n else:\r\n print(metin[i])\r\n i += 1\r\n #parametre olarak çevirilen metni alan, kelimelere bölme fonksiyonu \r\ndef MetniKelimelereBol(metin):\r\n\r\n print(\"\\nMETİN KELİMELERE BÖLÜNÜYOR....\\n\")\r\n kelimeler = metin.split(\" \")\r\n\r\n for kelime in kelimeler:\r\n print(kelime)\r\n #parametre olarak çevirilen metni alan, cümlelere bölme fonksiyonu \r\ndef MetniCumlelereBol(metin):\r\n\r\n print(\"\\nMETİN CÜMLELERE BÖLÜNÜYOR....\\n\")\r\n cumleler = metin.split(\".\")\r\n\r\n for cumle in cumleler:\r\n print(cumle)\r\n#metni otomatik kopyalamak için yazılan fonksiyon\r\ndef MetniKopyala(metin):\r\n\r\n pc.copy(metin) #kopyalama işlemi\r\n print(\"Metin panoya kopyalandı.\")\r\n\r\nmetin = metinOku('metin.jpg') #girdi olarak alınan görüntü\r\nprint(metin) #metnin çıktısını konsola yazdırma \r\n\r\n#MetniKarakterlerineAyir(metin) #karakter ayırma fonksiyonu koşmak için \r\n#MetniKelimelereBol(metin) #kelimelere bölme fonksiyonu koşmak için \r\n#MetniCumlelereBol(metin) #cümlelere bölme fonksiyonu koşmak için \r\n#MetniKopyala(metin) #kopyalama fonksiyonu için \r\n","repo_name":"ftmszr/GoruntuUzerindeIslemler","sub_path":"GoruntudenMetneCevirme/GoruntudenMetneCevirme.py","file_name":"GoruntudenMetneCevirme.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"38063681241","text":"import os\nimport io\n\n\ndef get_transcripts():\n # Imports the Google Cloud client library\n from google.cloud import speech\n from google.cloud.speech import 
enums\n from google.cloud.speech import types\n\n # Instantiates a client\n client = speech.SpeechClient()\n\n # Transcribe audio files\n responses = dict()\n for root, subFolders, files in os.walk(\"../audio_input/\"):\n for file_name in files:\n file_path = root + \"/\" + file_name\n print(file_path)\n # Loads the audio into memory\n with io.open(file_path, 'rb') as audio_file:\n content = audio_file.read()\n audio = types.RecognitionAudio(content=content)\n\n config = types.RecognitionConfig(\n encoding=enums.RecognitionConfig.AudioEncoding.FLAC,\n sample_rate_hertz=16000,\n language_code='en-US',\n enable_word_time_offsets=True,\n speech_contexts=[types.SpeechContext(phrases=[\"paul\", \"estuardo\", \"piyush\",\n \"madison\", \"mostafa\", \"momotaz\",\n \"katie\", \"goodbye\", \"hello\", \"say\",\n \"great\", \"job\", \"good\", \"bye\",\n \"say hello\", \"great job\",\n \"great job good bye\",\n \"great job goodbye\"])])\n\n # Detects speech in the audio file\n responses[file_path] = client.long_running_recognize(config, audio)\n\n for file_name, future in responses.items():\n out_name = file_name.replace(\"input\", \"output\")\n response = future.result(timeout=300)\n with open(out_name, \"w\") as out_file:\n out_file.write(file_name + \"\\n\" + str(response))\n print(out_name)\n\n\nif __name__ == '__main__':\n get_transcripts()\n","repo_name":"ercarpio/SG_DBN","sub_path":"itbn_tools/speech_recognition_loader.py","file_name":"speech_recognition_loader.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"5993649193","text":"import requests\nfrom clint.textui import progress\n\n\nclass FileDownloader:\n \"\"\"\n Downloads a File from a link\n \"\"\"\n\n def __init__(self, show_progress=True) -> None:\n \"\"\"Downloads a File from a link\n\n Args:\n show_progress (bool, optional): Show progress or not. Defaults to True.\n \"\"\"\n\n self.session = requests.Session()\n self.show_progress = show_progress\n\n def download(self, url: str, dest_folder: str, name: str = \"file\", ):\n \"\"\"downloads a file\n\n Args:\n url (str): url of file\n dest_folder (str): destination folder\n name (str, optional): filename. 
Defaults to \"file\".\n \"\"\"\n\n response = self.session.get(url, stream=self.show_progress)\n filepath = f\"{dest_folder}/{name}\"\n if response.status_code == 200:\n if not self.show_progress:\n with open(filepath, 'wb') as f:\n f.write(response.content)\n\n else:\n total_length = int(response.headers.get('content-length'))\n\n with open(filepath, 'wb') as f:\n for chunk in progress.bar(response.iter_content(chunk_size=1024), expected_size=(total_length/1024) + 1):\n if chunk:\n f.write(chunk)\n f.flush()\n","repo_name":"Saief1999/movie-planning-predictor","sub_path":"poster-prediction/utils/file_downloader.py","file_name":"file_downloader.py","file_ext":"py","file_size_in_byte":1361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"1221637461","text":"\"\"\"Data preparation functions.\"\"\"\n\n\ndef flatten_list(data):\n \"\"\"\n Flattens a list of dictionaries and lists of dictionaries into a flat list of dictionaries.\n \"\"\"\n result = []\n for item in data:\n if isinstance(item, dict):\n result.append(item)\n elif isinstance(item, list):\n result.extend(flatten_list(item))\n return result\n","repo_name":"codeformuenster/RadhackHH23","sub_path":"bike_balance/prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"19013725323","text":"''' crie um programa que tenha uma função fatorial() que receba\ndois parâmetros: o primeiro que indique o número a calcular e o outro\nchamdo show que será um valor lógico(opcional) indicando se será mostrado ou não na tela\no processo de cálculo do fatorial. *** INCLUIR DOCSTRING NA FUNÇÃO '''\ndef fatorial(n, s=False):\n \"\"\"\n Essa função calcula o fatorial de um número inteiro. É opcional detalhar a exibição do cálculo.\n :param n: Recebe o número do qual se deseja obter o fatorial\n :param s: Recebe um valor lógico. 
Caso seja True, será mostrado o cálculo do fatorial\n :return: n\n \"\"\"\n if not s:\n fat = n\n for i in range (1, n):\n fat = (fat * (n-i))\n print(f'Fatorial de {n} é {fat}.')\n else:\n fat = n\n print(f'Cálculo detalhado do fatorial de [{n}] = {n} x ', end='')\n for i in range(1, n):\n fat = (fat * (n - i))\n if i == n-1:\n print(f'{n - i} = ', end='')\n else:\n print(f'{n - i} x ', end='')\n print(fat)\n\n\n\n# Programa Principal\nfatorial(5, s=True)\nprint(help(fatorial))\n","repo_name":"rafaelclemes81/Python","sub_path":"ex102.py","file_name":"ex102.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"4506982877","text":"import os\nimport cuml\nfrom cuml.utils import input_utils\nimport numpy as np\nimport pandas as pd\nimport pickle as pickle\nimport sklearn.ensemble as skl_ensemble\nimport cudf\nfrom numba import cuda\nfrom cuml.benchmark import datagen\n\n\ndef fit_kneighbors(m, x):\n m.fit(x)\n m.kneighbors(x)\n\n\ndef fit(m, x, y=None):\n m.fit(x) if y is None else m.fit(x, y)\n\n\ndef fit_transform(m, x):\n m.fit_transform(x)\n\n\ndef predict(m, x):\n m.predict(x)\n\n\ndef _training_data_to_numpy(X, y):\n \"\"\"Convert input training data into numpy format\"\"\"\n if isinstance(X, np.ndarray):\n X_np = X\n y_np = y\n elif isinstance(X, cudf.DataFrame):\n X_np = X.as_gpu_matrix().copy_to_host()\n y_np = y.to_gpu_array().copy_to_host()\n elif cuda.devicearray.is_cuda_ndarray(X):\n X_np = X.copy_to_host()\n y_np = y.copy_to_host()\n elif isinstance(X, (pd.DataFrame, pd.Series)):\n X_np = datagen._convert_to_numpy(X)\n y_np = datagen._convert_to_numpy(y)\n else:\n raise TypeError(\"Received unsupported input type\")\n return X_np, y_np\n\n\ndef _build_fil_classifier(m, data, args, tmpdir):\n \"\"\"Setup function for FIL classification benchmarking\"\"\"\n from cuml.utils.import_utils import has_xgboost\n if has_xgboost():\n import xgboost as xgb\n else:\n raise ImportError(\"No XGBoost package found\")\n\n train_data, train_label = _training_data_to_numpy(data[0], data[1])\n\n dtrain = xgb.DMatrix(train_data, label=train_label)\n\n params = {\n \"silent\": 1, \"eval_metric\": \"error\",\n \"objective\": \"binary:logistic\", \"tree_method\": \"gpu_hist\",\n }\n params.update(args)\n max_depth = args[\"max_depth\"]\n num_rounds = args[\"num_rounds\"]\n n_feature = data[0].shape[1]\n train_size = data[0].shape[0]\n model_name = f\"xgb_{max_depth}_{num_rounds}_{n_feature}_{train_size}.model\"\n model_path = os.path.join(tmpdir, model_name)\n bst = xgb.train(params, dtrain, num_rounds)\n bst.save_model(model_path)\n\n return m.load(model_path, algo=args[\"fil_algo\"],\n output_class=args[\"output_class\"],\n threshold=args[\"threshold\"],\n storage_type=args[\"storage_type\"])\n\n\ndef _build_fil_skl_classifier(m, data, args, tmpdir):\n \"\"\"Trains an SKLearn classifier and returns a FIL version of it\"\"\"\n\n train_data, train_label = _training_data_to_numpy(data[0], data[1])\n\n params = {\n \"n_estimators\": 100,\n \"max_leaf_nodes\": 2**10,\n \"max_features\": \"sqrt\",\n \"n_jobs\": -1,\n \"random_state\": 42,\n }\n params.update(args)\n\n # remove keyword arguments not understood by SKLearn\n for param_name in [\"fil_algo\", \"output_class\", \"threshold\",\n \"storage_type\"]:\n params.pop(param_name, None)\n\n max_leaf_nodes = args[\"max_leaf_nodes\"]\n n_estimators = args[\"n_estimators\"]\n n_feature = data[0].shape[1]\n train_size = data[0].shape[0]\n model_name = 
(f\"skl_{max_leaf_nodes}_{n_estimators}_{n_feature}_\" +\n f\"{train_size}.model.pkl\")\n model_path = os.path.join(tmpdir, model_name)\n skl_model = skl_ensemble.RandomForestClassifier(**params)\n skl_model.fit(train_data, train_label)\n pickle.dump(skl_model, open(model_path, \"wb\"))\n\n return m.load_from_sklearn(skl_model, algo=args[\"fil_algo\"],\n output_class=args[\"output_class\"],\n threshold=args[\"threshold\"],\n storage_type=args[\"storage_type\"])\n\n\ndef _build_cpu_skl_classifier(m, data, args, tmpdir):\n \"\"\"Loads the SKLearn classifier and returns it\"\"\"\n\n max_leaf_nodes = args[\"max_leaf_nodes\"]\n n_estimators = args[\"n_estimators\"]\n n_feature = data[0].shape[1]\n train_size = data[0].shape[0]\n model_name = (f\"skl_{max_leaf_nodes}_{n_estimators}_{n_feature}_\" +\n f\"{train_size}.model.pkl\")\n model_path = os.path.join(tmpdir, model_name)\n\n skl_model = pickle.load(open(model_path, \"rb\"))\n return skl_model\n\n\ndef _build_treelite_classifier(m, data, args, tmpdir):\n \"\"\"Setup function for treelite classification benchmarking\"\"\"\n from cuml.utils.import_utils import has_treelite, has_xgboost\n if has_treelite():\n import treelite\n import treelite.runtime\n else:\n raise ImportError(\"No treelite package found\")\n if has_xgboost():\n import xgboost as xgb\n else:\n raise ImportError(\"No XGBoost package found\")\n\n max_depth = args[\"max_depth\"]\n num_rounds = args[\"num_rounds\"]\n n_feature = data[0].shape[1]\n train_size = data[0].shape[0]\n model_name = f\"xgb_{max_depth}_{num_rounds}_{n_feature}_{train_size}.model\"\n model_path = os.path.join(tmpdir, model_name)\n\n bst = xgb.Booster()\n bst.load_model(model_path)\n tl_model = treelite.Model.from_xgboost(bst)\n tl_model.export_lib(\n toolchain=\"gcc\", libpath=model_path+\"treelite.so\",\n params={'parallel_comp': 40}, verbose=False\n )\n return treelite.runtime.Predictor(model_path+\"treelite.so\", verbose=False)\n\n\ndef _treelite_fil_accuracy_score(y_true, y_pred):\n \"\"\"Function to get correct accuracy for FIL (returns class index)\"\"\"\n y_pred_binary = input_utils.convert_dtype(y_pred > 0.5, np.int32)\n if isinstance(y_true, np.ndarray):\n return cuml.metrics.accuracy_score(y_true, y_pred_binary)\n elif cuda.devicearray.is_cuda_ndarray(y_true):\n y_true_np = y_true.copy_to_host()\n return cuml.metrics.accuracy_score(y_true_np, y_pred_binary)\n elif isinstance(y_true, cudf.Series):\n return cuml.metrics.accuracy_score(y_true, y_pred_binary)\n elif isinstance(y_true, pd.Series):\n return cuml.metrics.accuracy_score(y_true, y_pred_binary)\n else:\n raise TypeError(\"Received unsupported input type\")\n","repo_name":"elisaoh/ECE759","sub_path":"cuml_ver2/cuml/python/cuml/benchmark/bench_helper_funcs.py","file_name":"bench_helper_funcs.py","file_ext":"py","file_size_in_byte":5817,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"17886849198","text":"import json\n\n\nclass SWeather:\n\n def __get_data_from_db(self):\n \"\"\"\n Getting the data from database\n :return: None\n \"\"\"\n\n # a sample data, in Json format.\n sample_data_js = \"\"\"\n {\n \"years\":[1999, 2000, 2000, 2021],\n \"values\":[34, 22, 34, 30],\n \"regionName\":\"QID\",\n \"data type:\":\"temperature\"\n }\n \"\"\"\n\n self.__data_object = json.loads(sample_data_js) # transfer Json into python object\n\n def get_data(self):\n \"\"\"\n Pass the data\n :return:\n \"\"\"\n self.__get_data_from_db() # update the data\n return 
json.dumps(self.__data_object)\n","repo_name":"TZZTERRY/Agricultural-Modelling","sub_path":"back_end/agri_service/s_weather.py","file_name":"s_weather.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"40"} +{"seq_id":"8750076051","text":"\"\"\"\nThis module contains the class definitions for Budget, BudgetManager,\nand BudgetCreator as a supporting class for constructing a BudgetManager\nmaintaining all the Budget objects.\n\"\"\"\n\nfrom enum import Enum\n\n\nclass BudgetCategory(Enum):\n \"\"\"\n An enum represents supported budget categories.\n \"\"\"\n GAMES_AND_ENTERTAINMENT = 'Games and Entertainment'\n CLOTHING_AND_ACCESSORIES = 'Clothing and Accessories'\n EATING_OUT = 'Eating Out'\n MISCELLANEOUS = 'Miscellaneous'\n\n\nclass Budget:\n \"\"\"\n A class that represents a budget. A budget has:\n - a name/category\n - a total amount\n - a amount spent\n - a state determines if this budget is locked.\n \"\"\"\n\n def __init__(self, category: BudgetCategory, total_amount: float):\n \"\"\"\n Initializes a Budget.\n :param category: a BudgetCategory\n :param total_amount: a float\n \"\"\"\n self.category = category\n self.total_amount = total_amount\n self.amount_spent = 0\n self._locked = False\n\n def __str__(self):\n return f'*** Budget: {self.name} ***\\n' \\\n f'• Status: {\"Locked\" if self._locked else \"Available\"}\\n' \\\n f'• Amount spent: ${self.amount_spent}\\n' \\\n f'• Amount left: ${self.total_amount - self.amount_spent}\\n' \\\n f'• Total amount: ${self.total_amount}'\n\n @property\n def name(self) -> str:\n \"\"\"\n Returns the name/category of the budget.\n :return: a string\n \"\"\"\n return str(self.category.value)\n\n @property\n def exceeded_ratio(self) -> float:\n \"\"\"\n A property that calculates the exceeded ratio (amount spent /\n total amount) of this budget.\n :return: a float\n \"\"\"\n return self.amount_spent / self.total_amount\n\n @property\n def locked(self) -> bool:\n \"\"\"\n Read only property of the _locked attribute, to determine if\n this budget is locked.\n :return: a bool\n \"\"\"\n return self._locked\n\n def lock(self) -> None:\n \"\"\"\n Locks this budget.\n :return: None\n \"\"\"\n self._locked = True\n\n\nclass BudgetManager:\n \"\"\"\n The BudgetManager maintains a dictionary of budgets (referenced via\n budget names).\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Initializes a BudgetManager.\n \"\"\"\n self.budgets = {}\n\n def add_budget(self, budget: Budget) -> None:\n \"\"\"\n Adds a budget to the dictionary.\n :param budget: a Budget\n :return: None\n \"\"\"\n self.budgets[budget.category] = budget\n\n def get_budget(self, category: BudgetCategory) -> Budget:\n \"\"\"\n Finds and returns budget stored in the dictionary.\n :param category: the budget name to get\n :return: a Budget\n \"\"\"\n return self.budgets.get(category, None)\n\n def get_budgets(self) -> list:\n \"\"\"\n Returns budgets stored in the dictionary as a list.\n :return: a list of Budget objects\n \"\"\"\n return list(self.budgets.values())\n\n @property\n def no_locked_budgets(self) -> int:\n \"\"\"\n Counts and returns the number of locked budgets.\n :return: an int, the number of locked budgets\n \"\"\"\n count = 0\n for budget in self.budgets.values():\n if budget.locked:\n count += 1\n return count\n\n\nclass BudgetCreator:\n \"\"\"\n An utility class that helps create Budget and BudgetManager.\n \"\"\"\n\n @staticmethod\n def create_budget(budget_category: BudgetCategory) -> 
Budget:\n \"\"\"\n Creates and returns a budget from user input for the given\n budget category.\n :param budget_category: a string\n :return: a Budget\n \"\"\"\n amount = -1\n while amount <= 0:\n amount = float(input(f'Enter {budget_category.value} budget: '))\n if amount <= 0:\n print('Budget amount must be greater than 0! Please enter '\n 'again!')\n return Budget(budget_category, amount)\n\n @classmethod\n def create_budget_manager(cls) -> BudgetManager:\n \"\"\"\n Prompts the user for the amount of each budget. These budgets\n will be added to a BudgetManager. The manager then will be\n returned out.\n :return: a BudgetManager\n \"\"\"\n manager = BudgetManager()\n for category in list(BudgetCategory):\n budget = cls.create_budget(category)\n manager.add_budget(budget)\n return manager\n\n @staticmethod\n def execute_budgets_menu() -> BudgetCategory:\n \"\"\"\n Presents the budget menu for the user to select and returns the\n budget category that user chooses.\n :return: a BudgetCategory\n \"\"\"\n categories = list(BudgetCategory)\n no_budgets = len(categories)\n choice = -1\n print('Select a budget category:')\n while choice < 1 or choice > no_budgets:\n for idx, category in enumerate(categories):\n print(f' {idx + 1}. {category.value}')\n choice = int(input(f'Enter a choice (1-{no_budgets}): '))\n if choice < 1 or choice > no_budgets:\n print('Invalid choice! Please enter again!')\n return categories[choice - 1]\n\n @classmethod\n def load_test_budget_manager(cls) -> BudgetManager:\n \"\"\"\n Sets up and returns a BudgetManager with each budget of amount\n $100.\n :return: a BudgetManager\n \"\"\"\n manager = BudgetManager()\n for category in list(BudgetCategory):\n budget = Budget(category, 100)\n manager.add_budget(budget)\n return manager\n","repo_name":"lizhiquan/learning-python","sub_path":"assignment-1-the-f-a-m-lizhiquan/budget.py","file_name":"budget.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9889672880","text":"#Kate Bell\r\n#BLLKAT005\r\n# 2 April 2014\r\n\r\ndef print_square():\r\n print(\"*\"*5)\r\n for i in range (0,3):\r\n print(\"*\",\" \",\"*\",sep=\"\")\r\n print(\"*\"*5) \r\n \r\n \r\ndef print_rectangle (width, height):\r\n for i in range (0,height):\r\n if i==0 or i==height-1:\r\n print(\"*\"*width)\r\n else:\r\n print(\"*\",\" \"*(width-2),\"*\",sep=\"\")\r\n \r\ndef get_rectangle (width, height):\r\n strBox=\"\"\r\n for i in range (0,height):\r\n if i==0:\r\n strBox=strBox+\"*\"*width+\"\\n\"\r\n elif i0:\r\n strBox=strBox+\"*\"+\" \"*(width-2)+\"*\"+\"\\n\"\r\n elif i==height-1:\r\n strBox=strBox+\"*\"*width\r\n return strBox\r\n\r\n \r\n","repo_name":"MrHamdulay/csc3-capstone","sub_path":"examples/data/Assignment_4/bllkat005/boxes.py","file_name":"boxes.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"17180788818","text":"import os\nimport ctypes\nimport threading\nimport math\nimport time as _time\n\n# Project Imports\nimport ext.core\n\n# Begin nanosleep code \n# Under the ZPL, Please see the license/ZPL.txt file for more information.\n# Changes: by Joseph Lisee on Jan 20, 2008\n\ntry: \n # Linux\n try:\n _libc = ctypes.CDLL(\"libc.so.6\")\n except OSError:\n _libc = None\n if _libc is None:\n # MAC OS-X\n try:\n _libc = ctypes.CDLL(\"libc.dylib\", ctypes.RTLD_GLOBAL)\n except OSError:\n raise ImportError\n\n # Define the timespec structure 
in python\n    class _TIMESPEC(ctypes.Structure):\n        _fields_ = [('secs', ctypes.c_long),\n                    ('nsecs', ctypes.c_long),\n                   ]\n\n    _libc.nanosleep.argtypes = \\\n        [ctypes.POINTER(_TIMESPEC), ctypes.POINTER(_TIMESPEC)]\n\n\n    def nanosleep(sec, nsec):\n        sleeptime = _TIMESPEC()\n        sleeptime.secs = sec\n        sleeptime.nsecs = nsec\n        remaining = _TIMESPEC()\n        _libc.nanosleep(sleeptime, remaining)\n        return (remaining.secs, remaining.nsecs)\n\nexcept ImportError:\n    # if ctypes is not available or no reasonable library is found we provide\n    # a dummy which uses time.sleep\n\n    def nanosleep(sec, nsec):\n        _time.sleep(sec + (nsec * 0.000000001))\n    \n# End nanosleep code\n\ndef sleep(seconds):\n    \"\"\"\n    Sleeps the current thread the given number of seconds using nanosleep\n    \n    @type seconds: float \n    @param seconds: The number of seconds to sleep\n    \"\"\"\n    \n    # Round down to our seconds\n    secs = math.floor(float(seconds))\n    # Convert the remainder to nanoseconds\n    nsecs = (seconds - secs) * 1e9;\n    \n    nanosleep(long(secs), long(nsecs))\n\ndef time():\n    \"\"\"\n    Returns the time since program start\n    \n    Due to some odd platform differences, different time module functions \n    have different accuracies on different platforms. The function takes\n    that into account.\n    \n    @rtype: double\n    @return: Seconds since program start\n    \"\"\"\n    # This is most accurate on Linux and Mac\n    if 'posix' == os.name:\n        return _time.time()\n    # This one on Windows\n    else:\n        return _time.clock()\n\nclass Timer(threading.Thread):\n    \"\"\"\n    Throws event after given duration sleep in a background thread\n    \"\"\"\n    \n    def __init__(self, eventPublisher, eventType, duration, repeat = False):\n        \"\"\"\n        @type eventPublisher: ext.core.EventPublisher\n        @param eventPublisher: Publisher to publish the event with\n        \n        @type eventType: str\n        @param eventType: The type of event to publish\n        \n        @type duration: float\n        @param duration: The seconds to sleep\n        \n        @type repeat: bool\n        @param repeat: Whether or not the timer repeats\n        \"\"\"\n        threading.Thread.__init__(self)\n        \n        self._eventPublisher = eventPublisher\n        self._eventType = eventType\n        self._sleepTime = float(duration)\n        self._running = True\n        self._repeat = repeat\n        \n    def run(self):\n        \"\"\"\n        Fires off the event from a background thread after the needed sleep\n\n        This implements the standard python threading.Thread method.\n        \"\"\"\n        while True:\n            # Sleep for that time period\n            sleep(self._sleepTime)\n\n            # Publish event\n            self._complete()\n\n            # Set running to false\n            if not self._repeat:\n                self.stop()\n            \n            if not self._running:\n                break\n        \n    def stop(self):\n        \"\"\"\n        Stops the background thread from publishing its event when it wakes up\n        \"\"\"\n        self._running = False\n        \n    def _complete(self):\n        \"\"\"\n        Publishes the desired event, called in background thread after the sleep\n        \"\"\"\n        # Publish event\n        if self._running:\n            self._eventPublisher.publish(self._eventType, ext.core.Event())\n        \nclass TimerManager(ext.core.Subsystem):\n    \"\"\"\n    Creates ram.timer.Timer objects, using itself as the EventPublisher\n    \n    It makes sure all of its events are forwarded to the main EventHub so they\n    can be properly queued by the QueuedEventHub.\n    \"\"\"\n    def __init__(self, config = None, deps = None):\n        if config is None:\n            config = {}\n        \n        # Require EventHub, because the main client of this class, the AI, only\n        # gets events if the hub is present\n        ext.core.Subsystem.getSubsystemOfExactType(ext.core.EventHub, deps, \n                                                   nonNone = True)\n        ext.core.Subsystem.__init__(self, config.get('name', 
'TimerManager'), \n                                    deps)\n        \n    def newTimer(self, eventType, duration):\n        \"\"\"\n        Create a Timer object which publishes using this object.\n        \n        @rtype: ram.timer.Timer\n        @return: A Timer object which uses this object to publish its event\n        \"\"\"\n        return Timer(self, eventType, duration)\n\n\n    def backgrounded(self):\n        return True\next.core.registerSubsystem('TimerManager', TimerManager)\n","repo_name":"robotics-at-maryland/tortuga","sub_path":"packages/python/ram/timer.py","file_name":"timer.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"40"} +{"seq_id":"22360599107","text":"import tqdm\n\ndef solve():\n    powers = set()\n    for a in tqdm.tqdm(range(2, 101)):\n        for b in range(2, 101):\n            power = a ** b\n            if power not in powers:\n                powers.add(power)\n    print(len(powers))\n\nsolve()\n","repo_name":"nkartashov/euler","sub_path":"problem29.py","file_name":"problem29.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"42649636190","text":"import json\nimport requests\nimport statistics\n\nprint(\"REQUESTING SOME DATA FROM THE INTERNET...\")\nrequest_url = \"https://raw.githubusercontent.com/prof-rossetti/intro-to-python/master/data/products/1.json\"\nresponse = requests.get(request_url)\nprint(response.status_code)\nprint(response.text)\nresponse_data = json.loads(response.text)\n\nprint(type(response_data)) #> <class 'dict'> or <class 'list'>\n\n\n\n\n\n#request_url = \"https://raw.githubusercontent.com/prof-rossetti/intro-to-python/master/data/products.json\"\n#response = requests.get(request_url)\n#print(response.status_code)\n#print(response.text)\n#response_data = json.loads(response.text)\n#print (type(response_data))\n#\n#for d in response_data:\n#    print(\"name: \" + d[\"name\"] + \" id: \" + str(d[\"id\"]))\n\n\n#request_url = \"https://raw.githubusercontent.com/prof-rossetti/intro-to-python/master/data/gradebook.json\"\n#response = requests.get(request_url)\n#print(response.status_code)\n#print(response.text)\n#response_data = json.loads(response.text)\n#\n#print(type(response_data))\n#\n#grades = [d[\"finalGrade\"] for d in response_data[\"students\"]]\n#\n#print(\"Grade: \",(grades))\n#avg_grade = statistics.mean(grades)\n#print(\"Avg Grade: \", avg_grade)\n#\n#print(\"Min Grade: \", min(grades))\n#\n#print(\"Max Grade: \", max(grades))\n##{\n##  \"downloadDate\": \"2018-06-05\",\n##  \"professorId\": 123,\n##  \"students\":[\n##    {\"studentId\": 1, \"finalGrade\": 76.7},\n##    {\"studentId\": 2, \"finalGrade\": 85.1},\n##    {\"studentId\": 3, \"finalGrade\": 50.3},\n##    {\"studentId\": 4, \"finalGrade\": 89.8},\n##    {\"studentId\": 5, \"finalGrade\": 97.4},\n##    {\"studentId\": 6, \"finalGrade\": 75.5},\n##    {\"studentId\": 7, \"finalGrade\": 87.2},\n##    {\"studentId\": 8, \"finalGrade\": 88.0},\n##    {\"studentId\": 9, \"finalGrade\": 93.9},\n##    {\"studentId\": 10, \"finalGrade\": 92.5}\n##  ]\n##}\n##","repo_name":"ahmadwilson21/web-requests-exercise","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"5758570088","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jun 15 10:23:06 2017\n\n@author: Carly LaGrotta\n\"\"\"\n\nimport yaml\ndef sorting(filename):\n    with open(filename) as f:\n        config = yaml.load(f)\n    simulationType = config['apparatus']['kind']\n    if simulationType == 'jet 
stirred reactor' or simulationType=='jsr':\n print(simulationType)\n a = importingJSR(filename)\n return a\n else: \n raise Exception(\"We do not have this simulation installed\")\n \ndef importingJSR(filename):\n with open(filename) as f:\n config = yaml.load(f)\n \n reactorVolume = config['apparatus']['reactor-volume']['value']\n residenceTime = config['apparatus']['residence-time']['value']\n pressure = config['common-properties']['pressure']['value']\n initialTemperature = config['common-properties']['temperature']['initial-value']\n finalTemperature = config['common-properties']['temperature']['final-value']\n temperatureStep = config['common-properties']['temperature']['step']\n moleFractions = [((concentration['mole-fraction'])) for concentration in config['common-properties']['composition']]\n speciesNames = [(species['species']) for species in config['common-properties']['composition']]\n conditions = dict(zip(moleFractions,speciesNames))\n moleFractionObservables = [datapoint['targets'][0]['name'] for datapoint in config['datapoints']['mole-fraction']]\n absorbanceObservables = [species['species'] for species in config['datapoints']['absorbance']['absorbing-species']]\n observables = moleFractionObservables + absorbanceObservables\n return {\n 'reactorVolume': reactorVolume,\n 'residenceTime': residenceTime,\n 'pressure': pressure,\n 'initialTemperature': initialTemperature,\n 'finalTemperature': finalTemperature,\n 'temperatureStep': temperatureStep,\n 'conditions': conditions,\n 'observables': observables\n }\n\ndef importingShockTube(filename):\n with open(filename) as f:\n config = yaml.load(f)\n pressure = config['common-properties']['pressure']['value']\n temperature = config['common-properties']['temperature']['value']\n moleFractions = [((concentration['mole-fraction'])) for concentration in config['common-properties']['composition']]\n speciesNames = [(species['species']) for species in config['common-properties']['composition']]\n conditions = dict(zip(moleFractions,speciesNames))\n \n return{\n 'pressure' : pressure,\n 'temperature' : temperature,\n 'conditions' : conditions\n \n \n }\n \n ","repo_name":"markbarbet/simulations","sub_path":"JSRimports_new_editing.py","file_name":"JSRimports_new_editing.py","file_ext":"py","file_size_in_byte":2642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23044319016","text":"import urllib2\nimport tarfile\nimport glob\nimport shutil\nfrom StringIO import StringIO\nimport os.path\n\nEXTRACT_DIR = 'src/cURLpp/';\n\nif not os.path.exists(EXTRACT_DIR):\n os.makedirs(EXTRACT_DIR)\n\nresponse = urllib2.urlopen('https://curlpp.googlecode.com/files/curlpp-0.7.3.tar.gz')\nrawTarGz = response.read()\ntar = tarfile.open(mode=\"r:gz\", fileobj = StringIO(rawTarGz))\nsubdir_and_files = [\n tarinfo for tarinfo in tar.getmembers()\n if tarinfo.name.startswith(\"curlpp-0.7.3/src/\") or tarinfo.name.startswith(\"curlpp-0.7.3/include/\")\n ]\ntar.extractall(path=EXTRACT_DIR, members=subdir_and_files)\n\nfor dir in glob.glob(EXTRACT_DIR + 'curlpp-0.7.3/*'):\n shutil.move(dir, EXTRACT_DIR)\n\nshutil.rmtree(EXTRACT_DIR + 'curlpp-0.7.3/');\n","repo_name":"4d6178/academy-oauth","sub_path":"LoadcURLpp.py","file_name":"LoadcURLpp.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"9977765065","text":"from __future__ import annotations\nimport os\nimport typing\n\nimport yaml\n\n\nclass 
ServerConfig:\n def __init__(self, subyaml: dict, ver: tuple):\n self.ip: str = subyaml.get(\"bind\", \"0.0.0.0\")\n self.port: int = int(subyaml.get(\"port\", 8080))\n self.traceback: bool = bool(subyaml.get(\"traceback\", False))\n self.version = ver\n\n\nclass TaskConfig:\n def __init__(self, subyaml: dict):\n self.refresh_rate: int = int(subyaml.get(\"refresh_rate\", 150))\n assert self.refresh_rate >= 10, \"Refresh rate must be at least 10 seconds!\"\n\n\nclass OAuthConfig:\n def __init__(self, subyaml: dict):\n self.github_key: str = subyaml.get(\"github_api_key\", \"\")\n self.authoritative_domains: typing.List[str] = subyaml.get(\"authoritative_domains\", [])\n self.admins: typing.List[str] = subyaml.get(\"admins\", \"\").split(\" \")\n\n\nclass DirectoryConfig:\n def __init__(self, subyaml: dict):\n self.scratch = subyaml.get(\"scratch\", \"scratch\")\n self.remove_bare = subyaml.get(\"remove_bare\", True)\n assert os.path.isdir(self.scratch), f\"Scratch dir {self.scratch} could not be found, please create it or change clc.yaml!\"\n\nclass DebugConfig:\n def __init__(self, subyaml: dict):\n self.print_issues = subyaml.get('print_issues', True)\n self.open_server = subyaml.get('open_server', False)\n\nclass Project:\n def __init__(self, path):\n self.repo = path.split(\"/\")[-1]\n self.mtimes = {}\n self.settings = {}\n self.history = []\n self.issues = []\n self.issues_per_file = {}\n self.deleted = False # Is set to true when scheduled to removal by background services\n self.warning = \"\" # Set when a scan fails for whatever reason\n\n\nclass LogicConfig:\n def __init__(self, subyaml: dict):\n self.short_word_limit = int(subyaml.get('short_word_limit', 5))\n self.short_word_regex = subyaml.get('short_words', r\"(?:\\b|_)+({word})(?:ed|ing|s)?(?:\\b|\\W|_)+\")\n self.long_word_regex = subyaml.get('long_words', r\"({word})\")\n\n\nclass AccountConfig:\n def __init__(self, subyaml: dict):\n subyaml = subyaml or {}\n self.accounts_file = subyaml.get('accounts_file')\n self.accounts = {}\n self.audit_log = subyaml.get('auditlog', 'auditlog.txt')\n if self.accounts_file and os.path.exists(self.accounts_file):\n self.accounts = yaml.safe_load(open(self.accounts_file))\n self.accounts_file_stat = os.stat(self.accounts_file)\n else:\n self.accounts_file_stat = None\n\n\nclass Configuration:\n def __init__(self, clcversion: tuple, yml: dict, dyml: dict):\n self.server: ServerConfig = ServerConfig(yml.get(\"server\", {}), clcversion)\n self.tasks: TaskConfig = TaskConfig(yml.get(\"tasks\", {}))\n self.oauth: OAuthConfig = OAuthConfig(yml.get(\"oauth\", {}))\n self.dirs: DirectoryConfig = DirectoryConfig(yml.get(\"directories\", {}))\n self.debug: DebugConfig = DebugConfig(yml.get(\"debug\", {}))\n self.accounts: AccountConfig = AccountConfig(yml.get('acl', {}))\n self.logic: LogicConfig = LogicConfig(dyml.get('match_logic', {}))\n self.words = dyml.get(\"words\", [])\n self.excludes = dyml.get(\"excludes\", [])\n self.excludes_context = dyml.get(\"excludes_context\", [])\n self.contexts = dyml.get(\"contexts\", [])\n self.executables = yml.get(\"executables\", {})\n assert \"git\" in self.executables and os.path.exists(self.executables[\"git\"]), \\\n \"This service requires the git executable installed. 
If it is already installed, \" \\\n \"please let me know where to find it in clc.yaml\"\n\n\nclass InterData:\n \"\"\"\n A mix of various global variables used throughout processes\n \"\"\"\n\n def __init__(self):\n self.repositories: list = []\n self.sessions: dict = {}\n self.people: list = []\n self.projects: dict = {}\n self.project_queue: list = []\n self.activity: str = \"Idling...\"\n\n","repo_name":"Humbedooh/clc","sub_path":"server/plugins/configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":4014,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"40"} +{"seq_id":"32780577727","text":"#!/usr/bin/env python3\n\nfrom fastapi import APIRouter, Request, Depends, Response, encoders\nimport typing as t\n\nfrom app.db.session import get_db\n\nfrom app.db.crud.post_graduations import get_post_graduation, edit_information, get_informations, delete_information, get_information, create_attendance, create_phone\nfrom app.db import models as m\n\nfrom app.schemas.base_schemas import AttendanceCreate, Attendance, AttendanceEdit, Phone, PhoneCreate, PhoneEdit\nfrom app.core.auth import get_current_active_superuser, get_current_active_user\n\nattendance_router = a = APIRouter()\n\n@a.get(\"/contato\", response_model=t.List[Attendance], response_model_exclude_none=True)\nasync def get_attendances(\n response: Response,\n db=Depends(get_db),\n current_user=Depends(get_current_active_user),\n):\n attendances = get_informations(db, current_user.owner_id, m.Attendance).all()\n response.headers[\"Content-Range\"] = f\"0-9/{len(attendances)}\"\n return attendances\n\n@a.get(\"/contato/{attendance_id}\", response_model=Attendance, response_model_exclude_none=True)\nasync def attendance_details(\n response: Response,\n attendance_id: int,\n db=Depends(get_db),\n current_user=Depends(get_current_active_user),\n):\n return get_information(db, attendance_id, m.Attendance)\n\n@a.post(\"/contato\", response_model=Attendance, response_model_exclude_none=True)\nasync def attendance_create(\n request: Request,\n attendance: AttendanceCreate,\n db=Depends(get_db),\n current_user=Depends(get_current_active_superuser),\n):\n \"\"\"\n Create a new attendance\n \"\"\"\n return create_attendance(db, attendance)\n\n\n@a.put(\"/contato/{attendance_id}\", response_model=Attendance, response_model_exclude_none=True)\nasync def attendance_edit(\n request: Request,\n attendance_id: int,\n attendance: AttendanceEdit,\n db=Depends(get_db),\n current_user=Depends(get_current_active_user),\n):\n \"\"\"\n Edit attendance\n \"\"\"\n return edit_information(db, attendance_id, attendance, m.Attendance)\n\n@a.get(\"/telefone\", response_model=t.List[Phone], response_model_exclude_none=True)\nasync def get_phones(\n response: Response,\n db=Depends(get_db),\n current_user=Depends(get_current_active_user),\n):\n phones = list(filter(lambda x: x.deleted == False, get_informations(db, current_user.owner_id, m.Attendance).first().phones))\n response.headers[\"Content-Range\"] = f\"0-9/{len(phones)}\"\n return phones\n\n@a.get(\"/telefone/{phone_id}\", response_model=Phone, response_model_exclude_none=True)\nasync def phone_details(\n response: Response,\n phone_id: int,\n db=Depends(get_db),\n current_user=Depends(get_current_active_user),\n):\n return get_information(db, phone_id, m.Phone)\n\n@a.post(\"/telefone\", response_model=Phone, response_model_exclude_none=True)\nasync def phone_create(\n request: Request,\n phone: PhoneCreate,\n db=Depends(get_db),\n 
current_user=Depends(get_current_active_user),\n):\n    \"\"\"\n    Create a new phone\n    \"\"\"\n    attendance_id = get_post_graduation(db, current_user.owner_id).attendance.id\n    return create_phone(db, attendance_id, phone)\n\n@a.delete(\"/telefone/{phone_id}\", response_model=Phone, response_model_exclude_none=True)\nasync def phone_delete(\n    request: Request,\n    phone_id: int,\n    db=Depends(get_db),\n    current_user=Depends(get_current_active_user),\n):\n    \"\"\"\n    Delete phone\n    \"\"\"\n    return delete_information(db, get_information(db, phone_id, m.Phone).id, m.Phone)\n\n@a.put(\"/telefone/{phone_id}\", response_model=Phone, response_model_exclude_none=True)\nasync def phone_edit(\n    request: Request,\n    phone_id: int,\n    phone: PhoneEdit,\n    db=Depends(get_db),\n    current_user=Depends(get_current_active_user),\n):\n    \"\"\"\n    Edit phone\n    \"\"\"\n    return edit_information(db, get_information(db, phone_id, m.Phone).id, phone, m.Phone)\n","repo_name":"luccasmmg/NewPosgrad","sub_path":"backend/app/api/api_v1/routers/attendance.py","file_name":"attendance.py","file_ext":"py","file_size_in_byte":3943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"14377066314","text":"import pyomo.common.plugin\nfrom pysp import solutionwriter\nfrom pysp.scenariotree.tree_structure import \\\n    ScenarioTree\n\n#\n# a simple utility to munge the index name into something a\n# bit more csv-friendly and in general more readable. at the\n# current time, we just eliminate any leading and trailing\n# parentheses and change commas to colons - the latter\n# because it's a csv file!\n#\n\ndef index_to_string(index):\n\n    result = str(index)\n    result = result.lstrip('(').rstrip(')')\n    result = result.replace(',',':')\n    result = result.replace(' ','')\n\n    return result\n\n\ndef write_csv_soln(scenario_tree, output_file_prefix):\n    \"\"\"\n    Write the csv solution to a file.\n    Args: scenario_tree: a scenario tree object populated with a solution.\n          output_file_prefix: a string to indicate the file names for output.\n             output_file_prefix + \".csv\"\n             output_file_prefix + \"_StageCostDetail.csv\"\n    \"\"\"\n\n    if not isinstance(scenario_tree, ScenarioTree):\n        raise RuntimeError(\n            \"CSVSolutionWriter write method expects \"\n            \"ScenarioTree object - type of supplied \"\n            \"object=\"+str(type(scenario_tree)))\n\n    solution_filename = output_file_prefix + \".csv\"\n    with open(solution_filename, \"w\") as f:\n        for stage in scenario_tree.stages:\n            for tree_node in sorted(stage.nodes,\n                                    key=lambda x: x.name):\n                for variable_id in sorted(tree_node._variable_ids):\n                    var_name, index = \\\n                        tree_node._variable_ids[variable_id]\n                    f.write(\"%s, %s, %s, %s, %s\\n\"\n                            % (stage.name,\n                               tree_node.name,\n                               var_name,\n                               index_to_string(index),\n                               tree_node._solution[variable_id]))\n\n    print(\"Scenario tree solution written to file=\"+solution_filename)\n\n    cost_filename = output_file_prefix + \"_StageCostDetail.csv\"\n    with open(cost_filename, \"w\") as f:\n        for stage in scenario_tree.stages:\n            # DLW March 2020 to paste over a bug in handling\n            # of NetworkX by tree_structure.py\n            # (stage costs may be None but are OK at the node level)\n            scost = stage._cost_variable  # might be None\n            for tree_node in sorted(stage.nodes,\n                                    key=lambda x: x.name):\n                if scost is None:\n                    scost = tree_node._cost_variable\n                cost_name, cost_index = scost  # moved into loop 3/2020 hack\n                for scenario in sorted(tree_node.scenarios,\n                                       key=lambda x: x.name):\n                    stage_cost = scenario._stage_costs[stage.name]\n                    f.write(\"%s, %s, %s, 
%s, %s, %s\\n\"\n                            % (stage.name,\n                               tree_node.name,\n                               scenario.name,\n                               cost_name,\n                               index_to_string(cost_index),\n                               stage_cost))\n    print(\"Scenario stage costs written to file=\"+cost_filename)\n\n\nclass CSVSolutionWriter(pyomo.common.plugin.SingletonPlugin):\n\n    pyomo.common.plugin.implements(\n        solutionwriter.ISolutionWriterExtension)\n\n    def write(self, scenario_tree, output_file_prefix):\n        write_csv_soln(scenario_tree, output_file_prefix)\n","repo_name":"Pyomo/pysp","sub_path":"pysp/plugins/csvsolutionwriter.py","file_name":"csvsolutionwriter.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"40"} +{"seq_id":"38558684278","text":"import matplotlib.pyplot as plt \nimport numpy as np\n\nfrom ekf import ekf\n\n\n\n#***********************************************************************\n\nclass run_ekf:\n    def __init__(self, x=np.zeros((3, 1)), u=np.zeros((2, 1)), sensor = np.zeros((3, 1)), P = np.identity(3), dt = 1):\n        self.x = x\n        self.u = u\n        self.sensor = sensor\n        self.P = P\n        self.dt = dt\n        self.ekf = ekf()\n\n\n    def calculateEKF(self, ekf):\n\n        # prior, assuming start at (0, 0, 0)\n        # [x, y, theta]\n        # xhat = np.array([[0, 0, 0]])\n\n        # State Covariance Matrix\n        # P = np.identity(3)\n\n        # Process Noise Covariance Matrix\n        Q = np.identity(3)*(0.1**2)\n\n        # Measurement Noise Covariance Matrix\n        # R = np.identity(3)\n        R = np.diag(np.array([0.05, 0.05, 0.1]))\n\n        # Linearized Measurement Model Jacobian Matrix\n        H = np.identity(3)\n\n        # State Transition Jacobian\n        G = np.array([[1, 0, -1*self.dt*self.u[0]*np.sin(self.x[:,-1][2])], [0, 1, self.dt*self.u[0]*np.cos(self.x[:,-1][2])], [0, 0, 1]])\n\n\n        # Control Matrix (Velocities and Acceleration)\n        # [ x , ...]\n        # [ w , ...]\n        # nonholonomic\n        # u = np.zeros((2, 2))\n        # u[0:,1] = [0.1, 0]\n\n        # Sensor Measurement Values (Position)\n        # Sensor (Position) Matrix\n        # [  x  , ...]\n        # [  y  , ...]\n        # [theta, ...]\n        sensor = np.zeros((3, 1))\n        sensor[:,0] = self.sensor[:,0]\n        # sensor[:,0] = np.add(self.sensor[:,0], [np.random.normal(0, 0.1), np.random.normal(0, 0.1), np.random.normal(0, 0.1)])\n        #sensor[:,1] = np.add(self.sensor[:,1], [np.random.normal(0, 0.1), np.random.normal(0, 0.1), np.random.normal(0, 0.1)])\n\n        # State Matrix\n        # x\n\n        #***********************************************************************\n        x_pred, P = ekf.ekf(self.x[:,-1], self.u[:,-1], sensor[:,-1], Q, R, G, H, self.P, self.dt)\n        X0, Y0, a, b, angle, r_ellipse = ekf.covarianceEllipse(x_pred.reshape(3, -1), P)\n        \n        # x[:, n+1], P = ekf.ekf(self.x[:,0], u[:,0], sensor[:,1], Q, R, H, P, dt)\n        # X0, Y0, _, _, _, r_ellipse = ekf.covarianceEllipse(x[:, n+1].reshape(3, -1), P)\n\n        return x_pred, P, X0, Y0, a, b, angle\n\n    def run(self):\n        ekf = self.ekf\n        return self.calculateEKF(ekf)","repo_name":"rezarajan/mte544","sub_path":"Lab 2/Code/run_ekf.py","file_name":"run_ekf.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"31299555034","text":"# Decision trees\n# Decision trees are models widely used for classification and regression tasks\n\n\n# Classification\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.datasets import load_breast_cancer\nfrom sklearn.model_selection import train_test_split\n\ncancer = load_breast_cancer()\nX_train, X_test, y_train, y_test = train_test_split(cancer.data, cancer.target, stratify=cancer.target, random_state=42)\ntree = DecisionTreeClassifier(random_state=0).fit(X_train, 
y_train)\nprint(tree.score(X_train, y_train)) # 1.000\nprint(tree.score(X_test, y_test)) # 0.937\n# Overfitting\n\n# Pre-pruning\n# Prevents overfitting by stopping tree construction early in the building process\ntree = DecisionTreeClassifier(max_depth=4, random_state=0).fit(X_train, y_train)\nprint(tree.score(X_train, y_train)) # 0.988\nprint(tree.score(X_test, y_test)) # 0.951\n# There is also post-pruning, which removes nodes that carry little information\n\n# Feature importances\nprint(tree.feature_importances_)\n# [0. 0. 0. 0. 0. 0.\n# 0. 0. 0. 0. 0.01019737 0.04839825\n# 0. 0. 0.0024156 0. 0. 0.\n# 0. 0. 0.72682851 0.0458159 0. 0.\n# 0.0141577 0. 0.018188 0.1221132 0.01188548 0. ]\n\n\n# Regression\n# There is something to watch out for when using decision tree models for regression:\n# they cannot extrapolate, i.e. they cannot make predictions outside the training data\n# See figure 2-32 on p.81 of the textbook\n\n# Strengths, weaknesses, parameters\n# A strength is that the resulting model is easy to visualize and\n# can be understood even by non-experts\n# Also, it is completely invariant to the scale of the data\n# No feature normalization or standardization is needed\n# The biggest problem is that it overfits easily even with pre-pruning\n# and tends to have low generalization performance\n# For this reason, ensemble methods are used in most cases\n\n# Ensembles of decision trees\n\n# Random forests\n# Collect decision trees that each differ slightly\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.datasets import make_moons\nX, y = make_moons(n_samples=100, noise=0.25, random_state=3)\nX_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)\nforest = RandomForestClassifier(n_estimators=5, random_state=2) # 5 decision trees\nforest.fit(X_train, y_train)\nprint(forest.score(X_train, y_train)) # 0.96\nprint(forest.score(X_test, y_test)) # 0.92\n# Random forests also have feature importances, which are more trustworthy than a single tree's\n\n# Strengths, weaknesses, parameters\n# Random forests are completely random, so changing the random seed\n# (random_state) can change the constructed model considerably\n# Random forests tend not to work well on very high-dimensional,\n# sparse data such as text\n# For such data, linear models are a better fit\n# Moreover, random forests consume more memory than linear models,\n# and both training and prediction are slower\n# For applications where runtime or memory matters, linear models are preferable\n\n\n# Gradient boosted regression trees (gradient boosting machines)\n# Another ensemble method that combines multiple decision trees to build a more powerful model\n# Usable for both classification and regression\n# In gradient boosting, trees are built one after another, with each tree\n# correcting the mistakes of the previous one\n# By default there is no randomness; strong pre-pruning is used instead\n# The point is to combine many simple models such as shallow decision trees\n# The parameters are learning_rate and n_estimators\n# In general, try random forests first; if they work but take too long,\n# or you want to squeeze out the last 1% of performance, try gradient boosting\n\nfrom sklearn.ensemble import GradientBoostingClassifier\ngbrt = GradientBoostingClassifier(random_state=0)\ngbrt.fit(X_train, y_train)\nprint(gbrt.score(X_train, y_train)) # 1.0\nprint(gbrt.score(X_test, y_test)) # 0.88\n\n# Strengths, weaknesses, parameters\n# Gradient boosted regression trees are among the most powerful and\n# widely used models in supervised learning\n# Weaknesses: parameter tuning requires great care,\n# and training takes a long time. They also do not work\n# well on high-dimensional, sparse data\n","repo_name":"urah45/studying-machine-learning","sub_path":"chapter_two/dicision_tree.py","file_name":"dicision_tree.py","file_ext":"py","file_size_in_byte":4997,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"11101348471","text":"import cv2 as cv\n\ncap = cv.VideoCapture(0)\n\nwhile cap.isOpened():\n    ret, frame = cap.read()\n    if ret:\n        cv.imshow('frame', frame)\n        if cv.waitKey(1) == ord('q'):\n            cv.destroyAllWindows()\n            break\n    else:\n        break\n\ncap.release()","repo_name":"tjwldnjss13/OpenCV-Drill","sub_path":"practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"33208532364","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# @Date    : 2016-10-12 17:20:54\n# @Author  : taoleilei (1214360171@qq.com)\n# @Link    : ${link}\n# @Version : $Id$\n\nimport selectors\nimport socket\n \nsel = selectors.DefaultSelector()\nCONNECTION_LIST = []\n\ndef broadcast_data(conn, message):\n    # Loop over the connection list and forward this client's message to every client except the server socket (sock) and the sender itself.\n    for 
socket in CONNECTION_LIST:\n if socket != sock and socket != conn:\n try:\n socket.send(message)\n except:\n socket.close()\n CONNECTION_LIST.remove(socket)\n\ndef accept(sock, mask):\n conn, addr = sock.accept() # Should be ready\n CONNECTION_LIST.append(conn)\n print('accepted', conn, 'from', addr)\n conn.setblocking(False)\n sel.register(conn, selectors.EVENT_READ, read)\n \ndef read(conn, mask):\n data = conn.recv(1024) # Should be ready\n if data:\n broadcast_data(conn, data)\n else:\n print('closing', conn)\n sel.unregister(conn)\n conn.close()\n \nsock = socket.socket()\nsock.bind(('localhost', 5000))\nsock.listen(100)\nsock.setblocking(False)\nsel.register(sock, selectors.EVENT_READ, accept)\n \nwhile True:\n events = sel.select()\n for key, mask in events:\n callback = key.data\n callback(key.fileobj, mask)\n","repo_name":"taoleilei/ChatRoom","sub_path":"server2.py","file_name":"server2.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"18030479207","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"The main window for Zipy.\"\"\"\n\nimport tkinter as tk\nimport _tkinter\nfrom tkinter import ttk\nfrom tkinter import filedialog\nfrom idlelib.tooltip import Hovertip\nimport zipfile\nimport os\n\nimport pkinter as pk\n\n__title__ = \"Zipy\"\n__author__ = \"DeflatedPickle\"\n__version__ = \"1.0.0\"\n\n\n# http://www.rarlab.com\n# http://www.7-zip.org\n# http://www.winzip.com/index.html\n\n\nclass Window(tk.Tk):\n def __init__(self, *args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n self.title(\"Zipy\")\n self.geometry(\"550x500\")\n self.minsize(width=350, height=200)\n self.rowconfigure(1, weight=1)\n self.columnconfigure(0, weight=1)\n\n self.widget_toolbar = Toolbar(self)\n self.widget_toolbar.grid(row=0, column=0, sticky=\"we\")\n\n self.widget_statusbar = Statusbar(self)\n self.widget_statusbar.grid(row=2, column=0, sticky=\"we\")\n\n self.frame_treeview = ttk.Frame(self)\n self.frame_treeview.rowconfigure(0, weight=1)\n self.frame_treeview.columnconfigure(0, weight=1)\n self.frame_treeview.grid(row=1, column=0, sticky=\"nesw\")\n\n self.widget_treeview = Treeview(self.frame_treeview)\n self.widget_treeview.grid(row=0, column=0, sticky=\"nesw\")\n self.widget_treeview[\"displaycolumns\"] = (\"File Extension\",\n \"Date Modified\",\n \"File Type\",\n \"Compress Size\",\n \"File Size\",\n \"Filler\")\n # print(self.widget_treeview[\"displaycolumns\"])\n\n self.widget_scrollbar_horizontal = ttk.Scrollbar(self.frame_treeview, orient=\"horizontal\",\n command=self.widget_treeview.xview)\n self.widget_scrollbar_horizontal.grid(row=1, column=0, sticky=\"we\")\n\n self.widget_scrollbar_vertical = ttk.Scrollbar(self.frame_treeview, orient=\"vertical\",\n command=self.widget_treeview.yview)\n self.widget_scrollbar_vertical.grid(row=0, column=1, sticky=\"ns\")\n\n self.widget_treeview.configure(xscrollcommand=self.widget_scrollbar_horizontal.set,\n yscrollcommand=self.widget_scrollbar_vertical.set)\n\n self.widget_menu = Menu(self)\n\n def clear(self):\n for item in self.widget_treeview.get_children():\n self.widget_treeview.delete(item)\n\n def open_file(self, file):\n self.clear()\n self.title(\"Zipy - {}\".format(file.split(\"/\")[-1]))\n try:\n with zipfile.ZipFile(file, \"r\") as z:\n previous_folder = \"\"\n text = \"\"\n for item in z.infolist():\n # print(item)\n if \"/\" in item.filename:\n try:\n self.widget_treeview.insert(parent=previous_folder,\n 
index=\"end\",\n iid=os.path.splitext(item.filename.split(\"/\")[-2])[0],\n text=os.path.splitext(item.filename.split(\"/\")[-2])[0])\n previous_folder = os.path.splitext(item.filename.split(\"/\")[-2])[0]\n except _tkinter.TclError:\n pass\n text = os.path.splitext(item.filename.split(\"/\")[-1])[0]\n self.add_item(item, previous_folder, text)\n if \"/\" not in item.filename:\n previous_folder = \"\"\n text = os.path.splitext(item.filename)[0]\n self.add_item(item, previous_folder, text)\n except FileNotFoundError:\n print(\"'{}' does not exist.\".format(file))\n\n def add_item(self, item, parent, text):\n self.widget_treeview.insert(parent=parent,\n index=\"end\",\n iid=os.path.splitext(item.filename)[0],\n text=text,\n values=[os.path.splitext(item.filename)[1],\n \"{0[2]}/{0[1]}/{0[0]} {0[5]}:{0[4]}:{0[3]}\".format(item.date_time),\n \"\",\n item.compress_type,\n item.comment,\n item.extra,\n item.create_system,\n item.create_version,\n item.extract_version,\n item.reserved,\n item.flag_bits,\n item.volume,\n item.internal_attr,\n item.external_attr,\n item.header_offset,\n item.CRC,\n item.compress_size,\n item.file_size])\n\n def exit_program(self):\n raise SystemExit\n\n\nclass Menu(tk.Menu):\n def __init__(self, parent, *args, **kwargs):\n tk.Menu.__init__(self, parent, type=\"menubar\", *args, **kwargs)\n self.option_add('*tearOff', False)\n self.parent = parent\n\n self.init_menu_application()\n self.init_menu_file()\n self.init_menu_view()\n self.init_menu_columns()\n self.init_menu_window()\n self.init_menu_help()\n self.init_menu_system()\n\n self.parent.configure(menu=self)\n\n def init_menu_application(self):\n self.menu_application = tk.Menu(self, name=\"apple\")\n\n self.menu_application.add_command(label=\"About Zipy\", state=\"disabled\")\n self.menu_application.add_command(label=\"Exit\", command=self.parent.exit_program)\n\n self.add_cascade(label=\"Application\", menu=self.menu_application)\n\n def init_menu_file(self):\n self.menu_file = tk.Menu(self)\n\n self.menu_file.add_command(label=\"Open\", command=lambda: open_file(self.parent))\n\n self.add_cascade(label=\"File\", menu=self.menu_file)\n\n def init_menu_view(self):\n self.menu_view = tk.Menu(self)\n\n self.menu_view.add_command(label=\"Collapse the TreeView\", state=\"disabled\")\n self.menu_view.add_command(label=\"Expand the TreeView\", state=\"disabled\")\n self.menu_view.add_command(label=\"Refresh the TreeView\", state=\"disabled\")\n\n self.add_cascade(label=\"View\", menu=self.menu_view)\n\n def init_menu_columns(self):\n self.menu_columns = tk.Menu(self.menu_view)\n self.columns_default = [\"File Extension\", \"Date Modified\", \"File Type\", \"Compress Size\", \"File Size\", \"Filler\"]\n\n # for item in self.parent.widget_treeview[\"columns\"]:\n # self.menu_columns.add_checkbutton(label=item, variable=tk.BooleanVar())\n\n # TODO: Replace menu items below with a for loop to save lines.\n self.boolean_variable_file_extension = tk.BooleanVar()\n self.boolean_variable_file_extension.set(True)\n self.menu_columns.add_checkbutton(label=\"File Extension\", variable=self.boolean_variable_file_extension, command=lambda: self.toggle_column(self.boolean_variable_file_extension, 0, \"File Extension\"))\n\n self.boolean_variable_date_modified = tk.BooleanVar()\n self.boolean_variable_date_modified.set(True)\n self.menu_columns.add_checkbutton(label=\"Date Modified\", variable=self.boolean_variable_date_modified, command=lambda: self.toggle_column(self.boolean_variable_date_modified, 1, \"Date Modified\"))\n\n 
self.boolean_variable_file_type = tk.BooleanVar()\n self.boolean_variable_file_type.set(True)\n self.menu_columns.add_checkbutton(label=\"File Type\", variable=self.boolean_variable_file_type, command=lambda: self.toggle_column(self.boolean_variable_file_type, 2, \"File Type\"))\n\n self.boolean_variable_compress_type = tk.BooleanVar()\n self.boolean_variable_compress_type.set(False)\n self.menu_columns.add_checkbutton(label=\"Compress Type\", variable=self.boolean_variable_compress_type, command=lambda: self.toggle_column(self.boolean_variable_compress_type, 3, \"Compress Type\"))\n\n self.boolean_variable_comment = tk.BooleanVar()\n self.boolean_variable_comment.set(False)\n self.menu_columns.add_checkbutton(label=\"Comment\", variable=self.boolean_variable_comment, command=lambda: self.toggle_column(self.boolean_variable_comment, 4, \"Comment\"))\n\n self.boolean_variable_extra = tk.BooleanVar()\n self.boolean_variable_extra.set(False)\n self.menu_columns.add_checkbutton(label=\"Extra\", variable=self.boolean_variable_extra, command=lambda: self.toggle_column(self.boolean_variable_extra, 5, \"Extra\"))\n\n self.boolean_variable_create_system = tk.BooleanVar()\n self.boolean_variable_create_system.set(False)\n self.menu_columns.add_checkbutton(label=\"Create System\", variable=self.boolean_variable_create_system, command=lambda: self.toggle_column(self.boolean_variable_create_system, 6, \"Create System\"))\n\n self.boolean_variable_create_version = tk.BooleanVar()\n self.boolean_variable_create_version.set(False)\n self.menu_columns.add_checkbutton(label=\"Create Version\", variable=self.boolean_variable_create_version, command=lambda: self.toggle_column(self.boolean_variable_create_version, 7, \"Create Version\"))\n\n self.boolean_variable_extract_version = tk.BooleanVar()\n self.boolean_variable_extract_version.set(False)\n self.menu_columns.add_checkbutton(label=\"Extract Version\", variable=self.boolean_variable_extract_version, command=lambda: self.toggle_column(self.boolean_variable_extract_version, 8, \"Extract Version\"))\n\n self.boolean_variable_reserved = tk.BooleanVar()\n self.boolean_variable_reserved.set(False)\n self.menu_columns.add_checkbutton(label=\"Reserved\", variable=self.boolean_variable_reserved, command=lambda: self.toggle_column(self.boolean_variable_reserved, 9, \"Reserved\"))\n\n self.boolean_variable_flag_bits = tk.BooleanVar()\n self.boolean_variable_flag_bits.set(False)\n self.menu_columns.add_checkbutton(label=\"Flag Bits\", variable=self.boolean_variable_flag_bits, command=lambda: self.toggle_column(self.boolean_variable_flag_bits, 10, \"Flag Bits\"))\n\n self.boolean_variable_volume = tk.BooleanVar()\n self.boolean_variable_volume.set(False)\n self.menu_columns.add_checkbutton(label=\"Volume\", variable=self.boolean_variable_volume, command=lambda: self.toggle_column(self.boolean_variable_volume, 11, \"Volume\"))\n\n self.boolean_variable_internal_attr = tk.BooleanVar()\n self.boolean_variable_internal_attr.set(False)\n self.menu_columns.add_checkbutton(label=\"Internal Attr\", variable=self.boolean_variable_internal_attr, command=lambda: self.toggle_column(self.boolean_variable_internal_attr, 12, \"Internal Attr\"))\n\n self.boolean_variable_external_attr = tk.BooleanVar()\n self.boolean_variable_external_attr.set(False)\n self.menu_columns.add_checkbutton(label=\"External Attr\", variable=self.boolean_variable_external_attr, command=lambda: self.toggle_column(self.boolean_variable_external_attr, 13, \"External Attr\"))\n\n 
self.boolean_variable_header_offset = tk.BooleanVar()\n self.boolean_variable_header_offset.set(False)\n self.menu_columns.add_checkbutton(label=\"Header Offset\", variable=self.boolean_variable_header_offset, command=lambda: self.toggle_column(self.boolean_variable_header_offset, 14, \"Header Offset\"))\n\n self.boolean_variable_crc = tk.BooleanVar()\n self.boolean_variable_crc.set(False)\n self.menu_columns.add_checkbutton(label=\"CRC\", variable=self.boolean_variable_crc, command=lambda: self.toggle_column(self.boolean_variable_crc, 15, \"CRC\"))\n\n self.boolean_variable_compress_size = tk.BooleanVar()\n self.boolean_variable_compress_size.set(True)\n self.menu_columns.add_checkbutton(label=\"Compress Size\", variable=self.boolean_variable_compress_size, command=lambda: self.toggle_column(self.boolean_variable_compress_size, 16, \"Compress Size\"))\n\n self.boolean_variable_file_size = tk.BooleanVar()\n self.boolean_variable_file_size.set(True)\n self.menu_columns.add_checkbutton(label=\"File Size\", variable=self.boolean_variable_file_size, command=lambda: self.toggle_column(self.boolean_variable_file_size, 17, \"File Size\"))\n\n self.boolean_variable_filler = tk.BooleanVar()\n self.boolean_variable_filler.set(True)\n self.menu_columns.add_checkbutton(label=\"Filler\", variable=self.boolean_variable_filler, command=lambda: self.toggle_column(self.boolean_variable_filler, 18, \"Filler\"))\n\n self.menu_view.add_cascade(label=\"Columns\", menu=self.menu_columns)\n\n def init_menu_window(self):\n self.menu_window = tk.Menu(self, name=\"window\")\n self.add_cascade(label=\"Window\", menu=self.menu_window)\n\n def init_menu_help(self):\n self.menu_help = tk.Menu(self, name=\"help\")\n\n self.add_cascade(label=\"Help\", menu=self.menu_help)\n\n def init_menu_system(self):\n self.menu_system = tk.Menu(self, name=\"system\")\n self.add_cascade(label=\"System\", menu=self.menu_system)\n\n def toggle_column(self, variable: tk.BooleanVar, index: int, column: str):\n columns = list(self.parent.widget_treeview[\"displaycolumns\"])\n if variable.get():\n columns.insert(index, column)\n # print(\"Added '{}' to the shown columns.\".format(column))\n else:\n columns.pop(columns.index(column))\n # print(\"Removed '{}' from the shown columns.\".format(column))\n self.parent.widget_treeview[\"displaycolumns\"] = columns = tuple(columns)\n # print(columns)\n\n\nclass Toolbar(ttk.Frame):\n def __init__(self, parent, *args, **kwargs):\n ttk.Frame.__init__(self, parent, *args, **kwargs)\n self.parent = parent\n self.columnconfigure(1, weight=1)\n\n self.widget_button_open = ttk.Button(self, text=\"Open\", command=lambda: open_file(self.parent), style=\"Toolbutton\")\n self.widget_button_open.grid(row=0, column=0)\n Hovertip(self.widget_button_open, self.widget_button_open[\"text\"])\n\n self.widget_button_exit = ttk.Button(self, text=\"Exit\", command=self.parent.exit_program, style=\"Toolbutton\")\n self.widget_button_exit.grid(row=0, column=1, sticky=\"e\")\n Hovertip(self.widget_button_exit, self.widget_button_exit[\"text\"])\n\n\nclass Statusbar(pk.Statusbar):\n def __init__(self, parent, *args, **kwargs):\n pk.Statusbar.__init__(self, parent, *args, **kwargs)\n self.parent = parent\n\n self.status_variable = tk.StringVar()\n self.add_variable(textvariable=self.status_variable)\n\n self.bind_widget(self.parent.widget_toolbar.widget_button_open, self.status_variable, \"Open an archive\", \"\")\n self.bind_widget(self.parent.widget_toolbar.widget_button_exit, self.status_variable, \"Close the program\", 
\"\")\n\n self.add_sizegrip()\n\n\nclass Treeview(ttk.Treeview):\n def __init__(self, parent, *args, **kwargs):\n ttk.Treeview.__init__(self, parent, selectmode=\"browse\", columns=[\"File Extension\",\n \"Date Modified\",\n \"File Type\",\n \"Compress Type\",\n \"Comment\",\n \"Extra\",\n \"Create System\",\n \"Create Version\",\n \"Extract Version\",\n \"Reserved\",\n \"Flag Bits\",\n \"Volume\",\n \"Internal Attr\",\n \"External Attr\",\n \"Header Offset\",\n \"CRC\",\n \"Compress Size\",\n \"File Size\",\n \"Filler\"], *args, **kwargs)\n\n self.heading(\"#0\", text=\"File Name\")\n self.column(\"#0\", width=200, stretch=False)\n self.heading(\"#1\", text=\"File Extension\")\n self.column(\"#1\", width=80, stretch=False)\n self.heading(\"#2\", text=\"Date Modified\")\n self.column(\"#2\", width=100, stretch=False)\n self.heading(\"#3\", text=\"File Type\")\n self.column(\"#3\", width=80, stretch=False)\n self.heading(\"#4\", text=\"Compress Type\")\n self.column(\"#4\", width=100, stretch=False)\n self.heading(\"#5\", text=\"Comment\")\n self.column(\"#5\", width=80, stretch=False)\n self.heading(\"#6\", text=\"Extra\")\n self.column(\"#6\", width=60, stretch=False)\n self.heading(\"#7\", text=\"Create System\")\n self.column(\"#7\", width=100, stretch=False)\n self.heading(\"#8\", text=\"Create Version\")\n self.column(\"#8\", width=100, stretch=False)\n self.heading(\"#9\", text=\"Extract Version\")\n self.column(\"#9\", width=100, stretch=False)\n self.heading(\"#10\", text=\"Reserved\")\n self.column(\"#10\", width=100, stretch=False)\n self.heading(\"#11\", text=\"Flag Bits\")\n self.column(\"#11\", width=100, stretch=False)\n self.heading(\"#12\", text=\"Volume\")\n self.column(\"#12\", width=100, stretch=False)\n self.heading(\"#13\", text=\"Internal Attr\")\n self.column(\"#13\", width=100, stretch=False)\n self.heading(\"#14\", text=\"External Attr\")\n self.column(\"#14\", width=100, stretch=False)\n self.heading(\"#15\", text=\"Header Offset\")\n self.column(\"#15\", width=100, stretch=False)\n self.heading(\"#16\", text=\"CRC\")\n self.column(\"#16\", width=100, stretch=False)\n self.heading(\"#17\", text=\"Compress Size\")\n self.column(\"#17\", width=90, stretch=False)\n self.heading(\"#18\", text=\"File Size\")\n self.column(\"#18\", width=70, stretch=False)\n self.column(\"#19\", width=1)\n\n\ndef open_file(program):\n file = filedialog.askopenfile()\n if file is None:\n return\n program.open_file(file.name)\n file.close()\n\n\ndef main():\n app = Window()\n app.mainloop()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DeflatedPickle/Zipy","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":19419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"19163289319","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport os\nimport time\n\nimport Adafruit_BMP.BMP085 as BMP085 # Actually using it for BMP180 here\nimport Adafruit_BBIO.GPIO as GPIO\n\ndef blink(pin, blinktime=0.1):\n \"\"\" Blink a single LED\n \"\"\"\n blinks([pin], blinktime)\n\ndef blinks(pins, blinktime=0.1):\n \"\"\" Blink a list of LEDs\n \"\"\"\n for pin in pins:\n GPIO.output(pin, GPIO.HIGH)\n time.sleep(blinktime)\n for pin in pins:\n GPIO.output(pin, GPIO.LOW)\n\nif __name__ == \"__main__\":\n # Set up GPIO pins\n pin0 = \"P9_14\" # GPIO_50, blue, down\n GPIO.setup(pin0, GPIO.OUT)\n GPIO.output(pin0, GPIO.LOW)\n\n pin1 = \"P9_16\" # GPIO_51, red, up\n GPIO.setup(pin1, GPIO.OUT)\n GPIO.output(pin1, 
GPIO.LOW)\n\n    blinkshort = 0.05\n    blinklong = 0.8\n\n    sensor = BMP085.BMP085(busnum=2, mode=BMP085.BMP085_ULTRAHIGHRES)\n\n    # Default is to monitor the temperature\n    TEST_PRESSURE = True if os.getenv('TEST_PRESSURE', default='0') == '1' else False\n\n    if TEST_PRESSURE:\n        reading = sensor.read_pressure\n    else:\n        reading = sensor.read_temperature\n\n    # Holt-Winters parameters\n    alpha = 0.15\n    beta = 0.05\n\n    # Set up initial values\n    x = reading()\n    a = x\n    b = 0\n    blinktime = blinkshort\n    print(\"{},{},{}\".format(x, a, b))\n\n    try:\n        PERIOD = int(os.getenv('PERIOD', default='1'))\n    except ValueError:\n        PERIOD = 1\n    if PERIOD < 1:\n        PERIOD = 1\n\n    try:\n        # display threshold, in units of X unit/min, above which to do a long blink\n        SENSOR_THRESHOLD = float(os.getenv('SENSOR_THRESHOLD', default='1.0'))\n    except ValueError:\n        SENSOR_THRESHOLD = 1.0\n    if SENSOR_THRESHOLD < 0:\n        SENSOR_THRESHOLD = 1.0\n\n    while True:\n        time.sleep(PERIOD - blinktime)\n        x = reading()\n        aold, bold = a, b\n        a = alpha * x + (1 - alpha) * (aold + bold)\n        b = beta * (a - aold) + (1 - beta) * bold\n        print(\"Reading: {0:0.1f}; a[t]: {1:0.3f}; b[t]: {2:0.3f}\".format(x, a, b))\n        # Do long blink if temperature change is more than the threshold\n        blinktime = blinklong if abs(b) >= SENSOR_THRESHOLD / 60.0 * PERIOD else blinkshort\n        if abs(b) < 0.001:\n            blinks([pin0, pin1], blinktime)\n        elif b < 0:\n            blink(pin0, blinktime)\n        else:\n            blink(pin1, blinktime)\n","repo_name":"balena-io-experimental/bbgw-environment-leds","sub_path":"src/station.py","file_name":"station.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"40"} +{"seq_id":"2305825969","text":"\nimport sys\nfrom collections import namedtuple\nfrom importlib import import_module\nfrom django.core.management.base import OutputWrapper\nfrom django.core.management.color import color_style\nfrom . 
import settings\n\ndefault_app_config = 'extended_shell.apps.ExtendedShellConfig'\n\nstyle = color_style(\n    settings.EXTENDED_SHELL_COLORED\n)\n\nterm = OutputWrapper(\n    sys.stdout\n)\n\nImport = namedtuple('Import', [\n    'module',\n    'name',\n    'alias'\n])\n\n\ndef parse_import(path):\n    try:\n        path, alias = path.rsplit(' as ', 1)\n        alias = alias.strip()\n    except ValueError:\n        alias = None\n\n    module_path = name = path\n\n    try:\n        module_path, name = path.rsplit('.', 1)\n    except ValueError:\n        pass\n\n    return Import(\n        module_path.strip(),\n        name.strip(),\n        alias\n    )\n\n\ndef show_modules(modules):\n    imports = {}\n    strings = []\n\n    for module in modules:\n        if isinstance(module, str):\n            data = parse_import(module)\n        else:\n            try:\n                data = Import(\n                    module.__module__,\n                    module.__name__,\n                    None\n                )\n            except AttributeError:\n                continue\n\n        name = (\n            data.module or\n            repr(data)\n        )\n\n        imports.setdefault(name, [])\n        imports[name].append(data)\n\n    for module, datas in imports.items():\n        tmpl = 'from {path} import {modules}'\n\n        modules = []\n        for data in datas:\n            if not data.module:\n                tmpl = 'import {modules}'\n\n            modules.append(\n                '{imp.name} as {imp.alias}'.format(imp=data)\n                if data.alias else data.name\n            )\n\n        strings.append(\n            tmpl.format(\n                path=module,\n                modules=', '.join(modules)\n            ))\n\n    for line in reversed(strings):\n        term.write(style.SUCCESS(line))\n\n\ndef load_modules(pathes):\n    imports = {}\n\n    for path in pathes:\n        data = parse_import(path)\n\n        module = import_module(\n            data.module or data.name\n        )\n\n        imports[data.alias or data.name] = getattr(\n            module,\n            data.name,\n            module\n        )\n\n    return imports\n","repo_name":"detect-dev/django-extended-shell","sub_path":"extended_shell/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"40"} +{"seq_id":"30844350697","text":"# String splitting\ndef solution(string): \n    cnt = 0\n    answer = [] \n    while len(string)>0:\n        \n        if cnt == 0 :\n            tmp = []\n            standard = string[0]\n            tmp.append(standard)\n            cnt += 1\n            string = string[1:]\n            answer.append(tmp)\n        elif standard == string[0]:\n            cnt += 1\n            tmp.append(string[0])\n            string = string[1:]\n        else:\n            cnt-=1 \n            tmp.append(string[0])\n            string = string[1:]\n    return answer\n\n\nprint(solution(\"aaabbaccccabba\"))\n\n    \n","repo_name":"jio-ping/coding-test","sub_path":"140108.py","file_name":"140108.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"15290614593","text":"import collections\n\nwith open('day_6\\input.txt') as f:\n    init_dict = collections.Counter((map(int,f.readline().split(','))))\n\ndef update_timers(start_day):\n    fish = {}\n    for x in [1,2,3,4,5,6,8]:\n        fish[x-1] = start_day[x]\n    fish[8]= start_day[0]\n    fish[6] = start_day[7] + start_day[0]\n    return fish\n\ndef count_fish(fish_dict, days):\n    for _ in range(0, days):\n        fish_dict = update_timers(fish_dict)\n    return(sum(fish_dict.values()))\n\nprint(count_fish(init_dict, 256))","repo_name":"DLaury/advent_of_code_2021","sub_path":"day_6/day_6a.py","file_name":"day_6a.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"23282510444","text":"from flask import render_template, current_app, request, redirect, url_for\nimport pandas\n\ndef equity_page(equity_key):\n    eqdb = current_app.config[\"eqdb\"]\n    equity = eqdb.get_equity(equity_key)\n    
return render_template(\"equity.html\", equity = equity)\n\ndef holdings_page():\n eqdb = current_app.config[\"eqdb\"]\n if request.method == \"GET\":\n equities = eqdb.get_equities()\n return render_template(\"holdings.html\", equities=sorted(equities))\n else:\n form_equity_keys = request.form.getlist(\"equity_keys\")\n for form_equity_key in form_equity_keys:\n eqdb.delete_equity(int(form_equity_key))\n return redirect(url_for(\"holdings_page\"))\n\ndef get_holdings():\n df = pandas.read_csv('currentholding.csv', engine='python', header=6, skipfooter=4)\n\n","repo_name":"RobertPrellwitz/RiskTool","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"40"} +{"seq_id":"6419554287","text":"#coding=utf-8\nimport random as rd\nfrom hashlib import md5\nimport zipfile as zip\nimport getpass\nimport sys\nimport os\nclass Codingfile:\n def __init__(self, path):\n self.path = path\n\n def file_decoding(self, pwd, pwd2):\n if(self.path[-4:]!='crpt'):\n print(\"Please open supported file format(.crpt).\")\n sys.exit(1)\n lists=self.path.split('.')\n fformat=lists[-2]\n f = open(self.path, 'rb')\n f.readline()\n offset,offset2 = self.caesar_offset(pwd, pwd2)\n with open(self.path[:(-4-len(fformat)-1)]+'-decoded.'+fformat, 'wb') as w:\n while True:\n text = f.read(16384)\n if text == b'':\n break\n w.write(self.caesar_decoding(text, offset,offset2))\n print(\"\")\n print(\"Your file has already been decoded\")\n print(\"\")\n f.close()\n w.close()\n\n def read_hash(self):\n with open(self.path, 'rb') as w:\n first = w.readline()\n first = first.decode(encoding=\"utf-8\").strip(\"\\n\")\n return first\n\n @staticmethod\n def caesar_offset(pwd, pwd2):\n chars = \"\\\n ZE1cgvraF0R4R68cPm4b6vXckIZGj8GPetweKByvBNoxS9267fhP45IupXa0Byqg\\\n 9V5q8OuvQJlgDx4mwBoFoZbWfbsjF2zzLMQSJiTLiF8uMYXggElxHwHqK09m6OQH\\\n FuOoL2GJ4HHR5Wstb9DtJepadDQzeuProWx2GMzNmJo2JkzHmWpJECOMVH5YhdX1\\\n euyLZGY0HNCtW0LO7fhdedunRl2dek2171IQ3JivYaYBKjmU6MV53WexmniTiADd\\\n \"\n rd.seed(pwd)\n num = rd.randint(0, len(chars)-1)\n while(len(pwd2) % len(pwd) == 0 or len(pwd2) * len(pwd) < 128 or (len(pwd2) * len(pwd))&(len(pwd2) * len(pwd) - 1) == 0):\n if len(pwd) == 1:\n pwd = pwd+chars[num%len(chars)]\n pwd2=pwd2+chars[num%len(chars)]\n num = num + 1\n pwd = bytes(pwd, encoding=\"utf-8\")\n pwd2 = bytes(pwd2, encoding=\"utf-8\")\n \n\n offset = []\n offset2 = []\n\n for i in range(len(pwd)):\n offset.append(pwd[i])\n offset = traversal(offset)\n\n for i in range(len(pwd2)):\n offset2.append(pwd2[i])\n offset2 = traversal(offset2)\n return [offset, offset2]\n\n\n\n\n\n\n\n @staticmethod\n def caesar_decoding(text, offset, offset2): \n text = bytearray(text) \n for i in range(len(text)):\n byte = text[i] - next(offset) - next(offset2)\n if byte < 0:\n text[i] = byte + 256\n if text[i] < 0:\n text[i] = text[i] + 256\n else:\n text[i] = byte\n return text\n\n def check_key(self, pwd, pwd2):\n pwdhash = create_md5(pwd, pwd2, create_salt(pwd, pwd2))\n if pwdhash == self.read_hash():\n return True\n else:\n return False\n\ndef create_salt(pwd, pwd2):\n salt = ''\n chars = \"`1234567890-=qwertyuiop[]\\\\asdfghjkl;'zxcvbnm,./\" \\\n \"~!@#$%^&*()_+QWERTYUIOP{}|ASDFGHJKL:\\\"ZXCVBNM<>?\"\n rd.seed(pwd+pwd2)\n for i in range(16):\n num = rd.randint(0, len(chars)-1)\n salt += chars[num]\n return salt\n\n\ndef create_md5(pwd, pwd2, salt):\n m = md5()\n m.update((pwd+pwd2+salt).encode(\"UTF-8\"))\n return 
m.hexdigest()\n\n\ndef traversal(list):\n    i = 0\n    while True:\n        length = len(list)\n        i = i % length\n        yield list[i]\n        i += 1\n\n\ndef enter_pwd(strs):\n    print(\"\")\n    pwd = getpass.getpass(\"Please enter your \"+strs+\" password: \")\n    print(\"\")\n    return pwd\n\n\ndef main():\n    if len(sys.argv) != 3:\n        print(\"Please use like python {} encode/decode E:\\\\ filename.txt\".format(sys.argv[0]))\n        sys.exit(1)\n    \n    if sys.argv[1]!='decode':\n        print(\"Please use like python {} encode/decode E:\\\\ filename.txt\".format(sys.argv[0]))\n        sys.exit(1)\n\n    # Check whether the file or directory exists\n    if not os.path.exists(sys.argv[2]):\n        print(\"\")\n        print(\"File or Directory does not exist!\")\n        print(\"\")\n        sys.exit(1)\n\n    if sys.argv[1] == \"decode\":\n        file = Codingfile(sys.argv[2])\n        pwd = enter_pwd('first')\n        pwd2 = enter_pwd('second')\n        if file.check_key(pwd, pwd2):\n            file.file_decoding(pwd, pwd2)\n            sys.exit(0)\n        else:\n            print(\"\")\n            print(\"Wrong Key Entered!\")\n            print(\"\")\n            sys.exit(1)\n\n\nif __name__ == \"__main__\":\n    main()","repo_name":"Shuai-Zuo/Trash_Codes_Archives","sub_path":"Exact time unknown/Python/encrypt/decrypt.py","file_name":"decrypt.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11609605466","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport datetime\n\nfrom django.db import migrations, models\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('recipes', '0007_recipe_creation_date'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='recipe',\n            name='html_content',\n            field=models.TextField(default=datetime.datetime(2015, 9, 2, 17, 41, 32, 387563, tzinfo=utc)),\n            preserve_default=False,\n        ),\n        migrations.AlterField(\n            model_name='recipe',\n            name='tags',\n            field=models.ManyToManyField(blank=True, related_name='recipes', to='recipes.Tag'),\n            preserve_default=True,\n        ),\n    ]\n","repo_name":"frecar/recipemaster","sub_path":"recipemaster/recipes/migrations/0008_auto_20150902_1941.py","file_name":"0008_auto_20150902_1941.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"74584340624","text":"from __future__ import absolute_import\nimport os\nfrom digits.utils import subclass, override\nfrom digits.status import Status\nfrom digits.pretrained_model.tasks import UploadPretrainedModelTask\n\n\n@subclass\nclass CaffeUploadTask(UploadPretrainedModelTask):\n\n    def __init__(self, **kwargs):\n        super(CaffeUploadTask, self).__init__(**kwargs)\n\n    @override\n    def name(self):\n        return 'Upload Pretrained Caffe Model'\n\n    @override\n    def get_model_def_path(self):\n        \"\"\"\n        Get path to model definition\n        \"\"\"\n        return os.path.join(self.job_dir, \"original.prototxt\")\n\n    @override\n    def get_weights_path(self):\n        \"\"\"\n        Get path to model weights\n        \"\"\"\n        return os.path.join(self.job_dir, \"model.caffemodel\")\n\n    @override\n    def __setstate__(self, state):\n        super(CaffeUploadTask, self).__setstate__(state)\n\n    @override\n    def run(self, resources):\n\n        self.move_file(self.weights_path, \"model.caffemodel\")\n        self.move_file(self.model_def_path, \"original.prototxt\")\n\n        if self.labels_path is not None:\n            self.move_file(self.labels_path, \"labels.txt\")\n        tmp_dir = os.path.dirname(self.weights_path)\n        python_layer_file_name = 'digits_python_layers.py'\n        if os.path.exists(os.path.join(tmp_dir, 
python_layer_file_name)):\n self.move_file(os.path.join(tmp_dir, python_layer_file_name), python_layer_file_name)\n elif os.path.exists(os.path.join(tmp_dir, python_layer_file_name + 'c')):\n self.move_file(os.path.join(tmp_dir, python_layer_file_name + 'c'), python_layer_file_name + 'c')\n\n self.status = Status.DONE\n","repo_name":"NVIDIA/DIGITS","sub_path":"digits/pretrained_model/tasks/caffe_upload.py","file_name":"caffe_upload.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":4106,"dataset":"github-code","pt":"48"} +{"seq_id":"40098507687","text":"n=int(input())\nif(n<=3):\n x=list(map(int,input().split(\" \")))\n x.append(0)\n x.append(0)\n count=0\n for i in range(0,len(x)-2):\n if(x[i] bool:\n if not actor_name.startswith('GameRomHorse'):\n return False\n if 'Saddle' in actor_name or 'Reins' in actor_name:\n return False\n return True\n\ndump_to_csv(Path(sys.argv[1]), predicate, PROPERTIES, sys.stdout)\n","repo_name":"leoetlino/botw-re-notes","sub_path":"tools/horses.py","file_name":"horses.py","file_ext":"py","file_size_in_byte":2210,"program_lang":"python","lang":"en","doc_type":"code","stars":107,"dataset":"github-code","pt":"48"} +{"seq_id":"33847402228","text":"import os, shutil\n\ndef ensure_mkdir(path):\n '''Conflict free mkdir.'''\n try:\n os.makedirs(path)\n except OSError:\n if not os.path.isdir(path):\n raise\n\ndef copy_without_overwrite(src, dest, quiet=True):\n # Open the file and dont do anything if it exists\n try:\n fd = os.open(dest, os.O_CREAT | os.O_EXCL | os.O_WRONLY)\n except OSError:\n if os.path.isfile(dest) and quiet:\n return\n else: raise\n\n # Copy the file and automatically close files at the end\n with os.fdopen(fd,'w') as f:\n with open(src) as sf:\n shutil.copyfileobj(sf, f)\n shutil.copymode(src, dest)\n","repo_name":"FRESNA/vresutils","sub_path":"vresutils/file_io_helper.py","file_name":"file_io_helper.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"48"} +{"seq_id":"20852122662","text":"import Queue\n\n\nclass Node:\n def __init__(self, val, ops):\n self.val = val\n self.ops = ops\n\n\ndef min_operations(x, y):\n queue = Queue.Queue()\n queue.put(Node(x, 0))\n\n while queue:\n curr = queue.get()\n\n if curr.val == y:\n return curr.ops\n\n if curr.val * 2 == y or curr.val - 1 == y:\n return curr.ops + 1\n\n queue.put(Node(curr.val * 2, curr.ops + 1))\n queue.put(Node(curr.val - 1, curr.ops + 1))\n return -1\n\n\nprint(min_operations(2, 5))\n","repo_name":"Shubhamrawat5/open-source-contribution","sub_path":"PYTHON/mininum_operations.py","file_name":"mininum_operations.py","file_ext":"py","file_size_in_byte":520,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"48"} +{"seq_id":"32302387421","text":"import json\n\nfrom moto.awslambda.exceptions import (\n PreconditionFailedException,\n UnknownPolicyException,\n)\nfrom moto.moto_api._internal import mock_random\nfrom typing import Any, Callable, Dict, List, Optional, TypeVar\n\n\nTYPE_IDENTITY = TypeVar(\"TYPE_IDENTITY\")\n\n\nclass Policy:\n def __init__(self, parent: Any): # Parent should be a LambdaFunction\n self.revision = str(mock_random.uuid4())\n self.statements: List[Dict[str, Any]] = []\n self.parent = parent\n\n def wire_format(self) -> str:\n p = self.get_policy()\n p[\"Policy\"] = json.dumps(p[\"Policy\"])\n return json.dumps(p)\n\n def get_policy(self) -> Dict[str, Any]:\n return {\n \"Policy\": {\n 
\n    # adds the raw JSON statement to the policy\n    def add_statement(\n        self, raw: str, qualifier: Optional[str] = None\n    ) -> Dict[str, Any]:\n        policy = json.loads(raw, object_hook=self.decode_policy)\n        if len(policy.revision) > 0 and self.revision != policy.revision:\n            raise PreconditionFailedException(\n                \"The RevisionId provided does not match the latest RevisionId\"\n                \" for the Lambda function or alias. Call the GetFunction or the GetAlias API to retrieve\"\n                \" the latest RevisionId for your resource.\"\n            )\n        # Remove $LATEST from the Resource (Lambda ARN)\n        if policy.statements[0].get(\"Resource\", \"\").endswith(\"$LATEST\"):\n            policy.statements[0][\"Resource\"] = policy.statements[0][\"Resource\"][0:-8]\n        if qualifier:\n            policy.statements[0][\"Resource\"] = (\n                policy.statements[0][\"Resource\"] + \":\" + qualifier\n            )\n        self.statements.append(policy.statements[0])\n        self.revision = str(mock_random.uuid4())\n        return policy.statements[0]\n\n    # removes the statement that matches 'sid' from the policy\n    def del_statement(self, sid: str, revision: str = \"\") -> None:\n        if len(revision) > 0 and self.revision != revision:\n            raise PreconditionFailedException(\n                \"The RevisionId provided does not match the latest RevisionId\"\n                \" for the Lambda function or alias. Call the GetFunction or the GetAlias API to retrieve\"\n                \" the latest RevisionId for your resource.\"\n            )\n        for statement in self.statements:\n            if \"Sid\" in statement and statement[\"Sid\"] == sid:\n                self.statements.remove(statement)\n                break\n        else:\n            raise UnknownPolicyException()\n\n    # converts AddPermission request to PolicyStatement\n    # https://docs.aws.amazon.com/lambda/latest/dg/API_AddPermission.html\n    def decode_policy(self, obj: Dict[str, Any]) -> \"Policy\":\n        policy = Policy(self.parent)\n        policy.revision = obj.get(\"RevisionId\", \"\")\n\n        # set some default values if these keys are not set\n        self.ensure_set(obj, \"Effect\", \"Allow\")\n        self.ensure_set(obj, \"Resource\", self.parent.function_arn + \":$LATEST\")\n        self.ensure_set(obj, \"StatementId\", str(mock_random.uuid4()))\n\n        # transform field names and values\n        self.transform_property(obj, \"StatementId\", \"Sid\", self.nop_formatter)\n        self.transform_property(obj, \"Principal\", \"Principal\", self.principal_formatter)\n\n        self.transform_property(\n            obj, \"SourceArn\", \"SourceArn\", self.source_arn_formatter\n        )\n        self.transform_property(\n            obj, \"SourceAccount\", \"SourceAccount\", self.source_account_formatter\n        )\n        self.transform_property(\n            obj, \"PrincipalOrgID\", \"Condition\", self.principal_org_id_formatter\n        )\n\n        # remove RevisionId and EventSourceToken if they are set\n        self.remove_if_set(obj, [\"RevisionId\", \"EventSourceToken\"])\n\n        # merge conditional statements into a single map under the Condition key\n        self.condition_merge(obj)\n\n        # append resulting statement to policy.statements\n        policy.statements.append(obj)\n\n        return policy\n\n    def nop_formatter(self, obj: TYPE_IDENTITY) -> TYPE_IDENTITY:\n        return obj\n\n    def ensure_set(self, obj: Dict[str, Any], key: str, value: Any) -> None:\n        if key not in obj:\n            obj[key] = value\n\n    def principal_formatter(self, obj: Dict[str, Any]) -> Dict[str, Any]:\n        if isinstance(obj, str):\n            if obj.endswith(\".amazonaws.com\"):\n                return {\"Service\": obj}\n            if obj.endswith(\":root\"):\n                return {\"AWS\": obj}\n        return obj\n
\n    def source_account_formatter(\n        self, obj: TYPE_IDENTITY\n    ) -> Dict[str, Dict[str, TYPE_IDENTITY]]:\n        return {\"StringEquals\": {\"AWS:SourceAccount\": obj}}\n\n    def source_arn_formatter(\n        self, obj: TYPE_IDENTITY\n    ) -> Dict[str, Dict[str, TYPE_IDENTITY]]:\n        return {\"ArnLike\": {\"AWS:SourceArn\": obj}}\n
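\n    # Worked example (added comment; the ARN is illustrative, not from the original\n    # file): source_arn_formatter(\"arn:aws:s3:::my-bucket\") returns\n    # {\"ArnLike\": {\"AWS:SourceArn\": \"arn:aws:s3:::my-bucket\"}}, which condition_merge\n    # below later folds into the statement's \"Condition\" map.\n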
\n    def principal_org_id_formatter(\n        self, obj: TYPE_IDENTITY\n    ) -> Dict[str, Dict[str, TYPE_IDENTITY]]:\n        return {\"StringEquals\": {\"aws:PrincipalOrgID\": obj}}\n\n    def transform_property(\n        self,\n        obj: Dict[str, Any],\n        old_name: str,\n        new_name: str,\n        formatter: Callable[..., Any],\n    ) -> None:\n        if old_name in obj:\n            obj[new_name] = formatter(obj[old_name])\n            if new_name != old_name:\n                del obj[old_name]\n\n    def remove_if_set(self, obj: Dict[str, Any], keys: List[str]) -> None:\n        for key in keys:\n            if key in obj:\n                del obj[key]\n\n    def condition_merge(self, obj: Dict[str, Any]) -> None:\n        if \"SourceArn\" in obj:\n            if \"Condition\" not in obj:\n                obj[\"Condition\"] = {}\n            obj[\"Condition\"].update(obj[\"SourceArn\"])\n            del obj[\"SourceArn\"]\n\n        if \"SourceAccount\" in obj:\n            if \"Condition\" not in obj:\n                obj[\"Condition\"] = {}\n            obj[\"Condition\"].update(obj[\"SourceAccount\"])\n            del obj[\"SourceAccount\"]\n","repo_name":"getmoto/moto","sub_path":"moto/awslambda/policy.py","file_name":"policy.py","file_ext":"py","file_size_in_byte":6258,"program_lang":"python","lang":"en","doc_type":"code","stars":7174,"dataset":"github-code","pt":"48"} +{"seq_id":"5629643923","text":"import baseTools\r\nimport chanerCases\r\ndef change_part_by_part(base,\r\n                        take_se_id, take_part_name,\r\n                        put_se_id, put_as_it, put_case='MIA',\r\n                        action='START'):\r\n    take_part_list = []\r\n    take_founded = False\r\n    take_part = baseTools.take_se(base, take_se_id)\r\n    for t in take_part:\r\n        if t.startswith(take_part_name):\r\n            take_part_list.append(t)\r\n            take_founded = True\r\n\r\n    if take_founded:\r\n        put_founded = False\r\n        putted_part_list = []\r\n\r\n        put_by_part = baseTools.take_se(base, put_se_id)\r\n        if action == 'IN':\r\n            for p in put_by_part:\r\n                if p.startswith(put_as_it):\r\n                    putted_part_list.append('HERE')\r\n                    put_founded = True\r\n                else:\r\n                    putted_part_list.append(p)\r\n        elif action == 'BEFORE':\r\n            index_c = 0\r\n            for p in put_by_part:\r\n                if p.startswith(put_as_it):\r\n                    put_founded = True\r\n                    break\r\n                index_c += 1\r\n            index_p = 0\r\n            for p in put_by_part:\r\n                if index_p == index_c:\r\n                    putted_part_list.append('HERE')\r\n                putted_part_list.append(p)\r\n                index_p += 1\r\n        elif action == 'AFTER':\r\n            index_c = 0\r\n            pp = None\r\n            for p in put_by_part:\r\n                if p.startswith(put_as_it):\r\n                    pp = p.split('_')[0]\r\n                if pp is not None and not p.startswith(pp):\r\n                    put_founded = True\r\n                    break\r\n                index_c += 1\r\n            index_p = 0\r\n            for p in put_by_part:\r\n                putted_part_list.append(p)\r\n                if index_p == index_c:\r\n                    putted_part_list.append('HERE')\r\n                index_p += 1\r\n        elif action == 'START':\r\n            putted_part_list = ['HERE'] + put_by_part\r\n            put_founded = True\r\n        elif action == 'END':\r\n            putted_part_list = put_by_part + ['HERE']\r\n            put_founded = True\r\n        if put_founded:\r\n            take_words_list = []\r\n            for w in take_part_list:\r\n                take_words_list.append(baseTools.take_word(base, w))\r\n            chcange_take_list = []\r\n            for c in take_words_list:\r\n                if chanerCases.if_case(c):\r\n                    cc = chanerCases.change_cases(c, 'BACK', 'YES', put_case)[0]\r\n                    base = baseTools.add_word(base, put_as_it, cc)\r\n                    nc = baseTools.take_id(base, put_as_it, cc)\r\n                    chcange_take_list.append(nc)\r\n                else:\r\n                    base = baseTools.add_word(base, put_as_it, c)\r\n                    tc = baseTools.take_id(base, put_as_it, c)\r\n                    chcange_take_list.append(tc)\r\n\r\n            final_put_list = []\r\n            for f in putted_part_list:\r\n                if f == 'HERE':\r\n                    for d in chcange_take_list:\r\n                        final_put_list.append(d)\r\n                else:\r\n                    final_put_list.append(f)\r\n\r\n            for r in put_by_part:\r\n                base = baseTools.remove_sentens(base, r, put_se_id)\r\n            for n in final_put_list:\r\n                base = baseTools.add_se(base, n, put_se_id)\r\n            base = baseTools.join_ids_list(base, final_put_list)\r\n            base = baseTools.update_se(base, put_se_id, final_put_list)\r\n            return base\r\n        else:\r\n            print('An error occurred')\r\n            return base\r\n    else:\r\n        return base\r\nif __name__ == '__main__':\r\n    import awareness\r\n    base = awareness.take_base('memory_CLO_v2010')\r\n    take_se_id = 'SE_150'\r\n    put_se_id = 'SE_1200'\r\n    change_part_by_part(base,\r\n                        take_se_id, 'PO',\r\n                        put_se_id, 'DO', put_case='DOP',\r\n                        action='START')","repo_name":"amnezja3/awareness","sub_path":"changePartByPart.py","file_name":"changePartByPart.py","file_ext":"py","file_size_in_byte":4091,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"16825749351","text":"from socket import *\n\nHOST = '192.168.199.231'\nPORT = 21565\nBUFSIZE = 1024\nADDR = (HOST,PORT)\n\ntcpCliSocket = socket(AF_INET,SOCK_STREAM)\ntcpCliSocket.connect(ADDR)\n\nwhile True:\n\tdata = input('> ')\n\tif not data:\n\t\tbreak\n\ttcpCliSocket.send(bytes(data,'utf-8'))\n\tdata = tcpCliSocket.recv(BUFSIZE)\n\tif not data:\n\t\tbreak\n\tprint(data.decode('utf-8'))\n\t\ntcpCliSocket.close()\n","repo_name":"Unrealplace/Python","sub_path":"tcpClient.py","file_name":"tcpClient.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"32034452665","text":"from . import panda\n\nimport argparse\n\n\ndef cli():\n    parser = argparse.ArgumentParser(prog=\"walking_panda\")\n\n    # option for suppressing rotation\n    parser.add_argument(\"--no-rotate\",\n                        action=\"store_true\",\n                        help=\"Suppress Rotation\")\n\n    # option for setting the rotation speed\n    parser.add_argument(\"--rot-speed\",\n                        type=float,\n                        action=\"store\",\n                        default=1.0,\n                        help=\"Set rotation speed to ROT_SPEED\")\n\n    # option for top view orientation\n    parser.add_argument(\"--top-view\",\n                        action=\"store_true\",\n                        help=\"Set camera orientation to top view\")\n\n    # option for scaling the panda\n    parser.add_argument(\"--scale\",\n                        type=float,\n                        action=\"store\",\n                        default=1.0,\n                        help=\"Scale panda size by a factor of SCALE\")\n\n    # option for setting the default size of the panda\n    parser.add_argument(\"--size\",\n                        type=float,\n                        action=\"store\",\n                        default=0.005,\n                        help=\"Set the default size(0.005) of panda to SIZE\")\n\n    # option for window without the panda actor\n    parser.add_argument(\"--no-panda\",\n                        action=\"store_true\",\n                        help=\"Disable panda graphic\")\n\n    args = parser.parse_args()\n\n    # initialise WalkingPanda with the __dict__ attribute of args\n
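    # For example (illustrative values, not from the original file): running\n    #   walking_panda --scale 2 --no-rotate\n    # produces args like Namespace(no_rotate=True, rot_speed=1.0, top_view=False,\n    # scale=2.0, size=0.005, no_panda=False), so vars(args) supplies WalkingPanda\n    # with matching keyword arguments.\n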
    walking = panda.WalkingPanda(**vars(args))\n    walking.run()\n\n","repo_name":"lestherll/csc1034-p1","sub_path":"walking_panda/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26889001752","text":"import math\r\n\r\ndef getAscii(letter):\r\n    return ord(letter)\r\n\r\ndef asciiToLetter(number):\r\n    return chr(number)\r\n\r\ndef decimalToBinary(num):\r\n    binaryNum = []\r\n    while num != 0:\r\n        testNum = str(num % 2)\r\n        binaryNum.insert(0, testNum)\r\n        num = math.floor(num / 2)\r\n    while len(binaryNum) != 8:\r\n        binaryNum.insert(0, \"0\")\r\n    return ''.join(binaryNum)\r\n\r\ndef convertLetterToBinary(letter):\r\n    letter = getAscii(letter)\r\n    letter = decimalToBinary(letter)\r\n    return letter\r\n\r\ndef compare(num1, num2):\r\n    xor = []\r\n    for i in range(len(num1)):\r\n        if num1[i] != num2[i]:\r\n            xor.append(\"1\")\r\n        else:\r\n            xor.append(\"0\")\r\n    return ''.join(xor)\r\n\r\ndef binaryToDecimal(binaryNum):\r\n    decimalNumber = 0\r\n    factor = 1\r\n    # iterate down to index 0 so the most significant bit is included\r\n    for i in range(len(binaryNum) - 1, -1, -1):\r\n        decimalNumber += int(binaryNum[i]) * factor\r\n        factor *= 2\r\n    return decimalNumber\r\n
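\r\n# Quick check of the boundary fix above (added comment, not in the original file):\r\n# binaryToDecimal(\"10000000\") now yields 128; with the old range(len - 1, 0, -1)\r\n# the most significant bit was skipped and the call returned 0.\r\n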
\r\ndef stringToXor(string, key):\r\n    output = []\r\n    keyBinary = decimalToBinary(key)\r\n    for letter in string:\r\n        letterBinary = decimalToBinary(getAscii(letter))\r\n        xor = compare(keyBinary, letterBinary)\r\n        decimalXor = binaryToDecimal(xor)\r\n        output.append(decimalXor)\r\n    return output\r\n\r\ncode = \"92113120120123526712310212011253\"\r\n\r\ndef decrypt(code, key):\r\n    key = decimalToBinary(key)\r\n    translatedWord = []\r\n    for number in code:\r\n        binary = decimalToBinary(number)\r\n        xor = compare(key, binary)\r\n        decimal = binaryToDecimal(xor)\r\n        letter = asciiToLetter(decimal)\r\n        if ord(letter) < 32:\r\n            return\r\n        translatedWord.append(letter)\r\n    return ''.join(translatedWord)\r\n\r\ndef guess(code):\r\n    for i in range(128):\r\n        decrypted = decrypt(code, i)\r\n        if decrypted is not None:\r\n            print(\"Key: \" + str(i) + \" \" + decrypted)\r\n\r\n\r\ndef splitStrings(code):\r\n    totalStrings = []\r\n    testString = []\r\n    doTheSplit(code, totalStrings, testString)\r\n    return totalStrings\r\n\r\ndef doTheSplit(code, totalStrings, testString):\r\n    if len(code) == 0:\r\n        totalStrings.append(testString)\r\n        return\r\n    for i in range(0, min(3, len(code))):\r\n        newTestString = testString.copy()\r\n        newCode = int(code[0:i+1])\r\n        if newCode < 128:\r\n            newTestString.append(int(code[0:i+1]))\r\n            doTheSplit(code[i+1:], totalStrings, newTestString)\r\n\r\nstrings = splitStrings(\"144373738105\")\r\nfor string in strings:\r\n    guess(string)\r\n","repo_name":"Halfwai/computerSecurity","sub_path":"xor.py","file_name":"xor.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3822633735","text":"def is_low_point(map: list[list[int]], x: int, y: int) -> bool:\n    m, n = len(map), len(map[0])\n    val = map[x][y]\n    for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n        x1, y1 = x + dx, y + dy\n        if x1 >= 0 and x1 < m and y1 >= 0 and y1 < n and val >= map[x1][y1]:\n            return False\n    return True\n\nmap = []\nwith open('input-day9.txt') as file:\n    for line in file:\n        line = line.rstrip()\n        map.append([int(c) for c in line])\ntotal_risk = 0\nfor x in range(len(map)):\n    for y in range(len(map[0])):\n        if is_low_point(map, x, y):\n            total_risk += map[x][y] + 1\nprint(total_risk)","repo_name":"kenleung5e28/advent-of-code-2021","sub_path":"day9-1.py","file_name":"day9-1.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9419275569","text":"\"\"\"\r\nTom Lev\r\n22/10/22\r\n\"\"\"\r\nimport socket\r\nimport hashlib\r\nimport os\r\nimport threading\r\nimport logging\r\n\r\nIP = '127.0.0.1'\r\nPORT = 8820\r\nLEN = 8\r\nthreads = []\r\nfree_cpus = os.cpu_count()\r\ntotal_cpu = os.cpu_count()\r\nANSWER = '0'\r\n\r\nstarts = 0\r\nmid = 0\r\nends = 0\r\nplus = 1 + starts\r\n\r\n\r\ndef main():\r\n    \"\"\"\r\n    Gets the data from the server and starts threads running the md5 function,\r\n    returns the ANSWER to the server, and requests another number range if the\r\n    number has not been found yet.\r\n    :return: None\r\n    \"\"\"\r\n    my_socket = socket.socket()\r\n    my_socket.connect((IP, PORT))\r\n    try:\r\n        while True:\r\n            my_socket.send(\"ready\".encode())\r\n            data = my_socket.recv(1024).decode()\r\n            logging.debug(\"The server sent \" + data)\r\n            if \"FOUND\" not in data:\r\n                global free_cpus, plus, ends, mid, starts, total_cpu\r\n                while True:\r\n                    start = data.split(\"|\")[0]\r\n                    end = data.split(\"|\")[1]\r\n                    msg = data.split(\"|\")[2]\r\n                    while free_cpus > 0:\r\n                        lis = give_range(int(start.split(\".\")[0]), int(end.split(\".\")[0]))\r\n                        st = lis[0]\r\n                        mi = lis[1]\r\n                        thread = threading.Thread(target=md5, args=(str(st), str(mi), msg,))\r\n                        free_cpus -= 1\r\n                        thread.start()\r\n                        threads.append(thread)\r\n                        logging.debug(f\"starting thread number {total_cpu - free_cpus}..\")\r\n                        my_socket.send(ANSWER.encode())\r\n                        starts = 0\r\n                        mid = 0\r\n                        ends = 0\r\n                        plus = 1 + starts\r\n                        break\r\n                    if free_cpus == 0:\r\n                        logging.debug(\"waits for the threads to end..\")\r\n                        for thread in threads:\r\n                            thread.join()\r\n                        free_cpus = total_cpu\r\n                        break\r\n\r\n            else:\r\n                logging.info(\"found the message!\\n disconnecting...\")\r\n                my_socket.close()\r\n                exit()\r\n    except socket.error as er:\r\n        logging.error(str(er))\r\n        logging.info(\"disconnecting...\")\r\n        exit()\r\n    finally:\r\n        my_socket.close()\r\n        exit()\r\n\r\n\r\ndef md5(sta, end, msg):\r\n    \"\"\"\r\n    The md5 search action.\r\n    :param sta: int\r\n    :param end: int\r\n    :param msg: str\r\n    :return: None\r\n    \"\"\"\r\n    global ANSWER\r\n    for i in range(int(sta.split(\".\")[0]), int(end.split(\".\")[0])):\r\n        encrypted_msg = hashlib.md5(str(i).zfill(LEN).encode()).hexdigest()\r\n        if encrypted_msg == str(msg):\r\n            logging.info(\"FOUND THE MESSAGE!\")\r\n            ANSWER = str(i)\r\n
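\r\n# Padding example for the search above (added comment, not in the original file):\r\n# candidates are zero-padded to LEN digits before hashing, e.g. str(42).zfill(8)\r\n# gives \"00000042\", so every hashed string has the same fixed width.\r\n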
\r\n\r\ndef give_range(start, end):\r\n    \"\"\"\r\n    Gives a range to the client threads.\r\n    :param start: int\r\n    :param end: int\r\n    :return: start and mid\r\n    \"\"\"\r\n    global starts\r\n    global ends\r\n    global plus\r\n    global mid\r\n    mid = start\r\n    if plus > starts:\r\n        plus = end / total_cpu\r\n        starts = start\r\n        ends = end\r\n    mid += plus\r\n    return start, mid\r\n\r\n\r\nif __name__ == '__main__':\r\n    logging.basicConfig(filename=\"md5_client_log.txt\", encoding='utf-8', level=logging.DEBUG)\r\n    main()\r\n","repo_name":"TomLev6/school","sub_path":"md5/client1.py","file_name":"client1.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9327646104","text":"import requests\nfrom .datamodels.quote import Quote\nimport json\n\nbase_url = 'https://api.iextrading.com/1.0'\n\ndef get_stock(symbol):\n    '''\n    Gets Quote for given stock ticker\n\n    TAKES:\n    symbols: string of stock ticker eg. (AAPL, FB)\n\n    RETURNS:\n    Quote: quote object of json response. If error exists, quote object will contain error message\n    '''\n\n    api_url = '/stock/{}/quote'.format(symbol)\n    url = base_url + api_url\n\n    response = requests.get(url)\n    if response.text == \"Unknown symbol\":\n        quote = Quote()\n        quote.error = \"Unknown symbol\"\n        return quote\n\n    try:\n        quote_dic = response.json()\n    except json.decoder.JSONDecodeError:\n        quote = Quote()\n        quote.error = \"invalid json\"\n        return quote\n\n    quote = Quote(quote_dic)\n    return quote\n
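\n# Example usage (added comment; the ticker is illustrative and the error default is\n# an assumption about the Quote datamodel, which is not shown in this file):\n#     quote = get_stock('AAPL')\n#     if getattr(quote, 'error', None):\n#         ...  # handle \"Unknown symbol\" / \"invalid json\"\n#     else:\n#         ...  # use the populated Quote\n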
","repo_name":"djlad/TTP-FS","sub_path":"server/app/services/get_stock.py","file_name":"get_stock.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"537180791","text":"import subprocess\nimport time\nimport ROOT as r\n\ndef read_threshold_file(dirName):\n    f = open('data/%s/threshold.txt' % dirName, 'r')\n    threshold = []\n    for l in f:\n        threshold.append(float(l))\n    return threshold\n\ndef run_time_scan(args, dirName, thresholds):\n    script = 'scan_script/sbit_time_scan.py'\n    arg = '%d -time %d -interval %d -total %d' % (args.nOH, args.timeWindow, args.interval, args.totalTime)\n\n    arg += ' -thr'\n    for thr in thresholds: arg += ' %d' % thr\n\n    cmd = 'bash -c -l \"python -u - %s\"' % arg\n\n    host = 'gempro@gem-shelf01-amc02'\n    log = open('%s/sbit_time_scan.log' % (dirName), 'w')\n    err = open('%s/sbit_time_scan.err' % (dirName), 'w')\n    infile = open(script, 'r')\n    subprocess.call(['ssh', host, cmd], stdin=infile, stdout=log, stderr=err)\n\n    log.close()\n    err.close()\n    infile.close()\n\ndef process_scan(dirName):\n    log = open('%s/sbit_time_scan.log' % dirName, 'r')\n    histInfo = {}\n    for l in log:\n        source = l.replace(' ', '').rstrip().split(';')\n        if source[0] == 'time':\n            t = float(source[1])\n            histInfo[t] = [0 for i in range(24)]\n        elif source[0] == 'vfatN':\n            vfatN = int(source[1])\n            rate = float(source[2])\n            histInfo[t][vfatN] = rate\n\n    h = [ r.TH1D('sbit_time_vfat_%d' % i, 'vfat %d;Time;Rate [Hz]' % i, len(histInfo.keys()), 0, len(histInfo.keys())) for i in range(24) ]\n    sortedKey = sorted(histInfo.keys())\n    for i, t in enumerate(sortedKey):\n        rates = histInfo[t]\n        loctime = time.localtime(t)\n        label = '%02d:%02d:%02d' % (loctime.tm_hour, loctime.tm_min, loctime.tm_sec)\n        xBin = h[0].GetXaxis().FindBin(i)\n        # use a separate loop variable so the enumerate index is not shadowed\n        for j in range(24):\n            h[j].GetXaxis().SetBinLabel(xBin, label)\n            h[j].SetBinContent(xBin, rates[j])\n\n    fOut = r.TFile('%s/sbit_time_scan.root' % dirName, 'recreate')\n    for i in range(24):\n        h[i].Write()\n    fOut.Close()\n\nif __name__ == '__main__':\n    import argparse\n    current = time.localtime()\n    dirName = 'data/time_scan/%d.%02d.%02d.%02d.%02d' % (current.tm_year, current.tm_mon, current.tm_mday, current.tm_hour, current.tm_min)\n    subprocess.call(['mkdir', '-p', dirName])\n\n    parser = argparse.ArgumentParser(description='SBit rate scans as a function of time at a fixed threshold value')\n    parser.add_argument('nOH', type=int, default=0, help='number of OH to scan')\n    parser.add_argument('--timeWindow', '-time', type=int, help='Time window for SBit counting (seconds)')\n    parser.add_argument('--interval', '-interval', type=int, default=10, help='Time interval before the next data taking (seconds)')\n    parser.add_argument('--totalTime', '-total', type=int, default=60, help='Total data taking time (minutes)')\n    parser.add_argument('--thresholds', '-thr', default=None, help='Date of the threshold setup to read')\n    args = parser.parse_args()\n\n    print(\"Reading threshold file data/%s/threshold.txt\" % args.thresholds)\n    thresholds = read_threshold_file(args.thresholds)\n    print(\"START scanning\")\n    print(\"The scan will be finished in %d minutes\" % args.totalTime)\n    run_time_scan(args, dirName, thresholds)\n    print(\"Scan has finished\")\n    print(\"START processing the scan result\")\n    process_scan(dirName)\n    print(\"END the processing\")\n    print(\"The result has been saved in %s/sbit_time_scan.root\" % dirName)\n    exit()\n","repo_name":"yeckang/NoiseMonitoring","sub_path":"run_time_scan.py","file_name":"run_time_scan.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1015091424","text":"import os\nimport time\nimport collections\n\n\ndef FindFile(aDirs, aFiles, aSubDir = False):\n    Result = []\n\n    for Dir in aDirs:\n        for Root, Folders, Files in os.walk(Dir, followlinks = True):\n            for File1 in aFiles:\n                for File2 in Files:\n                    if (File1 and File1 in File2):\n                        Result.append(Root + '/' + File2)\n            if (not aSubDir):\n                break\n    return Result\n\n\ndef IsFileWrite(aPath: str) -> bool:\n    if (os.path.exists(aPath)):\n        Result = os.access(aPath, os.W_OK)\n    else:\n        try:\n            with open(aPath, 'w+') as File:\n                os.unlink(File.name)\n            Result = True\n        except:\n            Result = False\n    return Result\n\n\ndef FileExists(aPath: str) -> bool:\n    return os.path.exists(aPath)\n\ndef LoadFromFile(aFileName: str, aMode = 'rb') -> bytes:\n    Result = None\n    if (os.path.isfile(aFileName)):\n        with open(aFileName, aMode) as File:\n            Result = File.read()\n    return Result\n\ndef LoadFromFileToStr(aFileName: str) -> str:\n    return LoadFromFile(aFileName, 'r')\n\ndef LoadFromFileToList(aName: str) -> list:\n    Result = []\n    if (os.path.isfile(aName)):\n        with open(aName, 'r') as File:\n            Result = File.readlines()\n    return Result\n\ndef SaveToFileFromList(aName, aList):\n    with open(aName, 'w') as File:\n        for Line in aList:\n            File.write(Line)\n\ndef SplitName(aFullPath):\n    Root, Name = os.path.split(aFullPath)\n    Base, Ext = os.path.splitext(Name)\n\n    Obj = collections.namedtuple('FilePart', ['Orig', 'Root', 'Name', 'Base', 'Ext'])\n    return Obj(Orig=aFullPath, Root=Root, Name=Name, Base=Base, Ext=Ext)\n
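\n# Worked example (added comment, not in the original file):\n# SplitName('/tmp/report.txt') returns FilePart(Orig='/tmp/report.txt', Root='/tmp',\n# Name='report.txt', Base='report', Ext='.txt')\n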
\ndef ChangeExt(aPath, aExt):\n    return os.path.splitext(aPath)[0] + aExt\n\ndef GetCTime(aFile):\n    Result = time.gmtime(os.path.getctime(aFile))\n    #time.strftime('%Y-%m-%d', Result)\n    return Result\n\ndef GetFileSize(aFile):\n    return int(os.path.getsize(aFile))\n","repo_name":"VladVons/py-relay","sub_path":"src/Inc/Util/UFS.py","file_name":"UFS.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"15346230521","text":"#!/usr/bin/python3\n\"\"\"\nDisplays the value of the X-Request-Id header variable from a request\nto a given URL.\n\nUsage: ./1-hbtn_header.py <url>\n\"\"\"\nfrom urllib.request import Request, urlopen\nfrom sys import argv\n\n\ndef main():\n    \"\"\"\n    Displays the value of the X-Request-Id header variable\n    from a request to a given URL.\n    \"\"\"\n    url = argv[1]\n    req = Request(url)\n    with urlopen(req) as res:\n        x_request_id = res.info().get(\"x-request-id\")\n        print(x_request_id)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"emmy3000/alx-higher_level_programming","sub_path":"0x11-python-network_1/1-hbtn_header.py","file_name":"1-hbtn_header.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73977784146","text":"import pygame\r\n# constants for events, keys and mouse buttons\r\nfrom pygame.locals import MOUSEBUTTONDOWN, MOUSEBUTTONUP, QUIT, K_ESCAPE, KEYDOWN, K_p\r\nimport numpy as np\r\nimport time\r\nimport random\r\n\r\n# colors\r\nNegro = (0, 0, 0)\r\nBlanco = (255, 255, 255)\r\n# board color\r\nColorTablero = (0, 51, 25)\r\nsp = 32\r\nalt = 589\r\n\r\nclass Punto(pygame.sprite.Sprite):\r\n    def __init__(self, array_indexes, location, size, color):\r\n        super(Punto, self).__init__()\r\n        self.surf = pygame.Surface(size)\r\n        self.surf.fill(color)\r\n        self.location = location\r\n        self.array_indexes = array_indexes\r\n        self.occupied = False\r\n        self.color = None\r\n\r\n\r\ndef obtenerVecinos(y, x, board_shape):\r\n    vecinos = list()\r\n\r\n    if y > 0:\r\n        vecinos.append((y - 1, x))\r\n    if y < board_shape[0] - 1:\r\n        vecinos.append((y + 1, x))\r\n    if x > 0:\r\n        vecinos.append((y, x - 1))\r\n    if x < board_shape[1] - 1:\r\n        vecinos.append((y, x + 1))\r\n\r\n    return vecinos\r\n
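\r\n# Worked example (added comment, not in the original file): on a 19x19 board,\r\n# obtenerVecinos(0, 0, (19, 19)) returns [(1, 0), (0, 1)] -- corner points have two\r\n# orthogonal neighbours, edge points three and interior points four.\r\n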
\r\n\r\ndef colisionSprite(posicion_sprite, posicion_click):\r\n    sprite_y, sprite_x = posicion_sprite\r\n    click_y, click_x = posicion_click\r\n\r\n    if sprite_y - 10 < click_y < sprite_y + 10:\r\n        if sprite_x - 10 < click_x < sprite_x + 10:\r\n            return True\r\n\r\n    return False\r\n\r\n\r\nclass Principal:\r\n    def __init__(self):\r\n        self.locations = None\r\n        self.visited = None\r\n        self.empty_colors = None\r\n        self.empty_counts = None\r\n        self.empty_groups = None\r\n        self.gameover = None\r\n        self.passed_in_a_row = None\r\n        self.komi = None\r\n        self.turno_blanco = None\r\n        self.turno = None\r\n        self.screen = None\r\n        self.sprite_array = None\r\n        self.sprites = None\r\n\r\n    def init(self, komi=2.5):\r\n        pygame.init()\r\n        # Screen dimensions\r\n        anchoDePantalla = 620\r\n        altoDePantalla = 690\r\n\r\n        # Create the sprite group\r\n        self.sprites = pygame.sprite.Group()\r\n\r\n        self.sprite_array = [[0 for _ in range(19)] for _ in range(19)]\r\n\r\n        self.screen = pygame.display.set_mode((anchoDePantalla, altoDePantalla))\r\n\r\n        # The caption below indicates the state of the game\r\n        pygame.display.set_caption('GO Chino! | Playing')\r\n\r\n        # turn counter\r\n        self.turno = 0\r\n        # white player's turn\r\n        self.turno_blanco = False\r\n        # compensation for the player who moves second\r\n        self.komi = komi\r\n        self.passed_in_a_row = 0\r\n        self.gameover = False\r\n\r\n    def iniciarVSbot(self):\r\n        clock = pygame.time.Clock()\r\n        fps = 30\r\n        # Generate the sprite locations\r\n        self.ubicacionSprites()\r\n        # Place the sprites\r\n        self.ubicarSprites()\r\n        ejecutando = True\r\n\r\n        ubicacionTurno = self.screen.get_width() - 500\r\n\r\n        # Draw the screen background\r\n        self.screen.fill(ColorTablero)\r\n\r\n        self.dibujarTablero()\r\n\r\n        bot = 0\r\n        cont = 0\r\n        contbot = 0\r\n        while ejecutando:\r\n            clock.tick(fps)\r\n\r\n            if self.gameover:\r\n                ejecutando = False\r\n                if self.calcularQuienGano() == 'White':\r\n                    # show the winner screen with white as the winner\r\n                    self.ganador(\"blanco\")\r\n                else:\r\n                    # show the winner screen with black as the winner\r\n                    self.ganador(\"negro\")\r\n\r\n            if self.turno % 2 == 0:\r\n                cont = 0\r\n                for event in pygame.event.get():\r\n\r\n                    self.dibujarSprites()\r\n\r\n                    if event.type == MOUSEBUTTONDOWN:\r\n\r\n                        pos = pygame.mouse.get_pos()\r\n\r\n                        clicked_sprites = [sprite for sprite in self.sprites if colisionSprite(sprite.location, pos)]\r\n\r\n                        if clicked_sprites:\r\n\r\n                            clicked_sprite = clicked_sprites[0]\r\n                            # check that the clicked sprite is not occupied\r\n                            if not clicked_sprite.occupied:\r\n                                self.turno += 1\r\n                                # the stone color is black on odd turns and white on even ones\r\n                                colorCirculo = Negro if self.turno % 2 else Blanco\r\n\r\n                                x, y = clicked_sprite.location\r\n                                posicion = (x + 1, y)\r\n\r\n                                clicked_sprite.occupied = True\r\n                                clicked_sprite.color = colorCirculo\r\n\r\n                                # hand the clicked sprite over to the capture logic\r\n                                self.capturePieces(*clicked_sprite.array_indexes)\r\n\r\n                                if not clicked_sprite.occupied:\r\n                                    self.turno -= 1\r\n                                    self.turno_blanco = True if not self.turno_blanco else False\r\n\r\n                                else:\r\n                                    self.passed_in_a_row = 0\r\n                                    fichas = self.calcularFichas()\r\n                                    print(\"score computed\")\r\n\r\n                    elif event.type == KEYDOWN:\r\n                        if event.key == K_ESCAPE:\r\n                            ejecutando = False\r\n\r\n                        elif event.key == K_p:\r\n                            player = 'White' if not self.turno % 2 else 'Black'\r\n                            self.pasarTurno()\r\n                    elif event.type == QUIT:\r\n                        ejecutando = False\r\n\r\n                    self.screen.fill(ColorTablero)\r\n\r\n                    self.dibujarTablero()\r\n                    self.dibujarSprites()\r\n\r\n                contbot = 0\r\n            else:\r\n\r\n                if contbot == 0:\r\n                    contbot = 1\r\n                    pygame.time.delay(1000)\r\n                    if self.passed_in_a_row == 1:\r\n                        if self.calcularQuienGano() == \"White\":\r\n                            self.pasarTurno()\r\n\r\n                    listiña = []\r\n                    self.screen.fill(ColorTablero)\r\n\r\n                    self.dibujarTablero()\r\n                    self.dibujarSprites()\r\n\r\n                    for sprite in self.sprites:\r\n                        if sprite.occupied and sprite.color == Negro:\r\n\r\n                            for loc in self.locations:\r\n                                if sprite.location == loc[1]:\r\n                                    vecinos = obtenerVecinos(loc[0][1], loc[0][0], (19, 19))\r\n                                    for vec in vecinos:\r\n                                        spritiño = self.sprite_array[vec[0]][vec[1]]\r\n                                        if not spritiño.occupied:\r\n                                            listiña.append(spritiño.location)\r\n\r\n                    if 0 == 0:\r\n                        pos = random.choice(listiña)\r\n                        cont = cont + 1\r\n                        print(\"counter\")\r\n                        print(cont)\r\n                        if cont == 200:\r\n                            self.pasarTurno()\r\n\r\n                        clicked_sprites = [sprite for sprite in self.sprites if colisionSprite(sprite.location, pos)]\r\n                        # make sure at least one sprite was chosen\r\n                        if clicked_sprites:\r\n                            clicked_sprite = clicked_sprites[0]\r\n                            # check that the chosen sprite is not occupied
\r\n                            if not clicked_sprite.occupied:\r\n                                self.turno += 1\r\n                                # the stone color is black on odd turns and white on even ones\r\n                                colorCirculo = Negro if self.turno % 2 else Blanco\r\n\r\n                                # get the x, y coordinates of the chosen sprite's location\r\n                                x, y = clicked_sprite.location\r\n                                posicion = (x + 1, y)\r\n\r\n                                clicked_sprite.occupied = True\r\n                                clicked_sprite.color = colorCirculo\r\n\r\n                                # hand the chosen sprite over to the capture logic\r\n                                self.capturePieces(*clicked_sprite.array_indexes)\r\n\r\n                                if not clicked_sprite.occupied:\r\n                                    self.turno -= 1\r\n                                    self.turno_blanco = True if not self.turno_blanco else False\r\n\r\n                                else:\r\n                                    self.passed_in_a_row = 0\r\n\r\n                                    fichas = self.calcularFichas()\r\n                                    print(\"score computed\")\r\n\r\n            pygame.display.update()\r\n        pygame.quit()\r\n\r\n    def pasarTurno(self):\r\n        self.passed_in_a_row += 1\r\n        if self.passed_in_a_row == 2:\r\n            self.FinPartida()\r\n            return\r\n\r\n        self.turno += 1\r\n        self.turno_blanco = True if not self.turno_blanco else False\r\n\r\n        jugador = 'NEGRO' if not self.turno % 2 else 'BLANCO'\r\n        pygame.display.set_caption(f'Go Chino! | {jugador}')\r\n\r\n    # shows the end-of-game screen\r\n    def FinPartida(self):\r\n        jugadorGanador = self.calcularQuienGano()\r\n        self.gameover = True\r\n\r\n    # returns 'White' or 'Black'\r\n    def calcularQuienGano(self):\r\n        white_score = self.komi\r\n        black_score = 0\r\n\r\n        white_on_board, black_on_board = self.encontrarPiezasEnTablero()\r\n        white_surrounded, black_surrounded = self.calcularCasillasRodeadas()\r\n\r\n        white_score += white_on_board\r\n        black_score += black_on_board\r\n\r\n        white_score += white_surrounded\r\n        black_score += black_surrounded\r\n\r\n        if white_score > black_score:\r\n            return 'White'\r\n        else:\r\n            return 'Black'\r\n\r\n    def calcularFichas(self):\r\n        scores = []\r\n        white_score = self.komi\r\n        black_score = 0\r\n\r\n        white_on_board, black_on_board = self.encontrarPiezasEnTablero()\r\n        white_surrounded, black_surrounded = self.calcularCasillasRodeadas()\r\n\r\n        white_score += white_on_board\r\n        black_score += black_on_board\r\n\r\n        white_score += white_surrounded\r\n        black_score += black_surrounded\r\n        scores.append(white_score)\r\n        scores.append(black_score)\r\n        return scores\r\n\r\n    def ganador(self, color):\r\n        pygame.init()\r\n        screen = pygame.display.set_mode((700, 600))\r\n\r\n        if color == \"blanco\":\r\n            background_image = pygame.image.load(\"img/ganaron_blanca.jpg\").convert()\r\n        elif color == \"negro\":\r\n            background_image = pygame.image.load(\"img/ganaron_negras.jpg\").convert()\r\n        else:\r\n            # invalid color, exit without showing an image\r\n            return\r\n\r\n        background_image = pygame.transform.scale(background_image, (700, 600))\r\n\r\n        running = True\r\n        while running:\r\n            for event in pygame.event.get():\r\n                if event.type == pygame.QUIT:\r\n                    running = False\r\n\r\n            screen.blit(background_image, (0, 0))\r\n            pygame.display.flip()\r\n\r\n    def encontrarPiezasEnTablero(self):\r\n        white_count = 0\r\n        black_count = 0\r\n\r\n        for row in self.sprite_array:\r\n            for item in row:\r\n                if not item.occupied:\r\n                    continue\r\n\r\n                color = item.color\r\n\r\n                if color == Blanco:\r\n                    white_count += 1\r\n                else:\r\n                    black_count += 1\r\n\r\n        return white_count, black_count\r\n\r\n    # returns the amount of enclosed (surrounded) territory\r\n    def calcularCasillasRodeadas(self):\r\n        white_count = 0\r\n        black_count = 0\r\n\r\n        self.empty_groups = []\r\n        self.empty_counts = []\r\n        self.empty_colors = []\r\n        self.visited = []\r\n
\r\n        for y, row in enumerate(self.sprite_array):\r\n            for x, sprite in enumerate(row):\r\n                if sprite.occupied:\r\n                    continue\r\n\r\n                self.encontrarCasillasVacias(y, x)\r\n\r\n        for index in range(len(self.empty_colors)):\r\n            empty_count = self.empty_counts[index]\r\n            empty_colors = self.empty_colors[index]\r\n\r\n            if Negro not in empty_colors and Blanco in empty_colors:\r\n                white_count += empty_count\r\n            if Blanco not in empty_colors and Negro in empty_colors:\r\n                black_count += empty_count\r\n\r\n        return white_count, black_count\r\n\r\n    def encontrarCasillasVacias(self, y, x, agregando=False):\r\n        if not agregando:\r\n            self.empty_groups.append([])\r\n            self.empty_counts.append(0)\r\n            self.empty_colors.append([])\r\n\r\n        vecinos = obtenerVecinos(y, x, (19, 19))\r\n        vecinos.append((y, x))\r\n\r\n        for location in vecinos:\r\n            sprite = self.sprite_array[location[0]][location[1]]\r\n            if sprite.occupied or sprite in self.visited:\r\n                continue\r\n\r\n            self.visited.append(sprite)\r\n            self.empty_groups[-1].append(location)\r\n            self.empty_counts[-1] += 1\r\n            self.empty_colors[-1] += self.obtenerColoresNoVaciosDeVecinos(y, x)\r\n            self.encontrarCasillasVacias(location[0], location[1], agregando=True)\r\n\r\n    # returns the colors of the occupied neighbouring points\r\n    def obtenerColoresNoVaciosDeVecinos(self, y, x):\r\n        colors = []\r\n\r\n        vecinos = obtenerVecinos(y, x, (19, 19))\r\n        for location in vecinos:\r\n            sprite = self.sprite_array[location[0]][location[1]]\r\n            if not sprite.occupied:\r\n                continue\r\n            colors.append(sprite.color)\r\n\r\n        return colors\r\n\r\n    def probarGrupo(self, board, opponent_board, y, x, current_group):\r\n\r\n        pos = (y, x)\r\n\r\n        if current_group[pos]:\r\n            # already-tested stones are not liberties\r\n            return False\r\n\r\n        # check whether an opponent stone sits at pos\r\n        if opponent_board[pos]:\r\n            current_group[pos] = True\r\n\r\n            vecinos = obtenerVecinos(y, x, board.shape)\r\n\r\n            for yn, xn in vecinos:\r\n                has_liberties = self.probarGrupo(board, opponent_board, yn, xn, current_group)\r\n                if has_liberties:\r\n                    return True\r\n            return False\r\n\r\n        return not board[pos]\r\n\r\n    def capturePieces(self, y, x):\r\n\r\n        # auxiliary board for white\r\n        tablero_blanco = np.array(\r\n            [[1.0 if item.color == Blanco and item.occupied else 0.0 for item in row] for row in self.sprite_array],\r\n            dtype=int)\r\n        # auxiliary board for black\r\n        tablero_negro = np.array(\r\n            [[1.0 if item.color == Negro and item.occupied else 0.0 for item in row] for row in self.sprite_array],\r\n            dtype=int)\r\n\r\n        # switch the turn\r\n        turno_blanco = self.turno_blanco\r\n        self.turno_blanco = True if not self.turno_blanco else False\r\n\r\n        # call the capture routine with both boards\r\n        tablero_resultante = self.fastCapturePieces(tablero_negro, tablero_blanco, turno_blanco, y, x)\r\n\r\n        for index1, row in enumerate(tablero_resultante):\r\n            for index2, item in enumerate(row):\r\n\r\n                color = Blanco if item == 1 else Negro\r\n\r\n                occupied = True if item != 0 else False\r\n\r\n                self.sprite_array[index1][index2].occupied = occupied\r\n                self.sprite_array[index1][index2].color = color\r\n\r\n    def fastCapturePieces(self, black_board_, white_board_, turn_white, y, x):\r\n\r\n        black_board, white_board = black_board_.copy(), white_board_.copy()\r\n\r\n        vecinos = obtenerVecinos(y, x, black_board.shape)\r\n\r\n        board = white_board if turn_white else black_board\r\n        opponent_board = black_board if turn_white else white_board\r\n        # make another copy of the opponent's board\r\n        original_opponent_board = opponent_board.copy()\r\n\r\n        # test for suicide moves
\r\n        original_pos = (y, x)\r\n\r\n        original_pos = original_pos[::-1]\r\n\r\n        # 19x19 boolean array\r\n        current_group = np.zeros((19, 19), dtype=bool)\r\n        original_pos_has_liberties = self.probarGrupo(opponent_board, board, *original_pos, current_group)\r\n\r\n        for pos in vecinos:\r\n\r\n            pos = pos[::-1]\r\n\r\n            if not opponent_board[pos]:\r\n                continue\r\n            # build a 19x19 boolean matrix initialised to False\r\n            current_group = np.zeros((19, 19), dtype=bool)\r\n            has_liberties = self.probarGrupo(board, opponent_board, *pos, current_group)\r\n\r\n            if not has_liberties:\r\n                opponent_board[current_group] = False\r\n\r\n        same = True\r\n        break_out = False\r\n\r\n        for row_index, row in enumerate(original_opponent_board):\r\n            for item_index, item in enumerate(row):\r\n                if opponent_board[row_index, item_index] != item:\r\n                    same = False\r\n                    break_out = True\r\n                    break\r\n            if break_out:\r\n                break\r\n\r\n        out_board = [[i for i in range(19)] for v in range(19)]\r\n        for i in range(19):\r\n            for v in range(19):\r\n                if white_board[i][v]:\r\n                    out_board[i][v] = 1\r\n                elif black_board[i][v]:\r\n                    out_board[i][v] = -1\r\n                else:\r\n                    out_board[i][v] = 0\r\n\r\n        if same and not original_pos_has_liberties:\r\n            out_board[original_pos[0]][original_pos[1]] = 0\r\n\r\n            return out_board\r\n        else:\r\n            return out_board\r\n\r\n    def ubicacionSprites(self):\r\n        ubicaciones = []\r\n\r\n        for y_index, y_pos in enumerate(range(10, alt, sp)):\r\n            for x_index, x_pos in enumerate(range(10, alt, sp)):\r\n                ubicaciones.append([[y_index, x_index], [y_pos, x_pos]])\r\n\r\n        # store the list in the instance attribute\r\n        self.locations = ubicaciones\r\n\r\n    def ubicarSprites(self):\r\n        # track the row and the item index within the matrix\r\n        fila = 0\r\n        item = 0\r\n\r\n        # iterate over the generated locations\r\n        for location in self.locations:\r\n\r\n            if item >= 19:\r\n                fila += 1\r\n                item = 0\r\n            if fila > 18:\r\n                break\r\n\r\n            sprite = Punto(*location, (10, 10), (255, 32, 1))\r\n\r\n            # add the newly created sprite to the sprite group\r\n            self.sprites.add(sprite)\r\n\r\n            # also add it to the matrix\r\n            self.sprite_array[item][fila] = sprite\r\n\r\n            # next item\r\n            item += 1\r\n\r\n    # Method that draws the board lines\r\n    def dibujarTablero(self):\r\n\r\n        for y_pos in range(10, alt, sp):\r\n            pygame.draw.line(self.screen, Negro, (10, y_pos), (alt, y_pos), width=2)\r\n        for x_pos in range(10, alt, sp):\r\n            pygame.draw.line(self.screen, Negro, (x_pos, 10), (x_pos, alt), width=2)\r\n\r\n    # draw the stone at the selected point\r\n    def dibujarSprites(self):\r\n\r\n        for entity in self.sprites:\r\n            if entity.occupied:\r\n                x, y = entity.location\r\n                loc = (x + 1, y)\r\n                pygame.draw.circle(self.screen, entity.color, loc, 10, 0)\r\n","repo_name":"EmilianoMGomez/GoChino","sub_path":"Go_Chino/Clases/prueba_19x19_Bot.py","file_name":"prueba_19x19_Bot.py","file_ext":"py","file_size_in_byte":20147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43277504032","text":"from __future__ import print_function\nimport tensorflow as tf\nimport numpy as np\nimport sys\nimport edgeml_tf.utils as utils\n\n\nclass ProtoNNTrainer:\n    def __init__(self, protoNNObj, regW, regB, regZ,\n                 sparcityW, sparcityB, sparcityZ,\n                 learningRate, X, Y, lossType='l2'):\n        '''\n        A wrapper for the various techniques used for training ProtoNN. This\n        subsumes both the responsibility of loss graph construction and\n        performing training. 
The original training routine that is part of the\n C++ implementation of EdgeML used iterative hard thresholding (IHT),\n gamma estimation through median heuristic and other tricks for\n training ProtoNN. This module implements the same in Tensorflow\n and python.\n\n protoNNObj: An instance of ProtoNN class defining the forward\n computation graph. The loss functions and training routines will be\n attached to this instance.\n regW, regB, regZ: Regularization constants for W, B, and\n Z matrices of protoNN.\n sparcityW, sparcityB, sparcityZ: Sparsity constraints\n for W, B and Z matrices. A value between 0 (exclusive) and 1\n (inclusive) is expected. A value of 1 indicates dense training.\n learningRate: Initial learning rate for ADAM optimizer.\n X, Y : Placeholders for data and labels.\n X [-1, featureDimension]\n Y [-1, num Labels]\n lossType: ['l2', 'xentropy']\n '''\n self.protoNNObj = protoNNObj\n self.__regW = regW\n self.__regB = regB\n self.__regZ = regZ\n self.__sW = sparcityW\n self.__sB = sparcityB\n self.__sZ = sparcityZ\n self.__lR = learningRate\n self.X = X\n self.Y = Y\n self.sparseTraining = True\n if (sparcityW == 1.0) and (sparcityB == 1.0) and (sparcityZ == 1.0):\n self.sparseTraining = False\n print(\"Sparse training disabled.\", file=sys.stderr)\n # Define placeholders for sparse training\n self.W_th = None\n self.B_th = None\n self.Z_th = None\n self.__lossType = lossType\n self.__validInit = False\n self.__validInit = self.__validateInit()\n self.__protoNNOut = protoNNObj(X, Y)\n self.loss = self.__lossGraph()\n self.trainStep = self.__trainGraph()\n self.__hthOp = self.__getHardThresholdOp()\n self.accuracy = protoNNObj.getAccuracyOp()\n\n def __validateInit(self):\n self.__validInit = False\n msg = \"Sparsity value should be between\"\n msg += \" 0 and 1 (both inclusive).\"\n assert self.__sW >= 0. and self.__sW <= 1., 'W:' + msg\n assert self.__sB >= 0. and self.__sB <= 1., 'B:' + msg\n assert self.__sZ >= 0. 
and self.__sZ <= 1., 'Z:' + msg\n        d, dcap, m, L, _ = self.protoNNObj.getHyperParams()\n        msg = 'Y should be of dimension [-1, num labels/classes]'\n        msg += ' specified as part of ProtoNN object.'\n        assert (len(self.Y.shape)) == 2, msg\n        assert (self.Y.shape[1] == L), msg\n        msg = 'X should be of dimension [-1, featureDimension]'\n        msg += ' specified as part of ProtoNN object.'\n        assert (len(self.X.shape) == 2), msg\n        assert (self.X.shape[1] == d), msg\n        self.__validInit = True\n        msg = 'Values can be \\'l2\\', or \\'xentropy\\''\n        if self.__lossType not in ['l2', 'xentropy']:\n            raise ValueError(msg)\n        return True\n\n    def __lossGraph(self):\n        pnnOut = self.__protoNNOut\n        l1, l2, l3 = self.__regW, self.__regB, self.__regZ\n        W, B, Z, _ = self.protoNNObj.getModelMatrices()\n        if self.__lossType == 'l2':\n            with tf.name_scope('protonn-l2-loss'):\n                loss_0 = tf.nn.l2_loss(self.Y - pnnOut)\n                reg = l1 * tf.nn.l2_loss(W) + l2 * tf.nn.l2_loss(B)\n                reg += l3 * tf.nn.l2_loss(Z)\n                loss = loss_0 + reg\n        elif self.__lossType == 'xentropy':\n            with tf.name_scope('protonn-xentropy-loss'):\n                loss_0 = tf.nn.softmax_cross_entropy_with_logits_v2(logits=pnnOut,\n                                                                    labels=tf.stop_gradient(self.Y))\n                loss_0 = tf.reduce_mean(loss_0)\n                reg = l1 * tf.nn.l2_loss(W) + l2 * tf.nn.l2_loss(B)\n                reg += l3 * tf.nn.l2_loss(Z)\n                loss = loss_0 + reg\n        return loss\n\n    def __trainGraph(self):\n        with tf.name_scope('protonn-gradient-adam'):\n            trainStep = tf.train.AdamOptimizer(self.__lR)\n            trainStep = trainStep.minimize(self.loss)\n        return trainStep\n\n    def __getHardThresholdOp(self):\n        W, B, Z, _ = self.protoNNObj.getModelMatrices()\n        self.W_th = tf.placeholder(tf.float32, name='W_th')\n        self.B_th = tf.placeholder(tf.float32, name='B_th')\n        self.Z_th = tf.placeholder(tf.float32, name='Z_th')\n        with tf.name_scope('hard-threshold-assignments'):\n            # hard_thrsd_W = W.assign(self.W_th)\n            # hard_thrsd_B = B.assign(self.B_th)\n            # hard_thrsd_Z = Z.assign(self.Z_th)\n            # Code changes for tf 1.11\n            hard_thrsd_W = tf.assign(W, self.W_th)\n            hard_thrsd_B = tf.assign(B, self.B_th)\n            hard_thrsd_Z = tf.assign(Z, self.Z_th)\n            hard_thrsd_op = tf.group(hard_thrsd_W, hard_thrsd_B, hard_thrsd_Z)\n        return hard_thrsd_op\n\n    def train(self, batchSize, totalEpochs, sess,\n              x_train, x_val, y_train, y_val, noInit=False,\n              redirFile=None, printStep=10, valStep=3):\n        '''\n        Performs dense training of ProtoNN followed by iterative hard\n        thresholding to enforce sparsity constraints.\n\n        batchSize: Batch size per update\n        totalEpochs: The number of epochs to run training for. One epoch is\n        defined as one pass over the entire training data.\n        sess: The Tensorflow session to use for running various graph\n        operators.\n        x_train, x_val, y_train, y_val: The numpy arrays containing train and\n        validation data. x data is assumed to be of shape [-1,\n        featureDimension] while y should have shape [-1, numberLabels].\n        noInit: By default, all the tensors of the computation graph are\n        initialized at the start of the training session. 
Set noInit=True to\n        disable this behaviour.\n        printStep: Number of batches between echoing of loss and train accuracy.\n        valStep: Number of epochs between evaluations on the validation set.\n        '''\n        d, d_cap, m, L, gamma = self.protoNNObj.getHyperParams()\n        assert batchSize >= 1, 'Batch size should be positive integer'\n        assert totalEpochs >= 1, 'Total epochs should be positive integer'\n        assert x_train.ndim == 2, 'Expected training data to be of rank 2'\n        assert x_train.shape[1] == d, 'Expected x_train to be [-1, %d]' % d\n        assert x_val.ndim == 2, 'Expected validation data to be of rank 2'\n        assert x_val.shape[1] == d, 'Expected x_val to be [-1, %d]' % d\n        assert y_train.ndim == 2, 'Expected training labels to be of rank 2'\n        assert y_train.shape[1] == L, 'Expected y_train to be [-1, %d]' % L\n        assert y_val.ndim == 2, 'Expected validation labels to be of rank 2'\n        assert y_val.shape[1] == L, 'Expected y_val to be [-1, %d]' % L\n\n        # Numpy will throw asserts for arrays\n        if sess is None:\n            raise ValueError('sess must be valid Tensorflow session.')\n\n        trainNumBatches = int(np.ceil(len(x_train) / batchSize))\n        valNumBatches = int(np.ceil(len(x_val) / batchSize))\n        x_train_batches = np.array_split(x_train, trainNumBatches)\n        y_train_batches = np.array_split(y_train, trainNumBatches)\n        x_val_batches = np.array_split(x_val, valNumBatches)\n        y_val_batches = np.array_split(y_val, valNumBatches)\n        if not noInit:\n            sess.run(tf.global_variables_initializer())\n        X, Y = self.X, self.Y\n        W, B, Z, _ = self.protoNNObj.getModelMatrices()\n        for epoch in range(totalEpochs):\n            for i in range(len(x_train_batches)):\n                batch_x = x_train_batches[i]\n                batch_y = y_train_batches[i]\n                feed_dict = {\n                    X: batch_x,\n                    Y: batch_y\n                }\n                sess.run(self.trainStep, feed_dict=feed_dict)\n                if i % printStep == 0:\n                    loss, acc = sess.run([self.loss, self.accuracy],\n                                         feed_dict=feed_dict)\n                    msg = \"Epoch: %3d Batch: %3d\" % (epoch, i)\n                    msg += \" Loss: %3.5f Accuracy: %2.5f\" % (loss, acc)\n                    print(msg, file=redirFile)\n\n            # Perform Hard thresholding\n            if self.sparseTraining:\n                W_, B_, Z_ = sess.run([W, B, Z])\n                fd_thrsd = {\n                    self.W_th: utils.hardThreshold(W_, self.__sW),\n                    self.B_th: utils.hardThreshold(B_, self.__sB),\n                    self.Z_th: utils.hardThreshold(Z_, self.__sZ)\n                }\n                sess.run(self.__hthOp, feed_dict=fd_thrsd)\n\n            if (epoch + 1) % valStep == 0:\n                acc = 0.0\n                loss = 0.0\n                for j in range(len(x_val_batches)):\n                    batch_x = x_val_batches[j]\n                    batch_y = y_val_batches[j]\n                    feed_dict = {\n                        X: batch_x,\n                        Y: batch_y\n                    }\n                    acc_, loss_ = sess.run([self.accuracy, self.loss],\n                                           feed_dict=feed_dict)\n                    acc += acc_\n                    loss += loss_\n                acc /= len(y_val_batches)\n                loss /= len(y_val_batches)\n                print(\"Test Loss: %2.5f Accuracy: %2.5f\" % (loss, acc))\n\n","repo_name":"microsoft/EdgeML","sub_path":"tf/edgeml_tf/trainer/protoNNTrainer.py","file_name":"protoNNTrainer.py","file_ext":"py","file_size_in_byte":9972,"program_lang":"python","lang":"en","doc_type":"code","stars":1499,"dataset":"github-code","pt":"48"} +{"seq_id":"2906102252","text":"import pandas as pd\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plot\nfrom sklearn.metrics import accuracy_score, confusion_matrix, roc_curve\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import ensemble\nfrom sklearn.externals import joblib\n\n\ndef F1_score(confusion_max):\n    precision = []\n    recall = []\n    F1 = []\n    class_num = len(confusion_max)\n    for i in range(class_num):\n        temp_row = confusion_max[i]\n        TP = temp_row[i]\n        FN_sum = sum(temp_row)\n        temp_column = 
confusion_max[:, i]\n FP_sum = sum(temp_column)\n pre = TP / max(FP_sum, 1)\n rec = TP / max(FN_sum, 1)\n f1 = (2 * pre * rec) / max((pre + rec), 1)\n F1.append(f1)\n precision.append(pre)\n recall.append(rec)\n print(\"F1\")\n print(F1)\n print(\"precision\")\n print(precision)\n print(\"recall\")\n print(recall)\n F_score = ((1 / len(F1)) * sum(F1)) ** 2\n return F_score\n\n\ndef decode(encode_list):\n final_re = []\n for i in encode_list:\n if i == 0:\n final_re.append(89950166)\n if i == 1:\n final_re.append(89950167)\n if i == 2:\n final_re.append(89950168)\n if i == 3:\n final_re.append(99999825)\n if i == 4:\n final_re.append(99999826)\n if i == 5:\n final_re.append(99999827)\n if i == 6:\n final_re.append(99999828)\n if i == 7:\n final_re.append(99999830)\n return final_re\n\n\ndef type_final(bin, pro):\n num_train = len(bin)\n final_type = []\n for i in range(num_train):\n one_hot = bin.iloc[i].tolist()\n temp_site = [j for j in range(0, 7) if one_hot[j] == 0]\n if (len(temp_site) > 1) | (len(temp_site) == 0):\n pro_list = pro.iloc[i].tolist()\n final_type.append(pro_list.index(max(pro_list)))\n else:\n final_type.append(temp_site[0])\n return final_type\n\n\ndef clases_train(dataframe_raw, paralist, submit_dataframe):\n user_id_total = dataframe_raw[\"user_id\"].tolist()\n service_type = dataframe_raw['service_type_encode'].tolist()\n dataframe_raw = dataframe_raw[paralist]\n # dataframe_raw = dataframe_raw.drop([\"user_id\"], axis=1)\n submit_dataframe = submit_dataframe[paralist]\n submit_dataframe_fin = submit_dataframe.drop([\"user_id\"], axis=1)\n num_total = len(service_type)\n prediction_type = []\n prediction_probability = []\n test_type = []\n test_probability = []\n # label_train_set, label_test_set, data_train_set, data_test_set = train_test_split(service_type, dataframe_raw, test_size=0.1)\n\n ixval = 0\n idxtest = [a for a in range(num_total) if a % 10 == ixval % 10]\n idxtrain = [a for a in range(num_total) if a % 10 != ixval % 10]\n label_train_set = [service_type[r] for r in idxtrain]\n label_test_set = [service_type[r] for r in idxtest]\n data_train_set = dataframe_raw.iloc[idxtrain]\n data_test_set = dataframe_raw.iloc[idxtest]\n data_test_set_fin = data_test_set.drop(['user_id'], axis=1)\n # id_user = data_test[\"user_id\"].tolist()\n # test_label_set = [service_type[user_id_total.index(y)] for y in id_user]\n\n train_num = len(data_train_set)\n inxval = 0\n testidx = [a for a in range(train_num) if a % 10 == inxval % 10]\n trainidx = [a for a in range(train_num) if a % 10 != inxval % 10]\n data_train = data_train_set.iloc[trainidx]\n data_test = data_train_set.iloc[testidx]\n data_train_fin = data_train.drop(['user_id'], axis=1)\n data_test_fin = data_test.drop(['user_id'], axis=1)\n for i in range(0, 8):\n type_positive_sample_temp_site = []\n type_negative_sample_temp_site = []\n re_service_type = []\n for j in range(train_num):\n if label_train_set[j] == i:\n type_positive_sample_temp_site.append(j)\n re_service_type.append(0)\n else:\n type_negative_sample_temp_site.append(j)\n re_service_type.append(1)\n # temp_positive_sample = service_type.iloc[type_positive_sample_temp_site]\n # temp_negative_sample = service_type.iloc[type_negative_sample_temp_site]\n # dataframe_raw['re_current_service'] = re_service_type\n # dataframe_new = dataframe_raw[paralist]\n\n # label_train, label_test, data_train, data_test = train_test_split(re_service_type, dataframe_raw, test_size=0.1)\n\n label_train = [re_service_type[r] for r in trainidx]\n label_test = 
[re_service_type[r] for r in testidx]\n\n\n # id_index_train = data_train[\"user_id\"].tolist()\n # id_index_test = data_test[\"user_id\"].tolist()\n # data_train_fin = data_train.drop(['user_id'], axis=1)\n # data_test_fin = data_test.drop(['user_id'], axis=1)\n\n iTrees = 350\n depth = None\n maxFeat = 0.36\n classweight = None\n RFModel = ensemble.RandomForestClassifier(n_estimators=iTrees, max_depth=depth, max_features=maxFeat, n_jobs=-1,\n class_weight=classweight, oob_score=False, random_state=531)\n RFModel.fit(data_train_fin, label_train)\n # Accumulate auc on test set\n prediction = RFModel.predict(data_test_fin)\n correct = accuracy_score(label_test, prediction)\n prediction_train = RFModel.predict(data_train_fin)\n correct_train = accuracy_score(label_train, prediction_train)\n print(\"train correct\")\n print(correct_train)\n # generate confusion matrix\n pList = prediction.tolist()\n confusionMat = confusion_matrix(label_test, pList)\n print(\"F1 Score test\")\n print(F1_score(confusionMat))\n\n prediction_test = RFModel.predict(data_test_set_fin)\n test_type.append(prediction_test)\n test_pro_temp = RFModel.predict_proba(data_test_set_fin)\n num_test = len(data_test_set_fin)\n test_probability.append([test_pro_temp[p][0] for p in range(num_test)])\n\n prediction_type.append(RFModel.predict(submit_dataframe_fin))\n pro_temp = RFModel.predict_proba(submit_dataframe_fin)\n num_sub = len(submit_dataframe_fin)\n prediction_probability.append([pro_temp[v][0] for v in range(num_sub)])\n\n test_type_array = np.array(test_type).transpose()\n test_probability_array = np.array(test_probability).transpose()\n test_type_df = pd.DataFrame(test_type_array)\n test_probability_df = pd.DataFrame(test_probability_array)\n test_type_final = type_final(test_type_df, test_probability_df)\n\n confusionMat_test = confusion_matrix(label_test_set, test_type_final)\n print(label_test_set)\n print(test_type_final)\n f1_test = F1_score(confusionMat_test)\n print(\"F1_test\")\n print(f1_test)\n # pd.DataFrame({\"service_type\":test_type_final}).to_csv(r\"E:\\CCFDF\\plansmatching\\data\\raw data\\stacking_RFbinary_1.csv\")\n\n prediction_type_array = np.array(prediction_type).transpose()\n prediction_probability_array = np.array(prediction_probability).transpose()\n prediction_type_df = pd.DataFrame(prediction_type_array)\n prediction_probability_df = pd.DataFrame(prediction_probability_array)\n\n return prediction_type_df, prediction_probability_df\n\n\n\n\n\nraw_data = pd.read_csv(r\"E:\\CCFDF\\plansmatching\\data\\raw data\\final_data\\train_combine_4_encode_precentage.csv\",\n encoding=\"utf-8\", low_memory=False)\n\n# raw_data = pd.read_csv(r\"/Users/peterlee/Documents/CCFDF18/final_data/train_combine_4_encode_precentage.csv\",\n# encoding=\"utf-8\", low_memory=False)\n# submit_data = pd.read_csv(r\"/Users/peterlee/Documents/CCFDF18/final_data/train_combine_4_encode_precentage.csv\",\n# encoding=\"utf-8\", low_memory=False)\n# submit_data = submit_data.iloc[0:100]\n\npara_list = ['is_mix_service', 'online_time', '1_total_fee', '2_total_fee', '3_total_fee', '4_total_fee',\n '1_total_fee_norm', '2_total_fee_norm', '3_total_fee_norm', '4_total_fee_norm',\n 'month_traffic', 'many_over_bill', 'contract_type', 'contract_time',\n 'is_promise_low_consume', 'net_service', 'pay_times', 'pay_num', 'last_month_traffic',\n 'local_trafffic_month', 'local_caller_time', 'service1_caller_time', 'service2_caller_time', 'gender',\n 'age', 'complaint_level', 'former_complaint_num', 'former_complaint_fee',\n 'fee_mean', 
'fee_std', 'fee_fluctuate', 'fee_mean_2',\n 'service_caller_time_fluctuate', 'online_time_norm', 'fee_mean_norm', 'fee_std_norm',\n 'fee_fluctuate_norm', 'month_traffic_norm', 'contract_time_norm', 'pay_num_norm',\n 'last_month_traffic_norm', 'local_trafffic_month_norm', 'local_caller_time_norm',\n 'service1_caller_time_norm', 'service2_caller_time_norm', 'age_norm', 'former_complaint_num_norm',\n 'former_complaint_fee_norm', 'fee_mean_2_norm', 'service_caller_time_fluctuate_norm',\n 'month_traffic_precentage', 'contract_time_precentage',\n 'pay_times_precentage', 'pay_num_precentage', 'last_month_traffic_precentage',\n 'local_trafffic_month_precentage', 'local_caller_time_precentage', 'service1_caller_time_precentage',\n 'service2_caller_time_precentage', 'month_traffic_precentage', 'contract_time_precentage',\n 'pay_times_precentage', 'pay_num_precentage', 'last_month_traffic_precentage',\n 'local_trafffic_month_precentage', 'local_caller_time_precentage', 'service1_caller_time_precentage',\n 'service2_caller_time_precentage',\n 'user_id'\n ]\n\nsub_data = pd.read_csv(r\"E:\\CCFDF\\plansmatching\\data\\raw data\\final_data\\test_4_combine.csv\",\n encoding=\"utf-8\", low_memory=False)\n# sub_data = pd.read_csv(r\"/Users/peterlee/Documents/CCFDF18/final_data/test_4_combine.csv\",\n# encoding=\"utf-8\", low_memory=False)\n\ntype_binary, pro_binary = clases_train(raw_data, para_list, sub_data)\ntype_sub = type_final(type_binary, pro_binary)\ndecode_list = decode(type_sub)\nuser_id_4 = sub_data[\"user_id\"]\nsubmit_result = pd.read_csv(r\"E:\\CCFDF\\plansmatching\\data\\raw data\\final_data\\result_test\\result_class_1_4.csv\",\n encoding=\"utf-8\",\n low_memory=False)\norigin_id = submit_result[\"user_id\"].tolist()\norigin_result = submit_result[\"current_service\"].tolist()\nnum_4 = len(user_id_4)\nfor i in range(num_4):\n origin_result[origin_id.index(user_id_4[i])] = decode_list[i]\nfinal_da = pd.DataFrame({\"user_id\": origin_id, \"current_service\": origin_result})\nfinal_da.to_csv(r\"E:\\CCFDF\\plansmatching\\data\\raw data\\final_data\\result_test\\result_binary.csv\")\n\n\n\n\n\n\n\n\n\n","repo_name":"PrideLee/CCFDF-Personalized-Matching-Model-of-Packages-for-Telecom-Users","sub_path":"code/RF_binary_class.py","file_name":"RF_binary_class.py","file_ext":"py","file_size_in_byte":10780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30997105191","text":"'''\nCreated on Oct 6, 2016\n\n@author: svanhmic\n'''\nfrom pyspark import SparkContext\nfrom pyspark.sql import SQLContext\nimport numpy as np\nimport re\nimport matplotlib.pyplot as plt\n#from RegnskabsClass import Regnskaber\n\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\nsc = SparkContext(\"local[8]\",\"PlotingData\" )#pyFiles=['/home/svanhmic/workspace/Python/Erhvervs/src/RegnSkabData/RegnskabsClass.py'])\nsqlContext = SQLContext(sc)\n#sc.addPyFile('/home/svanhmic/workspace/Python/Erhvervs/src/RegnSkabData/RegnskabsClass.py')\n\ndef plotFeatureRepresentation(df):\n groupedPivotCount = (df\n .groupBy()\n .avg()\n .collect())\n #print(groupedPivotCount)\n countDict = groupedPivotCount[0].asDict()\n countDict08 = {}\n countDict06 = {}\n countDict04 = {}\n countDict02 = {}\n countDict00 = {}\n \n for (k,v) in countDict.items():\n if v >= 0.8:\n countDict08[k] = v\n elif v >= 0.6 and v < 0.8:\n countDict06[k] = v\n elif v >= 0.4 and v < 0.6:\n countDict04[k] = v\n elif v >= 0.2 and v < 0.4:\n countDict02[k] = v\n else:\n countDict00[k] = v\n #print(\"keys: 
\" + str(k)+\" Values: \"+str(v))\n for (k,v) in countDict00.items():\n print(\"keys: \" + str(k)+\" Values: \"+str(v))\n arrayDict = [countDict00,countDict02,countDict04,countDict06,countDict08]\n intervalArr = [0,0.2,0.4,0.6,0.8]\n \n for (i,val) in enumerate(arrayDict):\n fig = plt.figure(i)\n ax = fig.add_subplot(111)\n ind = np.arange(len(val))\n width = 0.3\n plot1 = ax.barh(ind,val.values(),width,color=\"green\")\n ax.set_xlim(0,(intervalArr[i]+0.21))\n ax.set_yticks(ind+width)\n ytickNames = ax.set_yticklabels([re.sub(r'\\W',\"\",re.sub(r'avg',\"\", x, )) for x in val.keys()])\n plt.title(\"Representation of Features with average value above \"+str(intervalArr[i]))\n plt.setp(ytickNames, rotation=0, fontsize=10)\n plt.show()\n\nif __name__ == '__main__':\n \n cleanedCsvPath = \"/home/svanhmic/workspace/Python/Erhvervs/data/regnskabsdata/sparkdata/csv\"\n dfRegnskabsCount = sqlContext.read.csv(path=cleanedCsvPath+\"/pivotRowDataCounts.csv\", sep=\";\", header=True, encoding=\"utf-8\",inferSchema=True)\n #dfRegnskabsCount.show()\n plotFeatureRepresentation(dfRegnskabsCount)","repo_name":"mssalvador/RegnSkabData","sub_path":"PlotData.py","file_name":"PlotData.py","file_ext":"py","file_size_in_byte":2390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3365538255","text":"import functools\nfrom multiprocessing import Process\nfrom multiprocessing import Queue\nimport traceback\n\nfrom six.moves import socketserver\n\n\nclass TestsTimeoutException(Exception):\n pass\n\n\ndef time_limit(seconds, fp, func, *args, **kwargs):\n\n if fp:\n if not hasattr(fp, 'write'):\n raise TypeError(\"Expected 'file-like' object, got '%s'\" % fp)\n else:\n def record(msg):\n fp.write(msg)\n else:\n def record(msg):\n return\n\n def capture_results(msg_queue, func, *args, **kwargs):\n try:\n result = func(*args, **kwargs)\n except Exception as e:\n msg_queue.put(\n \"Running function '%s' resulted in exception '%s' with \"\n \"message: '%s'\\n\" % (func.__name__, e.__class__.__name__, e))\n # no point re-raising an exception from the subprocess, instead\n # return False\n return False\n else:\n msg_queue.put(\n \"Running function '%s' finished with result '%s', and\"\n \"stack:\\n%s\\n\" % (func.__name__, result,\n traceback.format_stack()))\n return result\n\n messages = Queue()\n # although creating a separate process is expensive it's the only way to\n # ensure cross platform that we can cleanly terminate after timeout\n p = Process(target=functools.partial(capture_results, messages, func),\n args=args, kwargs=kwargs)\n p.start()\n p.join(seconds)\n if p.is_alive():\n p.terminate()\n while not messages.empty():\n record(messages.get())\n record(\"Running function '%s' did not finish\\n\" % func.__name__)\n\n raise TestsTimeoutException\n else:\n while not messages.empty():\n record(messages.get())\n record(\"Running function '%s' finished with exit code '%s'\\n\"\n % (func.__name__, p.exitcode))\n\n\nclass NullServer(socketserver.TCPServer):\n\n request_queue_size = 1\n\n def __init__(self, server_address, *args, **kwargs):\n # TCPServer is old style in python 2.x so cannot use\n # super() correctly, explicitly call __init__.\n\n # simply init'ing is sufficient to open the port, which\n # with the server not started creates a black hole server\n socketserver.TCPServer.__init__(\n self, server_address, socketserver.BaseRequestHandler,\n *args, 
**kwargs)\n","repo_name":"ceph/python-jenkins","sub_path":"tests/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"} +{"seq_id":"35156278365","text":"from django.conf.urls import patterns, url\nimport views\n\nurlpatterns = patterns('',\n url(r'^$', views.show_cart, name='cart'),\n url(r'^add/(?P\\d+)/$', views.add_to_cart, name='add_to_cart'),\n url(r'^remove/(?P\\d+)/$', views.remove_from_cart, name='remove_from_cart'),\n url(r'^update/$', views.update_cart, name='update_cart'),\n\n url(r'^order/$', views.order_cart, name='order_cart'),\n url(r'^order/success/$', views.cart_order_success, name='cart_order_success'),\n url(r'^order/cancelled/$', views.cart_order_cancelled, name='cart_order_cancelled'),\n url(r'^order/error/$', views.cart_order_error, name='cart_order_error'),\n url(r'^order/cancelled/(?P\\d+)/$', views.cart_order_cancelled, name='cart_order_cancelled'),\n)\n","repo_name":"vgaicuks/django-qshop-vat","sub_path":"qshop/cart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17410660255","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom hwt.hdl.constants import Time, READ, WRITE, NOP\nfrom hwt.interfaces.std import BramPort_withoutClk\nfrom hwtLib.abstract.discoverAddressSpace import AddressSpaceProbe\nfrom hwtLib.amba.axiLite_comp.endpoint_arr_test import AxiLiteEndpointArrayTC\nfrom hwtLib.amba.axiLite_comp.endpoint_test import AxiLiteEndpointTC, \\\n structTwoFieldsDense, structTwoFieldsDenseStart, AxiLiteEndpointDenseTC, \\\n AxiLiteEndpointDenseStartTC\nfrom hwtLib.xilinx.ipif.endpoint import IpifEndpoint\nfrom hwtLib.xilinx.ipif.intf import Ipif\nfrom hwtLib.xilinx.ipif.simMaster import IPFISimMaster\nfrom pyMathBitPrecise.bit_utils import mask\n\n\ndef addrGetter(intf):\n if isinstance(intf, Ipif):\n return intf.bus2ip_addr\n elif isinstance(intf, BramPort_withoutClk):\n return intf.addr\n else:\n raise TypeError(intf)\n\n\nclass IpifEndpointTC(AxiLiteEndpointTC):\n FIELD_ADDR = [0x0, 0x4]\n\n def mkRegisterMap(self, u):\n self.addrProbe = AddressSpaceProbe(u.bus, addrGetter)\n self.regs = IPFISimMaster(u.bus, self.addrProbe.discovered)\n\n def mySetUp(self, data_width=32):\n u = self.u = IpifEndpoint(self.STRUCT_TEMPLATE)\n\n self.DATA_WIDTH = data_width\n u.DATA_WIDTH = self.DATA_WIDTH\n\n self.compileSimAndStart(self.u, onAfterToRtl=self.mkRegisterMap)\n return u\n\n def randomizeAll(self):\n pass\n\n def test_nop(self):\n u = self.mySetUp(32)\n\n self.randomizeAll()\n self.runSim(100 * Time.ns)\n\n self.assertEmpty(u.bus._ag.r_data)\n self.assertIs(u.bus._ag.actual, NOP)\n self.assertEmpty(u.decoded.field0._ag.dout)\n self.assertEmpty(u.decoded.field1._ag.dout)\n\n def test_read(self):\n u = self.mySetUp(32)\n MAGIC = 100\n A = self.FIELD_ADDR\n u.bus._ag.requests.extend([(READ, A[0]),\n (READ, A[1]),\n (READ, A[0]),\n (READ, A[1])\n ])\n\n u.decoded.field0._ag.din.append(MAGIC)\n u.decoded.field1._ag.din.append(MAGIC + 1)\n\n self.randomizeAll()\n self.runSim(300 * Time.ns)\n\n self.assertValSequenceEqual(u.bus._ag.r_data, [MAGIC,\n MAGIC + 1,\n MAGIC,\n MAGIC + 1])\n\n def test_write(self):\n u = self.mySetUp(32)\n MAGIC = 100\n A = self.FIELD_ADDR\n m = mask(32 // 8)\n u.bus._ag.requests.extend([\n (WRITE, A[0], MAGIC, m),\n (WRITE, A[1], MAGIC + 1, m),\n (WRITE, A[0], MAGIC + 2, m),\n (WRITE, 
A[1], MAGIC + 3, m)])\n\n self.randomizeAll()\n self.runSim(400 * Time.ns)\n\n self.assertValSequenceEqual(u.decoded.field0._ag.dout, [MAGIC,\n MAGIC + 2\n ])\n self.assertValSequenceEqual(u.decoded.field1._ag.dout, [MAGIC + 1,\n MAGIC + 3\n ])\n\n\nclass IpifEndpointDenseTC(IpifEndpointTC):\n STRUCT_TEMPLATE = structTwoFieldsDense\n FIELD_ADDR = [0x0, 0x8]\n\n def test_registerMap(self):\n AxiLiteEndpointDenseTC.test_registerMap(self)\n\n\nclass IpifEndpointDenseStartTC(IpifEndpointTC):\n STRUCT_TEMPLATE = structTwoFieldsDenseStart\n FIELD_ADDR = [0x4, 0x8]\n\n def test_registerMap(self):\n AxiLiteEndpointDenseStartTC.test_registerMap(self)\n\n\nclass IpifEndpointArray(AxiLiteEndpointArrayTC):\n FIELD_ADDR = [0x0, 0x10]\n mkRegisterMap = IpifEndpointTC.mkRegisterMap\n mySetUp = IpifEndpointTC.mySetUp\n\n def randomizeAll(self):\n pass\n\n def test_nop(self):\n u = self.mySetUp(32)\n MAGIC = 100\n\n for i in range(8):\n u.decoded.field0._ag.mem[i] = MAGIC + 1 + i\n u.decoded.field1._ag.mem[i] = 2 * MAGIC + 1 + i\n\n self.randomizeAll()\n self.runSim(100 * Time.ns)\n\n self.assertEmpty(u.bus._ag.r_data)\n for i in range(8):\n self.assertValEqual(u.decoded.field0._ag.mem[i], MAGIC + 1 + i)\n self.assertValEqual(u.decoded.field1._ag.mem[i], 2 * MAGIC + 1 + i)\n\n def test_read(self):\n u = self.mySetUp(32)\n # u.bus._ag._debug(sys.stdout)\n regs = self.regs\n MAGIC = 100\n # u.bus._ag.requests.append(NOP)\n for i in range(4):\n u.decoded.field0._ag.mem[i] = MAGIC + i + 1\n u.decoded.field1._ag.mem[i] = 2 * MAGIC + i + 1\n regs.field0[i].read()\n regs.field1[i].read()\n\n self.randomizeAll()\n self.runSim(200 * Time.ns)\n\n self.assertValSequenceEqual(u.bus._ag.r_data,\n [MAGIC + 1,\n 2 * MAGIC + 1,\n MAGIC + 2,\n 2 * MAGIC + 2,\n MAGIC + 3,\n 2 * MAGIC + 3,\n MAGIC + 4,\n 2 * MAGIC + 4,\n ])\n\n def test_write(self):\n u = self.mySetUp(32)\n regs = self.regs\n MAGIC = 100\n\n for i in range(4):\n u.decoded.field0._ag.mem[i] = None\n u.decoded.field1._ag.mem[i] = None\n regs.field0[i].write(MAGIC + i + 1)\n regs.field1[i].write(2 * MAGIC + i + 1)\n\n self.randomizeAll()\n self.runSim(400 * Time.ns)\n\n self.assertEmpty(u.bus._ag.r_data)\n for i in range(4):\n self.assertValEqual(\n u.decoded.field0._ag.mem[i], MAGIC + i + 1, f\"index={i:d}\")\n self.assertValEqual(\n u.decoded.field1._ag.mem[i], 2 * MAGIC + i + 1, f\"index={i:d}\")\n\n\nif __name__ == \"__main__\":\n import unittest\n _ALL_TCs = [IpifEndpointTC, IpifEndpointDenseTC, IpifEndpointDenseStartTC, IpifEndpointArray]\n testLoader = unittest.TestLoader()\n loadedTcs = [testLoader.loadTestsFromTestCase(tc) for tc in _ALL_TCs]\n suite = unittest.TestSuite(loadedTcs)\n runner = unittest.TextTestRunner(verbosity=3)\n runner.run(suite)\n","repo_name":"Nic30/hwtLib","sub_path":"hwtLib/xilinx/ipif/endpoint_test.py","file_name":"endpoint_test.py","file_ext":"py","file_size_in_byte":6442,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"} +{"seq_id":"36127018665","text":"# coding: UTF-8\n# ----------------------\n# Author : fzh\n# Time : 2017/5/17\nimport sys\nfrom importlib import reload\n\nfrom Common import Common\n\ncrupath = sys.path[0]\n# scriptpath=os.path.join(crupath,'common')\n# sys.path.append(scriptpath)\n\nimport inifile\n\n\nclass Login(Common):\n\tdef __init__(self,server,serve_yy,moduleName):\n\t\tCommon.__init__(self,server,serve_yy,moduleName)\n\n\tdef runCase(self,persons):\n\n\t\tmediaId = self.get_mediaId()\n\t\tprint('mediaId:'+str(mediaId))\n\t\tmediaId = 
int(mediaId)\n\n\t\tuserId=self.get_pid()\n\t\tpid=int(userId)\n\n\t\t#话题投诉==========1.0.11-1===================\n\t\tbodys={}\n\t\tbodys['comment']='投诉媒体'\n\t\tbodys['mediaId']=mediaId\n\t\tclaim = self.ants_claim(bodys,persons[1])\n\t\tprint(claim)\n\t\tif claim ==-1 or claim ==-100:\n\t\t\tself.write_str('1.0.11-1 投诉媒体异常 error')\n\t\t\tself.write_email_log('1.0.11-1','投诉媒体异常','error')\n\t\telif claim =='success':\n\t\t\tself.write_str(\"1.0.11-1 投诉媒体 success\")\n\t\t\tself.write_email_log('1.0.11-1',\"投诉媒体\",\"success\")\t\n\t\telse:\n\t\t\tself.write_str(\"1.0.11-1 投诉媒体 fail\")\n\t\t\tself.write_email_log('1.0.11-1',\"投诉媒体\",\"fail\")\t\n\n\t\t#轮播============1.0.11-2==============\n\t\tcarouselsUser_time1=self.get_now_time()\n\t\tcarousels_user = self.ants_carousels('carousel',pid)\n\t\tcarouselsUser_time2=self.get_now_time()\n\t\tprint(carousels_user)\n\t\tif carousels_user==-1 or carousels_user == -100:\n\t\t\tself.write_str('1.0.11-2 轮播接口异常 error')\n\t\t\tself.write_email_log('1.0.11-2','轮播接口异常','error')\n\t\telif len(carousels_user)>0:\n\t\t\tself.write_str('1.0.11-2 轮播接口carousel获取 success,耗时:'+str(self.get_time_long(carouselsUser_time1,carouselsUser_time2)))\n\t\t\tself.write_email_log('1.0.11-2','轮播接口carousel获取,耗时:'+str(self.get_time_long(carouselsUser_time1,carouselsUser_time2)),'success')\n\t\telse:\n\t\t\tself.write_str('1.0.11-2 轮播接口carousel获取 fail')\n\t\t\tself.write_email_log('1.0.11-2','轮播接口carousel获取','fail')\n\n\t\t#热门标签============1.0.11-2==============\n\t\tcarouselsUser_time1=self.get_now_time()\n\t\tcarousels_user = self.ants_carousels('tagCategory',pid)\n\t\tcarouselsUser_time2=self.get_now_time()\n\t\tprint(carousels_user)\n\t\tif carousels_user==-1 or carousels_user == -100:\n\t\t\tself.write_str('1.0.11-2 轮播接口异常 error')\n\t\t\tself.write_email_log('1.0.11-2','轮播接口异常','error')\n\t\telif len(carousels_user)>0:\n\t\t\tself.write_str('1.0.11-2 轮播接口tagCategory获取 success,耗时:'+str(self.get_time_long(carouselsUser_time1,carouselsUser_time2)))\n\t\t\tself.write_email_log('1.0.11-2','轮播接口tagCategory获取,耗时:'+str(self.get_time_long(carouselsUser_time1,carouselsUser_time2)),'success')\n\n\t\t\tfor x in range(len(carousels_user)):\n\t\t\t\ttagId=carousels_user[x]['id']\n\t\t\t\tprint('tagId:'+str(tagId))\n\n\t\t\t\t#标签最热列表\n\t\t\t\tcity_time1=self.get_now_time()\n\t\t\t\ttags_hot = self.ants_tags_hot(tagId,pid)\n\t\t\t\tprint(tags_hot)\n\t\t\t\tcity_time2=self.get_now_time()\n\t\t\t\tif tags_hot == -1 or tags_hot == -100:\n\t\t\t\t\tself.write_str('1.0.11-3 标签'+str(tagId)+'最热列表异常 error')\n\t\t\t\t\tself.write_email_log('1.0.11-3',\"标签\"+str(tagId)+\"用户列表异常\",\"error\")\n\t\t\t\telif len(tags_hot)>0:\n\t\t\t\t\tself.write_str('1.0.11-3 标签'+str(tagId)+'最热列表 success,耗时'+str(self.get_time_long(city_time1,city_time2)))\n\t\t\t\t\tself.write_email_log('1.0.11-3',\"标签\"+str(tagId)+\"最热列表,耗时\"+str(self.get_time_long(city_time1,city_time2)),\"success\")\n\t\t\t\telse:\n\t\t\t\t\tself.write_str('1.0.11-3 标签'+str(tagId)+'最热列表为空 fail')\n\t\t\t\t\tself.write_email_log('1.0.11-3',\"标签\"+str(tagId)+\"最热列表为空\",\"fail\")\n\n\t\telse:\n\t\t\tself.write_str('1.0.11-2 轮播接口tagCategory获取 fail')\n\t\t\tself.write_email_log('1.0.11-2','轮播接口tagCategory获取','fail')\n\n\t\t#闪屏============1.0.11-2==============\n\t\tcarouselsUser_time1=self.get_now_time()\n\t\tcarousels_user = self.ants_screen(pid)\n\t\tcarouselsUser_time2=self.get_now_time()\n\t\tprint(carousels_user)\n\t\tif carousels_user==-1 or carousels_user == -100:\n\t\t\tself.write_str('1.0.11-4 splash_screen信息列表异常 
error')\n\t\t\tself.write_email_log('1.0.11-4 ','splash_screen信息列表异常','error')\n\t\telif len(carousels_user)>0:\n\t\t\tself.write_str('1.0.11-4 splash_screen信息列表 success,耗时:'+str(self.get_time_long(carouselsUser_time1,carouselsUser_time2)))\n\t\t\tself.write_email_log('1.0.11-4','splash_screen信息列表,耗时:'+str(self.get_time_long(carouselsUser_time1,carouselsUser_time2)),'success')\n\t\telse:\n\t\t\tself.write_str('1.0.11-4 splash_screen信息列表 fail')\n\t\t\tself.write_email_log('1.0.11-4','splash_screen信息列表','fail')\n\n\t\t# 媒体详情==========1.0.1——1.1 ===================\n\t\tdetail_time1 = self.get_now_time();\n\t\tmediaInfoResult_init = self.ants_media_detail(mediaId,pid)\n\t\tprint(mediaInfoResult_init)\n\t\tdetail_time2 = self.get_now_time();\n\t\tif mediaInfoResult_init ==-1 or mediaInfoResult_init ==-100:\n\t\t\tself.write_str(u\"1.0.11-5 媒体详情异常 error\")\n\t\t\tself.write_email_log('1.0.11-5',\"媒体详情异常,耗时\"+str(self.get_time_long(detail_time1,detail_time2)),\"error\")\n\t\telse:\n\t\t\tvisits_init =mediaInfoResult_init[0]['visits'] \n\t\t\t\n\t\t\t#更新媒体浏览数\n\t\t\tupdateVisit_result=self.ants_update_visit(mediaId,pid)\n\t\t\tprint(updateVisit_result)\n\t\t\tif updateVisit_result == 'success':\n\t\t\t\tself.write_str(u\"1.0.11-6 数据浏览visit更新 success\")\n\t\t\t\tself.write_email_log('1.0.11-6',\"数据浏览visit更新\",\"success\")\n\n\t\t\t\t#获取媒体详情\n\t\t\t\tmediaInfoResult = self.ants_media_detail(mediaId,pid)\n\t\t\t\tif mediaInfoResult ==-1 or mediaInfoResult ==-100:\n\t\t\t\t\tself.write_str(u\"1.0.11-5 媒体详情异常 error\")\n\t\t\t\t\tself.write_email_log('1.0.11-5',\"媒体详情异常\",'error')\n\t\t\t\telse:\n\t\t\t\t\tvisits = mediaInfoResult[0]['visits']\n\t\t\t\t\tprint(visits)\n\t\t\t\t\tprint(visits_init)\n\t\t\t\t\tif visits == visits_init+2:\n\t\t\t\t\t\tself.write_str(u\"1.0.11-7 修改媒体浏览数后,媒体详情visit更新 success\")\n\t\t\t\t\t\tself.write_email_log('1.0.11-7',\"修改媒体浏览数后,媒体详情visit更新\",\"success\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.write_str(\"1.0.11-7 修改媒体浏览数后,媒体详情visit更新 fail\")\n\t\t\t\t\t\tself.write_email_log('1.0.11-7','修改媒体浏览数后,媒体详情visit更新','fail')\n\t\t\telse:\n\t\t\t\tself.write_str(u\"1.0.11-6 数据浏览visit更新 fail\")\n\t\t\t\tself.write_email_log('1.0.11-6',\"数据浏览visit更新\",\"fail\")\n\n\n\ndef main():\n\treload(sys)\n\n\tserver,persons,serve_yy,server_firmware = inifile.get_input_params()\n\n\tcase=Login(server,serve_yy,'eleven')\n\tcase.runCase(persons)\n\nif __name__ == '__main__':\n\tmain()","repo_name":"seasonluowx/python","sub_path":"cases/1.0.11_others.py","file_name":"1.0.11_others.py","file_ext":"py","file_size_in_byte":6561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37706513885","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nfrom setuptools import setup, find_packages\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\nVERSION = [l for l in read('procfs/__init__.py').splitlines()\n if l.startswith('__version__ =')][0].split(\"'\")[1]\n\nkw = {}\nif sys.version_info >= (3,):\n kw['use_2to3'] = True\n\nsetup(\n name='procfs',\n version=VERSION,\n packages=find_packages(),\n author='Philippe Muller',\n author_email='philippe.muller@gmail.com',\n description='Python API for the Linux /proc virtual filesystem',\n long_description=read('README.rst'),\n license='BSD',\n keywords='linux proc procfs system kernel',\n url='https://github.com/pmuller/procfs',\n platforms=['Linux'],\n entry_points={\n 'console_scripts': [\n 'procfs = procfs.cli:run',\n 'procfsd = procfs.http:run',\n ]},\n 
classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: POSIX :: Linux',\n 'Topic :: System :: Operating System Kernels :: Linux',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n **kw\n)\n","repo_name":"pmuller/procfs","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"48"} +{"seq_id":"43983750006","text":"import rsa_crypt\nfrom base64 import b64encode, b64decode\n\nmsg1 = \"Hello Tony, I am Jarvis!\"\nmsg2 = \"Hello Toni, I am Jarvis!\"\nkeysize = 2048\n(public, private) = rsa_crypt.newkeys(keysize)\nencrypted = b64encode(rsa_crypt.encrypt(msg1, public))\ndecrypted = rsa_crypt.decrypt(b64decode(encrypted), private)\nsignature = b64encode(rsa_crypt.sign(msg1, private, \"SHA-512\"))\nverify = rsa_crypt.verify(msg1, b64decode(signature), public)\n\nprint(private.exportKey('PEM'))\nprint(public.exportKey('PEM'))\nprint(\"Encrypted: \" + encrypted)\nprint(\"Decrypted: '%s'\" % decrypted)\nprint(\"Signature: \" + signature)\nprint(\"Verify: %s\" % verify)\nrsa_crypt.verify(msg2, b64decode(signature), public)\n","repo_name":"blueyi/pyAes","sub_path":"rsa_crypt-test.py","file_name":"rsa_crypt-test.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22636913370","text":"from django import forms\nfrom . import models\n\n# Formulario para cadastrar um Tipo de Despesa\nclass TipoDespesasForm(forms.ModelForm):\n class Meta:\n model = models.TipoDespesas\n fields = [\n 'nome'\n ]\n\n# Formulario para cadastrar um Tipo de Faturamento\nclass TipoFaturamentoForm(forms.ModelForm):\n class Meta:\n model = models.TipoFaturamento\n fields = [\n 'nome'\n ]\n\n# Formulário para cadastrar uma Despesa\nclass DespesasForm(forms.ModelForm):\n class Meta:\n model = models.Despesas\n fields = [\n 'nome',\n 'mes',\n 'data_vencimento',\n 'data_pagamento',\n 'valor',\n 'fixo',\n 'tipo'\n ]\n\n# Formulario para cadastrar um Faturamento\nclass FaturamentosForm(forms.ModelForm):\n class Meta:\n model = models.Faturamentos\n fields = [\n 'nome',\n 'mes',\n 'data_vencimento',\n 'data_pagamento',\n 'valor',\n 'fixo',\n 'tipo'\n ]\n\n# Formulario para cadastrar um Fechamento\nclass FechamentosForm(forms.ModelForm):\n class Meta:\n model = models.Fechamentos\n exclude = [\n 'faturamento',\n 'despesas',\n 'balanco'\n ]\n widgets = {\n 'faturamento': forms.HiddenInput(),\n 'despesas': forms.HiddenInput(),\n 'balanco': forms.HiddenInput(),\n }\n\n\n","repo_name":"caiositta/TCC_MC","sub_path":"app_mc/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1482,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11707744592","text":"rc = list(map(int, input().split()))\nrows = rc[0]\ncolumns = rc[1]\nmatrix = []\nfor _ in range(rows):\n row = input().split(\" \")\n matrix.append(row)\n\n\nwhile True:\n command = input().split()\n if command[0] == \"END\":\n break\n if command[0] != \"swap\" or len(command) != 5:\n print(\"Invalid input!\")\n else:\n x1 = int(command[1])\n y1 = int(command[2])\n x2 = int(command[3])\n y2 = int(command[4])\n if x1 >= rows or x2 >= rows or y1 >= columns or y2 >= columns:\n print(\"Invalid input!\")\n else:\n value = matrix[x1][y1]\n 
matrix[x1][y1] = matrix[x2][y2]\n matrix[x2][y2] = value\n for row in matrix:\n print(' '.join([str(x) for x in row]))\n","repo_name":"meroo893/Python-Advanced-SoftUni","sub_path":"Matrices 1/6. Matrix Shuffling.py","file_name":"6. Matrix Shuffling.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33153298837","text":"# encoding: utf-8\n\"\"\"\n@author : zhirui zhou\n@contact: evilpsycho42@gmail.com\n@time : 2020/4/26 14:10\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport deepseries.functional as F\nimport warnings\nwarnings.filterwarnings('ignore')\nplt.style.use('ggplot')\n\n\nclass SeriesAnalysisModel:\n\n \"\"\"https://blog.csdn.net/claroja/article/details/70841382\"\"\"\n\n def __init__(self, series, mask=None, mask_zero=True, mask_nan=True):\n self.series = series\n if mask is None:\n self.mask = F.mask_zero_nan(series, mask_zero, mask_nan)\n else:\n self.mask = np.bitwise_or(mask, F.mask_zero_nan(series, mask_zero, mask_nan))\n self.mask_zero = mask_zero\n self.starts, self.ends = F.get_valid_start_end(self.series, self.mask)\n self.valid_lens = self.ends - self.starts\n self.autocorr = None\n self.trend = None\n self.max_T = None\n\n def get_autocorr(self, n_lags, threshold=1.5, backoffset=0, use_smooth=False):\n self.autocorr = F.batch_autocorr(self.series, n_lags, self.starts, self.ends, threshold, backoffset, use_smooth)\n return self\n\n def plot_autocorr(self, idx=None, figsize=(8, 5)):\n corr = self.autocorr\n if self.series.shape[0] == 1:\n idx = 0\n if idx is None:\n f = plt.figure(figsize=figsize)\n gs = f.add_gridspec(4, 1)\n ax1 = plt.subplot(gs[:2])\n ax2 = plt.subplot(gs[2])\n ax3 = plt.subplot(gs[3])\n\n im = ax1.imshow(corr, aspect=\"auto\", vmin=-1, vmax=1, cmap='coolwarm')\n plt.colorbar(im, extend='both', shrink=0.6, ax=ax1)\n ax1.set_title(\"series autocorr\")\n\n valid_corr = np.ma.array(corr, mask=np.isnan(corr))\n ax2.plot(np.abs(valid_corr).mean(0))\n ax2.set_title(\"mean absolute autocorr over time\")\n\n ax3.hist(np.abs(corr).argmax(1), bins=max(1, corr.shape[1] // 5))\n ax3.set_title(\"time distribution of max absolute autocorr\")\n\n plt.tight_layout()\n else:\n f, ax = plt.subplots(figsize=figsize)\n not_nan_idx = np.where(~np.isnan(corr[idx]))[0]\n ax.plot(not_nan_idx, corr[idx][not_nan_idx])\n ax.set_title(\"series autocorr\")\n\n def plot_valid(self, figsize=(8, 5)):\n f, ax = plt.subplots(1, 1, figsize=figsize)\n im = ax.imshow(self.mask, aspect=\"auto\", vmin=0, vmax=1, cmap='gray', alpha=0.8)\n f.colorbar(im, ax=ax, shrink=0.6)\n ax.set_title(\"series valid value map (zero means valid)\")\n plt.tight_layout()\n return im\n\n def get_trend(self, max_T, use_smooth=True, smooth_windows=5, smooth_ratio=0.5):\n self.trend = F.get_trend(self.series, max_T, use_smooth, smooth_windows, smooth_ratio)\n self.max_T = max_T\n return self\n\n def plot_trend(self, idx=None, figsize=(16, 5), drop_before=None):\n if self.series.shape[0] == 1:\n idx = 0\n if drop_before is None:\n drop_before = self.max_T\n if idx is not None:\n f, ax = plt.subplots(figsize=figsize)\n not_nan_idx = np.where(~np.isnan(self.trend[idx]))[0]\n ax.plot(not_nan_idx, self.trend[idx][not_nan_idx], alpha=0.8, c='red', label='trend')\n ax1 = ax.twinx()\n ax1.plot(self.series[idx], c='blue', alpha=0.8, label='series')\n f.legend(loc='upper right')\n ax1.set_title(f\"series trend\")\n plt.tight_layout()\n else:\n f, ax = plt.subplots(ncols=2, 
figsize=figsize)\n im = ax[0].imshow(np.where(np.isnan(self.trend), 1, self.trend)[:, drop_before:], aspect='auto', cmap='coolwarm', vmin=0.5, vmax=1.5)\n ax[0].set_title(f\"series trend drop before {drop_before}\")\n plt.colorbar(im, extend='both', shrink=0.6, ax=ax[0])\n\n pd.DataFrame(self.trend[:, drop_before:]).median(axis=0).plot(ax=ax[1])\n ax[1].set_title(f'trend median over time drop before {drop_before}')\n plt.tight_layout()\n","repo_name":"EvilPsyCHo/Deep-Time-Series-Prediction","sub_path":"deepseries/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","stars":492,"dataset":"github-code","pt":"48"} +{"seq_id":"14474070646","text":"\"\"\"\nN dimenziós összevonó réteg implementációja. A pool_size paramétertől függően számolja az összevonás lépéseit.\n\"\"\"\nfrom src.layers.layer import Layer\nfrom src.utils import inc\nimport numpy as np\n\nclass MaxPoolLayerND(Layer):\n\n def __init__(self, pool_size, stride=1):\n \"\"\"\n param tuple pool_size: Az összevonás méreteit adhatjuk meg tuple formátumban pl: (2,2).\n param tuple stride: Lépésköz adható meg egyes dimenziók irányában, vagy megadható int-ként is ha minden irányba azonos lépésközt szeretnénk.\n \"\"\"\n if (type(pool_size) == int):\n self.D = 1\n else:\n self.D = len(pool_size)\n self.pool_size=pool_size\n self.stride=stride\n self.a_prev = None\n self.cache = {}\n \n def forward_pass(self, a_prev):\n self.a_prev = np.array(a_prev, copy=True)\n\n n = a_prev.shape[0]\n c = a_prev.shape[-1]\n d_in = np.array(a_prev.shape[1:-1])\n\n d_pool = np.array(self.pool_size)\n d_out = 1 + (d_in-d_pool) // self.stride\n d_index = np.zeros_like(d_out)\n output = np.zeros((n, *d_out, c)) \n\n # max pool loop\n i = len(d_out)-1\n for _ in range(np.prod(d_out)):\n d_start = d_index * self.stride\n d_end = d_start + d_pool\n\n # array slicing params\n s1 = slice(0,n)\n s2 = slice(0,c)\n d_slices = [slice(*x) for x in zip(d_start,d_end)]\n\n a_prev_slice = a_prev[(s1, *d_slices, s2)]\n self.save_mask(x=a_prev_slice, cords=tuple(d_index))\n output[(s1, *d_index, s2)] = np.max(a_prev_slice,\n axis=tuple([x+1 for x in range(self.D)])\n )\n\n # increment pool loop\n d_index[i] += 1\n inc(i, d_index, d_out)\n return output\n\n def back_pass(self, da_curr):\n output = np.zeros(self.a_prev.shape)\n\n n = da_curr.shape[0]\n c = da_curr.shape[-1]\n d_out = np.array(da_curr.shape[1:-1])\n d_pool = np.array(self.pool_size)\n d_index = np.zeros_like(d_out)\n output = np.zeros(self.a_prev.shape)\n\n i = len(d_out)-1\n for _ in range(np.prod(d_out)):\n d_start = d_index * self.stride\n d_end = d_start + d_pool\n \n s1 = slice(0,n)\n s2 = slice(0,c)\n d_slices = [slice(*x) for x in zip(d_start,d_end)]\n d_slices2 = [slice(*x) for x in zip(d_index,d_index+1)]\n\n output[(s1, *d_slices, s2)] += \\\n da_curr[(s1, *d_slices2, s2)] * self.cache[tuple(d_index)]\n\n d_index[i] += 1\n inc(i, d_index, d_out)\n return output\n\n # A max érték pozícióját menti el az összevont területen belül. 
Back propogation során ez alapján vissza lehet állítani az az eredeti inputot\n def save_mask(self, x, cords):\n mask = np.zeros_like(x)\n\n n = x.shape[0]\n c = x.shape[-1]\n dims = np.array(x.shape[1:-1])\n\n x = x.reshape(n, np.prod(dims) , c)\n idx = np.argmax(x, axis=1)\n\n n_idx, c_idx = np.indices((n, c))\n np.reshape(mask,(n, np.prod(dims), c))[n_idx, idx, c_idx] = 1\n self.cache[cords] = mask\n","repo_name":"TopiCsarno/Adaptiv_hazi","sub_path":"src/layers/pool.py","file_name":"pool.py","file_ext":"py","file_size_in_byte":3281,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37205714039","text":"# ---------------------------------------------------#\r\n#\r\n# File : GLU.py\r\n# Author : Soham Deshpande\r\n# Date : December 2021\r\n# Description: Gated Residual Network\r\n#\r\n#\r\n#\r\n# ----------------------------------------------------#\r\n\r\nfrom Activation_functions import ELU, Sigmoid\r\nfrom Imports import nn, torch\r\nfrom Temporal_Layer import *\r\nfrom GLU import *\r\n\r\nclass GRN(nn.Module):\r\n\r\n \"\"\"\r\n Gated Residual Network\r\n\r\n GRN(x) = LayerNorm(a + GLU(Linear(a)))\r\n\r\n Args:\r\n int input_size : Size of the input tensor\r\n int hidden_size : Size of the hidden layer\r\n int output_size : Size of the output layer\r\n float dropout : Fraction between 0 and 1 showing the dropout rate\r\n int context_size: Size of the context vector\r\n bool is_temporal : Decides if the Temporal Layer has to be used or not\r\n\r\n\r\n This unit controls how much of the original input is used. It can skip over\r\n layers where the GLU output might be close to 0.\r\n When there is no context vector present, the GRN will treat the input as 0.\r\n \"\"\"\r\n\r\n def __init__(self, input_size,hidden_size, output_size, dropout,\r\n context_size=None, is_temporal=True):\r\n super().__init__()\r\n self.input_size = input_size\r\n self.hidden_size = hidden_size\r\n self.output_size = output_size\r\n self.dropout = dropout\r\n self.is_temporal = is_temporal\r\n self.c = context_size\r\n\r\n if self.input_size != self.output_size:\r\n self.skip_layer = TemporalLayer(nn.Linear(self.input_size,\r\n self.output_size))\r\n\r\n # Context vector c\r\n if self.context_size != None:\r\n self.c = TemporalLayer(nn.Linear(self.context_size,\r\n self.hidden_size, bias=False))\r\n\r\n # Dense & ELU\r\n self.dense1 = TemporalLayer(nn.Linear(self.input_size,\r\n self.hidden_size))\r\n self.elu = nn.ELU()\r\n\r\n # Dense & Dropout\r\n self.dense2 = TemporalLayer(nn.Linear(self.hidden_size,\r\n self.output_size))\r\n self.dropout = nn.Dropout(self.dropout)\r\n\r\n # Gate, Add & Norm\r\n self.gate = TemporalLayer(GLU(self.output_size))\r\n self.layer_norm = TemporalLayer(nn.BatchNorm1d(self.output_size))\r\n\r\n def forward(self, x, c=None):\r\n a = nn.ELU(self.c(x))\r\n a = self.dropout(self.dense2(a))\r\n\r\n a = self.gate(a)\r\n\r\n if(self.skip != None):\r\n return self.norm(self.skip(x) + a)\r\n return self.norm(x + a)\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"Soham-Deshpande/Stock-TFT","sub_path":"Code/TFT Model/GRN.py","file_name":"GRN.py","file_ext":"py","file_size_in_byte":2596,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"48"} +{"seq_id":"826993247","text":"#!/usr/bin/python3\n\nimport sys\n\n\ndef category_mapper():\n \"\"\" Maps videos to category\n Input format: video_id \\t category \\t trending_date \\t views \\t country\n Output format: category \\t video_id \\t country\n 
\"\"\"\n for line in sys.stdin:\n # Clean input and split it\n parts = line.strip().split(\",\")\n\n # Check that the line is of the correct format\n # If line is malformed, we ignore the line and continue to the next line\n if len(parts) != 12:\n continue\n\n video_id = parts[0]\n likes = parts[6]\n\n if video_id == \"video_id\": # Skipping Header\n continue\n\n print(\"{}\\t{}\\t{}\".format(video_id, likes, 1))\n\n\nif __name__ == \"__main__\":\n category_mapper()\n","repo_name":"n3xus/MapReduce_Example_SameCombinerReducer","sub_path":"category_mapper.py","file_name":"category_mapper.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"38059563085","text":"\"\"\"\nmanipulations with database sqlite\n\"\"\"\nimport itertools\nimport sqlite3\nimport datetime\n\n\nclass Database:\n \"\"\"\n ig users database\n \"\"\"\n def __init__(self, path_to_db='./volume_data/tg.db'):\n self.path_to_db = path_to_db\n\n @property\n def connection(self):\n \"\"\"\n makes a connection to db\n \"\"\"\n return sqlite3.connect(self.path_to_db)\n\n def delete_ig_table(self):\n \"\"\"\n deletes insta table\n :return:\n \"\"\"\n sql = 'DROP TABLE Users_IG'\n self.execute(sql, commit=True)\n\n def execute(self, sql: str, parameters: tuple = None,\n fetchone=False, fetchall=False, commit=False):\n \"\"\"\n executes sql code containing in the sql variable\n \"\"\"\n if not parameters:\n parameters = tuple()\n\n connection = self.connection\n # connection.set_trace_callback(logger)\n\n cursor = connection.cursor()\n cursor.execute(sql, parameters)\n data = None\n if commit:\n connection.commit()\n if fetchall:\n data = cursor.fetchall()\n if fetchone:\n data = cursor.fetchone()\n connection.close()\n\n return data\n\n def create_users_ig(self):\n \"\"\"\n creates a table if it does not exists yet\n \"\"\"\n sql = '''\n CREATE TABLE Users_IG (\n id integer NOT NULL,\n tg_id VARCHAR NOT NULL,\n tg_username VARCHAR(255) NOT NULL,\n login VARCHAR(255) NOT NULL,\n status integer,\n purchase_ending DATE,\n proxy VARCHAR(255) NOT NULL,\n PRIMARY KEY (id)\n );'''\n self.execute(sql, commit=True)\n\n def create_trial_ig(self):\n \"\"\"\n creates a table for trial subscription if it does not exists yet\n \"\"\"\n sql = '''\n CREATE TABLE Trial_IG (\n id integer NOT NULL,\n tg_id integer NOT NULL,\n login VARCHAR(255) NOT NULL,\n PRIMARY KEY (id)\n );'''\n self.execute(sql, commit=True)\n\n def ig_add_user(self, tg_id: str, tg_username: str, login: str):\n \"\"\"\n adds user into database\n \"\"\"\n # from IgSide.dataIns import proxies_data\n sql = 'INSERT INTO Users_IG(tg_id, tg_username, login, status, purchase_ending, proxy) VALUES(?, ?, ?, ?, ?, ?)'\n # proxy = proxies_data[random.randrange(len(proxies_data))]\n parameters = (tg_id, tg_username, login, 0, 0, '104.227.99.250:8000')\n if not self.ig_select_user(tg_id, login):\n self.execute(sql, parameters=parameters, commit=True)\n else:\n return 'Вы уже регистрировали этот аккаунт'\n\n def ig_select_user(self, tg_id, login):\n \"\"\"\n checks whether the user exists and returns one's data\n \"\"\"\n sql = 'SELECT * FROM Users_IG WHERE tg_id = ? 
AND login = ?'\n user_row = self.execute(sql, parameters=(tg_id, login), fetchall=True)\n if user_row:\n user = list(itertools.chain.from_iterable(user_row))\n return user\n else:\n return None\n\n def update_membership(self, login):\n \"\"\"\n makes subscription value to be equal 1 to be returned as true on subscription check\n \"\"\"\n days = datetime.date.today()\n delta = datetime.timedelta(days=30)\n sql = 'UPDATE Users_IG SET status= ?, purchase_ending = ? WHERE login = ?'\n\n self.execute(sql, parameters=(1, days+delta, login), commit=True)\n\n def check_accounts(self, tg_id, tg_username):\n \"\"\"\n returns a list of accounts(login + purchased date) that belong to this tg user. Returns None if there isnt any\n \"\"\"\n sql = 'SELECT login, purchase_ending FROM Users_IG WHERE tg_id = ? AND tg_username = ?'\n users_row = self.execute(sql, parameters=(tg_id, tg_username), fetchall=True)\n if users_row:\n users = list(itertools.chain.from_iterable(users_row))\n return users\n else:\n return None\n\n def check_membership(self, tg_id, login):\n \"\"\"\n returns True or False regarding to the proceed payment\n \"\"\"\n tg_id = str(tg_id)\n sql = 'SELECT status FROM Users_IG WHERE tg_id = ? AND login = ?'\n status_row = self.execute(sql, parameters=(tg_id, login), fetchone=True)\n if status_row:\n status = status_row[0] # needs to be tested, some requests were returning invalid data\n return status\n else:\n return None\n\n def minus_membership(self, login):\n \"\"\"\n makes the value of status to be equal 0\n \"\"\"\n sql = 'UPDATE Users_IG SET status= ? WHERE login = ?'\n self.execute(sql, parameters=(0, login), commit=True)\n\n def trial_select_user(self, tg_id, login):\n \"\"\"\n checks whether the user has ran the trial version already and returns the one's data on success\n \"\"\"\n tg_id = str(tg_id)\n sql = 'SELECT * FROM Trial_IG WHERE tg_id = ? 
AND login = ?'\n users_row = self.execute(sql, parameters=(tg_id, login), fetchall=True)\n if users_row:\n users = list(itertools.chain.from_iterable(users_row))\n return users\n else:\n return None\n\n def trial_add_user(self, tg_id: str, login: str):\n \"\"\"\n adds user into trial_users database\n \"\"\"\n sql = 'INSERT INTO Trial_IG(tg_id, login) VALUES(?, ?)'\n parameters = (tg_id, login)\n if not self.trial_select_user(tg_id, login):\n self.execute(sql, parameters=parameters, commit=True)\n else:\n return 'Вы уже регистрировали этот аккаунт'\n\n def global_check(self):\n \"\"\"\n Suppose to check db every day on whether the subscription's still valid\n Could also add a notifying directly to the user to make him prolong the membership\n \"\"\"\n days = datetime.date.today()\n sql = 'SELECT tg_id, login FROM Users_IG WHERE purchase_ending = ?'\n parameters = (days,)\n users_row = self.execute(sql, parameters=parameters, commit=True)\n if users_row:\n users = list(itertools.chain.from_iterable(users_row))\n for user in users:\n print(user)\n\n\n# def logger(statement):\n# \"\"\"\n# provides stdout logging to simply watch the process\n# \"\"\"\n# print(f\"\"\"\n#\n#\n#\n# Executing:\n# {statement}\n#\n#\n# \"\"\")\n","repo_name":"alice-luc/TeleIG_Bot","sub_path":"utils/db_api/sqlite.py","file_name":"sqlite.py","file_ext":"py","file_size_in_byte":6508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36136952749","text":"from sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nimport tensorflow as tf\n\nfrom .utils import get_feature_combinations\nfrom .data import prep_training_data\n\n\ndef all_nn(processed_df, additional_factors):\n \"\"\"Use neural network with given processed dataframe object and additional factors to predict\n\n Args:\n processed_df (DataFrame): a DataFrame object with well processed data\n additional_factors (list): a list of feature combination that we want to use\n\n Returns:\n list: a list of prediction results\n\n \"\"\"\n all_rmse = []\n all_combinations = get_feature_combinations(additional_factors)\n\n for comb in all_combinations:\n cur_df = processed_df[comb]\n X, y = prep_training_data(cur_df, 10)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Dense(16, activation='relu'))\n model.add(tf.keras.layers.Dense(8, activation='relu'))\n model.add(tf.keras.layers.Dense(1, activation='relu'))\n model.compile(optimizer=\"Adam\", loss=\"mse\")\n\n model.fit(X_train, y_train, epochs=100, verbose=0)\n\n y_pred = model.predict(X_test, verbose=0)\n rmse = mean_squared_error(y_test, y_pred, squared=False)\n all_rmse.append([comb, rmse])\n all_rmse = sorted(all_rmse, key=lambda x: x[1])\n print(*all_rmse, sep=\"\\n\")\n return all_rmse\n","repo_name":"ZiyiXia/CryptoRL","sub_path":"cryptorl/predict_NN.py","file_name":"predict_NN.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"34782177369","text":"from pynput import keyboard\nimport redis\n\nnotes = {\"C4\": 261.63, \"D4\": 293.66, \"E4\": 329.63, \"F4\": 349.23, \"G4\": 392, \"A4\": 440, \"B4\": 493.88}\n\nclient = redis.Redis(host = \"127.0.0.1\", port = 6379, decode_responses = True)\n\nkey_heald = '-1'\n\ndef on_press(key):\n global key_heald\n k = '0'\n try:\n k = key.char\n except:\n pass\n if 
key_heald != '-1':\n return\n elif k in ['a', 'b', 'c', 'd', 'e', 'f', 'g']:\n key_heald = k\n note = str(k.upper() + \"4\")\n client.publish(\"square\", notes[note])\n\ndef on_release(key):\n global key_heald\n key_heald = '-1'\n client.publish(\"square\", \"-1\")\n\nlistener = keyboard.Listener(on_press=on_press, on_release=on_release)\nlistener.start() # start to listen on a separate thread\nlistener.join()","repo_name":"Emergency-Paperclip/python","sub_path":"keypress.py","file_name":"keypress.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11665562764","text":"import numpy as np \nimport matplotlib.pyplot as plt\nfrom numpy import genfromtxt\nfrom simpson import *\n\ntabel = genfromtxt('hydrostatics_table.csv',delimiter = ',')\narea_stns = tabel[0:,0:14]\ntabel = tabel/1000\nvalue = tabel[1:,1:]\nw_lines = tabel[0,1:]\nj=0\nh = tabel[0][2] - tabel[0][1]\nfor i in range(0, value.shape[0]):\n\tfor j in range(0, 13):\n\t\tv = value[i]\n\t\tarea_stns[i+1][j+1] = simpson_3(v, j, h)\n\nnp.savetxt('area_stns.csv', np.transpose(area_stns), delimiter = ',')\n","repo_name":"rushyam/ship","sub_path":"area_stns.py","file_name":"area_stns.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8179263582","text":"import torch.nn as nn\nfrom datasets.molecule.constants import num_fg_types, num_atom_types\nfrom datasets.protein.constants import num_aa_types\nimport torch \nfrom .gvpnet import GVPNet\nfrom .transition import PositionTransition, PLTypeTransition\nimport torch.nn.functional as F\nfrom .loss import *\nimport functools\nfrom tqdm.auto import tqdm\n\nnum_total_type = num_fg_types + num_aa_types + num_atom_types\n\nclass LinkerDPM(nn.Module):\n\n def __init__(\n self, \n node_feat_dim, \n pair_feat_dim, \n num_steps, \n eps_net_opt={'type_out_num': num_total_type}, \n trans_pos_opt={}, \n trans_type_opt={\n 'min_type_num':num_aa_types, \n 'max_type_num':num_aa_types + num_atom_types, \n 'num_classes': num_total_type\n },\n position_mean=[0.0, 0.0, 0.0],\n position_scale=10.0,\n ):\n\n super().__init__()\n self.eps_net = GVPNet(node_feat_dim, pair_feat_dim, **eps_net_opt)\n self.num_steps = num_steps\n self.trans_pos = PositionTransition(num_steps, **trans_pos_opt)\n self.trans_type = PLTypeTransition(num_steps, **trans_type_opt)\n\n self.register_buffer('position_mean', torch.FloatTensor(position_mean).view(1, 1, -1))\n self.register_buffer('position_scale', torch.FloatTensor([position_scale]).view(1, 1, -1))\n self.register_buffer('_dummy', torch.empty([0, ]))\n\n def _normalize_position(self, p):\n p_norm = (p - self.position_mean) / self.position_scale\n return p_norm\n\n def _unnormalize_position(self, p_norm):\n p = p_norm * self.position_scale + self.position_mean\n return p\n\n def forward(\n self, \n p_0, \n s_0, \n mask_generate, \n mask_sample, \n linker_feat=None, \n pair_feat=None, \n denoise_structure=True, \n denoise_type=True, \n t=None\n ):\n\n batch_size = s_0.shape[0]\n\n if t is None:\n t = torch.randint(0, self.num_steps, (batch_size,), dtype=torch.long, device=self._dummy.device)\n elif len(t.shape) == 0:\n t = t.repeat(batch_size)\n\n p_0 = self._normalize_position(p_0)\n\n if denoise_structure:\n p_noisy, eps_p, pos_mask = self.trans_pos.add_noise(p_0, mask_generate, t)\n else:\n p_noisy = p_0.clone()\n eps_p = torch.zeros_like(p_noisy)\n\n if denoise_type:\n # Add 
noise to sequence\n s_0_ignore, s_noisy, type_mask = self.trans_type.add_noise(s_0, mask_generate, t)\n else:\n s_noisy = s_0.clone()\n\n beta = self.trans_pos.var_sched.betas[t]\n eps_p_pred, c_denoised = self.eps_net(\n p_noisy, s_noisy, beta, mask_generate, mask_sample, linker_feat, pair_feat\n ) # (N, L, 3), (N, L, 3, 3), (N, L, 3), (N, L, 20), (N, L)\n\n loss_dict = {}\n\n # Position loss\n loss_pos = F.mse_loss(eps_p_pred, eps_p, reduction='none').sum(dim=-1) # (N, L)\n loss_pos = (loss_pos * pos_mask).sum() / (pos_mask.sum().float() + 1e-8)\n loss_dict['atom_pos'] = loss_pos\n\n # Sequence categorical loss\n c_denoised = self.trans_type.before_softmax(c_denoised)\n loss_type = seq_cross_entropy(c_denoised, s_0_ignore)\n loss_type = (loss_type * type_mask).sum() / (type_mask.sum().float() + 1e-8)\n loss_dict['atom_type'] = loss_type\n\n return loss_dict\n \n @torch.no_grad()\n def sample(\n self, \n p, s, \n mask_generate, mask_sample, \n sample_structure=True, \n sample_type=True,\n pbar=False,\n ):\n \"\"\"\n Args:\n p: Positions of contextual residues, (N, L, 3).\n s: Sequence of contextual residues, (N, L).\n \"\"\"\n N, L = p.shape[:2]\n p = self._normalize_position(p)\n\n # Set the position of residues to be predicted to random values\n if sample_structure:\n p_rand = torch.randn_like(p)\n p_init = torch.where(mask_generate[:, :, None].expand_as(p), p_rand, p)\n else:\n p_init = p\n\n if sample_type:\n s_abosrb = torch.full_like(s, fill_value=num_total_type)\n s_init = torch.where(mask_generate, s_abosrb, s)\n else:\n s_init = s\n\n traj = {self.num_steps: (self._unnormalize_position(p_init), s_init)}\n if pbar:\n pbar = functools.partial(tqdm, total=self.num_steps, desc='Sampling')\n else:\n pbar = lambda x: x\n for t in pbar(range(self.num_steps, 0, -1)):\n p_t, s_t = traj[t]\n p_t = self._normalize_position(p_t)\n \n beta = self.trans_pos.var_sched.betas[t].expand([N, ])\n t_tensor = torch.full([N, ], fill_value=t, dtype=torch.long, device=self._dummy.device)\n\n eps_p, c_denoised = self.eps_net(\n p_t, s_t, beta, mask_generate, mask_sample\n ) # (N, L, 3), (N, L, 3, 3), (N, L, 3)\n\n p_next = self.trans_pos.denoise(p_t, eps_p, mask_generate, t_tensor)\n c_denoised = self.trans_type.before_softmax(c_denoised)\n s_next = self.trans_type.denoise(s_t, c_denoised, mask_generate, t_tensor)\n\n if not sample_structure:\n v_next, p_next = p_t\n if not sample_type:\n s_next = s_t\n\n traj[t-1] = (self._unnormalize_position(p_next), s_next)\n traj[t] = tuple(x.cpu() for x in traj[t]) # Move previous states to cpu memory.\n (final_pos, final_s) = traj[0]\n return (final_pos, final_s), traj","repo_name":"yanliang3612/D3FG","sub_path":"models/diffusion/linkerdpm.py","file_name":"linkerdpm.py","file_ext":"py","file_size_in_byte":5619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70417373906","text":"# -*- coding: utf-8 -*-\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom openerp import api, models\n\n\nclass StockMove(models.Model):\n _inherit = 'stock.move'\n\n @api.multi\n def write(self, vals):\n res = super(StockMove, self).write(vals)\n if res:\n Location = self.env['stock.location']\n ps_locations = Location.get_nuvemshop_stock_locations()\n products = self.filtered(\n lambda x: (x.location_id | x.location_dest_id) &\n ps_locations).mapped('product_id')\n # outgoing_qty is calculated and still has incorrect value in cache\n self.invalidate_cache()\n products.update_nuvemshop_qty()\n return 
res\n","repo_name":"kmee/connector-nuvemshop","sub_path":"connector_nuvemshop/models/stock_move/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29435105582","text":"from facenet_pytorch import MTCNN\nimport cv2\nimport os\nimport sys\nimport torch\nimport argparse\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\ndef run(opt):\n \n \n cls_name = opt.cls_name\n size = opt.size\n\n save_path = os.path.join(opt.save_path, cls_name)\n if os.path.isdir(save_path) == False:\n os.makedirs(save_path)\n count = len(os.listdir(save_path))\n\n mtcnn = MTCNN(device=device)\n cap = cv2.VideoCapture(0)\n\n while True:\n ret, frame = cap.read()\n if not ret:\n cap.release()\n break\n\n faces, _ = mtcnn.detect(frame[:, :, [2,1,0]])\n if not isinstance(faces, type(None)):\n areas = list(map(lambda x: (x[2]-x[0]) * (x[3]-x[1]), faces))\n for i, face in enumerate(faces):\n x1, y1, x2, y2 = list(map(int, face))\n if i == areas.index(max(areas)):\n try:\n filename = os.path.join(save_path, f\"{cls_name}_{count}.jpg\")\n count += 1\n cv2.imwrite(filename, cv2.resize(frame[y1:y2, x1:x2, :], (size, size), interpolation=cv2.INTER_CUBIC))\n except:\n pass\n cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255))\n\n cv2.imshow(\"frame\", frame)\n key = cv2.waitKey(1)\n if key == ord(\"q\"):\n break\n\n\ndef parse():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--save_path\", type=str, default=\"./dataset\")\n parser.add_argument(\"--cls_name\", type=str, default=\"other\")\n parser.add_argument(\"--size\", type=int, default=256)\n\n return parser.parse_args()\n\nif __name__ == \"__main__\":\n opt = parse()\n run(opt)","repo_name":"Choi-Seungho/Driver-Face-Identification","sub_path":"data_generator.py","file_name":"data_generator.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70216588306","text":"import setuptools\nimport platform\nfrom os import walk, path\n\nos_type = platform.system()\nif os_type == 'Linux':\n os_name = 'POSIX :: Linux'\nelif os_type == 'Darwin':\n os_name = 'MacOS'\nelif os_type == 'Windows':\n os_name = 'Microsoft :: Windows'\nelse:\n raise NotImplementedError('current platform {} not supported'.format(os_type))\n\ndata_files = [('bin', ['node-{}-x64/bin/node'.format(os_type.lower())])]\nif os_type == 'Windows':\n data_files = [('.\\Scripts', ['node-{}/node.exe'.format(os_type.lower())])]\n\nfor (dirpath, dirnames, filenames) in walk('./nni'):\n files = [path.normpath(path.join(dirpath, filename)) for filename in filenames]\n data_files.append((path.normpath(dirpath), files))\n\nwith open('../../README.md', 'r') as fh:\n long_description = fh.read()\n\nsetuptools.setup(\n name = 'nni',\n version = '999.0.0-developing',\n author = 'Microsoft NNI team',\n author_email = 'nni@microsoft.com',\n description = 'Neural Network Intelligence package',\n long_description = long_description,\n long_description_content_type = 'text/markdown',\n license = 'MIT',\n url = 'https://github.com/Microsoft/nni',\n packages = setuptools.find_packages('../../tools') + setuptools.find_packages('../../src/sdk/pynni', exclude=['tests']),\n package_dir = {\n 'nni_annotation': '../../tools/nni_annotation',\n 'nni_cmd': '../../tools/nni_cmd',\n 'nni_trial_tool': '../../tools/nni_trial_tool',\n 'nni_gpu_tool': '../../tools/nni_gpu_tool',\n 
'nni': '../../src/sdk/pynni/nni'\n },\n package_data = {'nni': ['**/requirements.txt']},\n python_requires = '>=3.5',\n install_requires = [\n 'schema',\n 'ruamel.yaml',\n 'psutil',\n 'requests',\n 'astor',\n 'PythonWebHDFS',\n 'hyperopt',\n 'json_tricks',\n 'numpy',\n 'scipy',\n 'coverage',\n 'colorama',\n 'sklearn'\n ],\n classifiers = [\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: ' + os_name\n ],\n data_files = data_files,\n entry_points = {\n 'console_scripts' : [\n 'nnictl = nni_cmd.nnictl:parse_args'\n ]\n }\n)\n","repo_name":"xieydd/nni","sub_path":"deployment/pypi/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"71322871185","text":"from typing import List\n\n\nclass Solution:\n # https://leetcode.com/problems/house-robber/\n\n def rob(self, nums: List[int]) -> int:\n size = len(nums)\n if size == 0:\n return 0\n\n # dp[i]: considering houses up to and including index i, dp[i] is the maximum amount that can be robbed\n dp = [0] * size\n\n # base\n if size >= 1:\n dp[0] = nums[0]\n if size >= 2:\n dp[1] = max(nums[1], dp[0])\n\n for i in range(2, size):\n dp[i] = max(\n dp[i - 2] + nums[i], # rob this house\n dp[i - 1] # do not rob\n )\n\n # print(dp)\n return dp[-1]\n\n\nif __name__ == '__main__':\n print(Solution().rob([1, 2, 3, 1]))\n print(Solution().rob([2, 1, 1, 2]))\n","repo_name":"KScaesar/DSA-python","sub_path":"Blind Curated 75/0198. House Robber.py","file_name":"0198. House Robber.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"9471792444","text":"from django.shortcuts import render,redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Category,Expenses\nfrom django.contrib import messages\nfrom django.core.paginator import Paginator\nimport json\nfrom django.http import JsonResponse,HttpResponse\nfrom userpreferences.models import UserPreference\nfrom datetime import datetime,timedelta\nimport csv\nimport xlwt\nimport pdfkit\n# Create your views here.\n# The @login_required decorator prevents the user from going back to the home page after\n# being logged out\n@login_required(login_url='authentication/login')\ndef expenses(request):\n categories = Category.objects.all()\n expenses = Expenses.objects.filter(owner=request.user)\n\n paginator = Paginator(expenses,10)\n page_number = request.GET.get('page')\n page_obj = Paginator.get_page(paginator,page_number)\n # currency = UserPreference.objects.filter(user = request.user).currence\n currency = ''\n context = {\n 'expenses':expenses,\n 'page_obj':page_obj,\n 'currency':currency,\n }\n\n return render(request,'expenses/index.html',context)\n\n@login_required(login_url='authentication/login')\ndef add_expense(request):\n categories = Category.objects.all()\n mydate = request.POST\n context = {\n 'categories':categories,\n 'value':mydate,\n }\n if request.method=='GET':\n return render(request,'expenses/add_expense.html',context)\n\n if request.method == 'POST':\n amount = request.POST['amount']\n description = request.POST['description']\n category=request.POST['category']\n expense_date = request.POST['expense_date']\n \n if not amount:\n messages.error(request,'Amount is required.')\n return render(request,'expenses/add_expense.html',context)\n \n if request.method == 'POST':\n if not description:\n messages.error(request,'Description is required.')\n return 
render(request,'expenses/add_expense.html',context)\n \n\n Expenses.objects.create(owner=request.user, amount=amount,date=expense_date,category=category,description=description)\n\n messages.success(request,'Expenses added successfully')\n\n return redirect('expenses')\n \n\n@login_required(login_url='authentication/login')\ndef edit_expense(request,id):\n expenses= Expenses.objects.get(pk=id)\n categories = Category.objects.all()\n context={\n 'expenses':expenses,\n 'values':expenses,\n 'categories':categories\n }\n if request.method =='GET':\n return render(request,'expenses/edit_expense.html',context)\n\n\n if request.method == 'POST':\n amount = request.POST['amount']\n description = request.POST['description']\n category=request.POST['category']\n expense_date = request.POST['expense_date']\n \n if not amount:\n messages.error(request,'Amount is required.')\n return render(request,'expenses/edit_expense.html',context)\n \n if request.method == 'POST':\n if not description:\n messages.error(request,'Description is required.')\n return render(request,'expenses/edit_expense.html',context)\n \n\n expenses.owner=request.user\n expenses.amount = amount\n expenses.date = expense_date\n expenses.category=category\n expenses.description=description\n \n expenses.save()\n\n\n messages.success(request,'Expense Updated successfully')\n\n return redirect('expenses')\n\n\n@login_required(login_url='authentication/login')\ndef delete_expense(request,id):\n expenses = Expenses.objects.get(pk=id)\n expenses.delete()\n messages.warning(request,'Expense deleted successfully.')\n return redirect('expenses')\n\n\n@login_required(login_url='authentication/login')\ndef search_expense(request):\n if request.method=='POST':\n search_str = json.loads(request.body).get('searchText')\n\n expenses = Expenses.objects.filter(amount__istartswith=search_str,owner=request.user) | Expenses.objects.filter(date__istartswith=search_str,owner=request.user) | Expenses.objects.filter(description__istartswith=search_str,owner=request.user) | Expenses.objects.filter(category__istartswith=search_str,owner=request.user)\n\n data = expenses.values()\n\n return JsonResponse(list(data), safe=False)\n\n\n\ndef expense_category_summary(request):\n today = datetime.now()\n six_months_ago = today - timedelta(days=180)\n expenses = Expenses.objects.filter(owner=request.user ,date__gte = six_months_ago, date__lte = today)\n\n finalrep = {}\n\n def get_category(expense):\n return expense.category\n category_list = list(set(map(get_category,expenses)))\n\n def get_expense_category_amount(category):\n amount = 0\n filter_by_category = expenses.filter(category=category)\n\n for item in filter_by_category:\n amount += item.amount\n\n return amount\n\n for x in expenses:\n for y in category_list:\n finalrep[y]=get_expense_category_amount(y)\n\n return JsonResponse({'expense_category_data':finalrep}, safe=False)\n\n\ndef state_view(request):\n return render(request,'expenses/stats.html')\n\n\n\ndef export_csv(request):\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=Expenses'+str(datetime.now())+'.csv'\n\n\n writer = csv.writer(response)\n writer.writerow(['Amount','Description','Category','Date'])\n\n expenses = Expenses.objects.filter(owner=request.user)\n\n for expense in expenses:\n writer.writerow([expense.amount,expense.description,expense.category,expense.date])\n\n return response\n\n\ndef export_excel(request):\n response = HttpResponse(content_type='application/ms-excel')\n 
response['Content-Disposition'] = 'attachment; filename=Expenses'+str(datetime.now())+'.xls'\n\n\n    wb = xlwt.Workbook(encoding='utf-8')\n    ws=wb.add_sheet('Expenses')\n    row_num = 0\n    font_style=xlwt.XFStyle()\n    font_style.font.bold=True\n\n    columns=['Amount','Description','Category','Date']\n    \n    for col_num in range(len(columns)):\n        ws.write(row_num,col_num,columns[col_num],font_style)\n    font_style.font.bold=False\n\n    rows = Expenses.objects.filter(owner=request.user).values_list('amount','description','category','date')\n\n\n    for row in rows:\n        row_num+=1\n\n        for col_num in range(len(row)):\n            ws.write(row_num,col_num,str(row[col_num]),font_style)\n\n    wb.save(response)\n\n    return response\n\n\n\ndef export_pdf(request):\n\n# Query the data from the model\n    people = Expenses.objects.filter(owner=request.user)\n\n    # Create the HTML content for the PDF\n    # html = '<html><body><h1>Expenses</h1>'\n    # html += '<table>'\n    # html += '<tr><th>Amount</th><th>Description</th><th>Category</th><th>date</th></tr>'\n    # for person in people:\n    # html += '<tr><td>{}</td><td>{}</td><td>{}</td><td>{}</td></tr>'.format(person.amount, person.description, person.category, person.date)\n    # html += '</table></body></html>
    '\n context = {\n 'expenses':people,\n }\n html = render(request,'expenses/expense_pdf.html',context)\n\n # Generate the PDF from the HTML\n pdf = pdfkit.from_string(html, False)\n\n # Create the HTTP response with the PDF as an attachment\n response = HttpResponse(pdf, content_type='application/pdf')\n response['Content-Disposition'] = 'attachment; filename=Expenses.pdf'\n\n return response\n","repo_name":"Nero-Gh/DjangoProjects","sub_path":"Expense/expenses_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7477,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"1089615337","text":"import requests\nimport time, os\n\nos.environ['TZ'] = 'UTC+6'\n#time.tzset() #Linea bloqueada para funcionar en Termux (Android)\n\nclass crypto():\n #def __init__(self):\n allBooks = {}\n\ndef get_allBooks():\n response = requests.get('https://api.bitso.com/v3/ticker/')\n if response.json()['success'] :\n #crypto.allBooks = response.json()['payload']\n for x in range( len(response.json()['payload']) ):\n i = response.json()['payload'][x]['book']\n crypto.allBooks[i] = response.json()['payload'][x]\n #print(crypto.allBooks[i])\n return response.json()['success']\n\ndef search_betweenTwoBooks( a,b,index ) :\n aBooks = []\n bBooks = []\n c = \"\"\n for x in index :\n if a in x :\n aBooks.append( x )\n elif b in x:\n bBooks.append( x )\n if len(aBooks) > 0 and len(bBooks) > 0:\n for x in aBooks :\n xs0 = x.split('_')[0]\n xs1 = x.split('_')[1]\n for y in bBooks :\n ys0 = y.split('_')[0]\n ys1 = y.split('_')[1]\n if xs0 == ys0 or xs0 == ys1 :\n c = xs0\n elif xs1 == ys0 or xs1 == ys1 :\n c = xs1\n if not c == \"\" :\n return [x,y,c]\n x = y = \"NO Encontrado\"\n return [x,y,c]\n\ndef convert2(aValue,aCoinName,bCoinName):\n aCoinName = aCoinName.lower()\n bCoinName = bCoinName.lower()\n if not aCoinName == bCoinName :\n book = aCoinName+'_'+bCoinName\n try:\n bValue = aValue * float(crypto.allBooks[book]['last'])\n except:\n book = bCoinName+'_'+aCoinName\n try:\n bValue = aValue / float(crypto.allBooks[book]['last'])\n except:\n cCoinName = search_betweenTwoBooks( aCoinName,bCoinName,crypto.allBooks )[2]\n if not cCoinName == \"\" :\n print(\"*****\")\n cValue = convert2(aValue,aCoinName,cCoinName)\n bValue = convert2(cValue,cCoinName,bCoinName)\n print(\"*****\")\n else : \n print( \"No se puede hacer trading de {} a {} usando Bitso !\".format(aCoinName.upper(),bCoinName.upper()) )\n return 0\n else :\n bValue = aValue\n print(f\"{aValue} {aCoinName.upper()} = {bValue} {bCoinName.upper()} \")\n return bValue\n\ndef get_address_balance(network,address) :\n response = requests.get('https://chain.so/api/v2/get_address_balance/{}/{}'.format(network,address))\n if response.json()['status']==\"success\":\n data = response.json()['data']\n \"\"\"\n print(\"Network {} in address {} \".format(data['network'],data['address']) )\n print(\"Balance confirmado = {} \".format(data['confirmed_balance']) )\n print(\"Balance por confirmar = {} \".format(data['unconfirmed_balance']) )\n \"\"\"\n return data\n else:\n return response.json()['status']\n\ndef test():\n\tprint( \"Inicia programa {}\".format(time.strftime(\"%c\")) )\n\tconfirmed_balance = 0 #get_address_balance('BTC','14HprKtenqJWyMqT236J7i7DN5TArAiQLD')['confirmed_balance']\n\twhile get_allBooks() :\n\t\tdateInfo = crypto.allBooks['btc_mxn']['created_at']\n\t\tdateInfo = dateInfo.split('T')[0] +\" \"+ dateInfo.split('T')[1].split('+')[0]\n\t\tprint(f\"\\n*******\\n Hora de Bitso 
{dateInfo}\")\n\t\t#print(crypto.allBooks['btc_mxn']) #imprime el JSON obtenido\n\t\tconvert2(1,'USD','MXN')\n\t\tconvert2(1,'BTC','MXN')\n\t\tconvert2(1,'BTC','USD')\n\t\tconvert2(1,'ETH','USD')\n\t\tconvert2(1,'APE','USD')\n\t\tconvert2(1,'AAVE','USD')\n\t\t#for x in crypto.allBooks :\n\t\t # print( \"{}) 1 {} = {} {} \".format(x,x.split('_')[0].upper(),crypto.allBooks[x]['last'],x.split('_')[1].upper()) )\n\t\ttime.sleep(1)\n","repo_name":"eacg91/botCrypTrader","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":3752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13414780930","text":"#Import dependencies\nimport requests \nfrom bs4 import BeautifulSoup\nimport re\nimport pandas as pd\nimport numpy as np\nimport nltk\nfrom nltk.corpus import stopwords\nfrom textblob import Word\n\n#Define a get reviews\ndef get_reviews():\n links = [f'https://www.yelp.com/biz/mcdonalds-los-angeles-106?start={10+x*10}' for x in range(12)]\n links.insert(0,'https://www.yelp.com/biz/mcdonalds-los-angeles-106')\n regex = re.compile('raw__')\n\n\n reviews=[]\n for link in links:\n r=requests.get(link)\n soup=BeautifulSoup(r.text,'html.parser')\n results = soup.find_all('span',{'lang':'en'},class_=regex)\n reviews = [*reviews, *[result.text for result in results]]\n return reviews\n\n#Preprocess collected reviews\ndef preprocess(reviews):\n df = pd.DataFrame(np.array(reviews), columns=['review'])\n stop_words = stopwords.words('english')\n \n # Lowercase\n df['review_lower'] = df['review'].apply(lambda x: \" \".join(x.lower() for x in x.split()))\n # Strip punctuation\n df['review_nopunc'] = df['review_lower'].str.replace('[^\\w\\s]','')\n # Remove stopwords\n df['review_nostop'] = df['review_nopunc'].apply(lambda x:\" \".join(x for x in x.split() if x not in stop_words))\n # Custom stopwords list\n other_stopwords = ['one','get','go','im','2','thru','tell','says','two']\n # Remove other stop words\n df['review_noother'] = df['review_nostop'].apply(lambda x: \" \".join(x for x in x.split() if x not in stop_words))\n #Lemmatize\n df['cleaned_review'] = df['review_noother'].apply(lambda x: \" \".join(Word(word).lemmatize() for word in x.split()))\n\n return df\n \n #Calculate sentiment\n def calculate_sentiment(df):\n df['polarity'] = df['cleaned_review'].apply(lambda x:TextBlob(x).sentiment[0])\n df['subjectivity'] = df['cleaned_review'].apply(lambda x:TextBlob(x).sentiment[1])\n # return the final dataframe\n return df\n\n if __name__ == \"__main_\":\n reviews = get_reviews()\n df = preprocess(reviews)\n sentiment_df = calculate_sentiment(df)\n sentiment_df.to_csv('results.csv')\n \n\n\n","repo_name":"redjules/Web-Scraper-and-Detect-Company-Sentiment-with-BeautifulSoup","sub_path":"workflow.py","file_name":"workflow.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21923460817","text":"from prompt_toolkit import print_formatted_text as print\nfrom pyroute2.netlink.exceptions import NetlinkError\n\nfrom rosh.commands import RoshCommand\nfrom rosh.completer import link_completer\n\n\nclass RoshDisableInterfaceCommand(RoshCommand):\n description = 'disable (shutdown) an interface'\n\n def __init__(self, rosh):\n super().__init__(rosh, link_completer, min_args=1)\n\n def handler(self, cmd, *args):\n for link in self.rosh.ipr.link_lookup(ifname=args[0]):\n try:\n self.handle_link(link)\n except NetlinkError as ex:\n 
print(f'ERR: {ex.args[1]}')\n\n def handle_link(self, index):\n self.rosh.ipr.link('set', index=index, state='down')\n\nis_rosh_command = True\nrosh_command = RoshDisableInterfaceCommand\n","repo_name":"liske/rosh","sub_path":"rosh/commands/disable/interface/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":784,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"40789447567","text":"import subprocess\r\nimport tkinter as tk\r\nimport tkinter.ttk as ttk\r\nfrom tkinter import filedialog\r\n\r\nclass MyGUI:\r\n def __init__(self, master):\r\n self.master = master\r\n master.title(\"Fish Inference\")\r\n\r\n ##SELECT MODEL##\r\n self.load_model_button = tk.Button(master, text=\"Select model\", command=self.load_model_function)\r\n self.load_model_button.pack()\r\n\r\n\r\n ##SELECT ARCHITECTURE##\r\n self.label = tk.Label(master, text=\"Select an option:\")\r\n self.label.pack()\r\n\r\n self.script_options = [(\"Diffusion\", \"tools/diffusion/inference.py\"), (\"Hifisinger\", \"tools/hifisinger/inference.py\")]\r\n self.selected_script = tk.StringVar()\r\n self.selected_script.set(self.script_options[0][1])\r\n\r\n for option, script_path in self.script_options:\r\n script_button = tk.Radiobutton(master, text=option, variable=self.selected_script, value=script_path)\r\n script_button.pack()\r\n\r\n\r\n ##MULTISPEAKER##\r\n self.label = tk.Label(master, text=\"Speaker:\")\r\n self.label.pack()\r\n \r\n self.speakerenter = tk.Entry(master, textvariable=tk.StringVar(value=\"0\"))\r\n self.speakerenter.pack()\r\n\r\n\r\n ##KEY##\r\n self.label = tk.Label(master, text=\"Key change:\")\r\n self.label.pack()\r\n \r\n self.keyenter = tk.Entry(master, textvariable=tk.StringVar(value=\"0\"))\r\n self.keyenter.pack()\r\n\r\n\r\n ##INTERVAL##\r\n self.speed_var = tk.BooleanVar()\r\n self.speed_checkbox = tk.Checkbutton(master, text=\"Enable sampler interval\", variable=self.speed_var)\r\n self.speed_checkbox.pack()\r\n \r\n self.speedenter = tk.Entry(master, textvariable=tk.StringVar(value=\"20\"))\r\n self.speedenter.pack()\r\n\r\n\r\n ##INPUT##\r\n self.load_input_button = tk.Button(master, text=\"Select input audio\", command=self.load_input_function)\r\n self.load_input_button.pack()\r\n\r\n\r\n ##OUTPUT##\r\n self.load_export_button = tk.Button(master, text=\"Select export location\", command=self.load_export_function)\r\n self.load_export_button.pack()\r\n\r\n\r\n ##EXECUTE##\r\n self.execute_button = tk.Button(master, text=\"Inference\", command=self.execute_command)\r\n self.execute_button.pack()\r\n \r\n def load_model_function(self):\r\n global ckpt_path\r\n ckpt_path = tk.filedialog.askopenfilename(title = \"Select CKPT File\", filetypes=[(\"Checkpoint files\", \"*.ckpt\")])\r\n if ckpt_path == '':\r\n tk.messagebox.showerror(\"Error\", \"No CKPT file selected\")\r\n return\r\n global cnfg_path\r\n cnfg_path = tk.filedialog.askopenfilename(title = \"Select Config File\",filetypes=[(\"Py files\", \"*.py\")])\r\n if cnfg_path == '':\r\n tk.messagebox.showerror(\"Error\", \"No config file selected\")\r\n return\r\n \r\n def load_input_function(self):\r\n global input_path\r\n input_path = tk.filedialog.askopenfilename(title = \"Select WAV File\", filetypes=[(\"WAV files\", \"*.wav\")])\r\n if input_path == '':\r\n tk.messagebox.showerror(\"Error\", \"No WAV file selected\")\r\n return\r\n\r\n def load_export_function(self):\r\n global export_path\r\n export_path = tk.filedialog.asksaveasfilename(title = \"Select 
WAV File\", filetypes=[(\"WAV files\", \"*.wav\")], defaultextension=\".wav\")\r\n if export_path == '':\r\n tk.messagebox.showerror(\"Error\", \"No WAV file selected\")\r\n return\r\n\r\n def execute_command(self):\r\n speaker = self.speakerenter.get()\r\n key = self.keyenter.get()\r\n speed = self.speedenter.get()\r\n script_path = self.selected_script.get()\r\n if script_path == \"tools/diffusion/inference.py\":\r\n script_name = \"diffusion\"\r\n elif script_path == \"tools/hifisinger/inference.py\":\r\n script_name = \"hifisinger\"\r\n else:\r\n raise ValueError(\"Invalid script path\")\r\n return\r\n\r\n\r\n # Construct the command\r\n cmd = ['python', script_path, '--config', cnfg_path, '--checkpoint', ckpt_path, '--input', input_path, '--output', export_path, '--speaker', speaker, '--pitch_adjust', key]\r\n if self.speed_var.get():\r\n cmd.append('--sampler_interval')\r\n cmd.append(speed)\r\n print(' '.join(cmd))\r\n output = subprocess.check_output(cmd, universal_newlines=True)\r\n print(output)\r\n\r\nroot = tk.Tk()\r\nmy_gui = MyGUI(root)\r\nroot.mainloop()\r\n\r\n","repo_name":"agentasteriski/fish-accessories","sub_path":"gui-inference.py","file_name":"gui-inference.py","file_ext":"py","file_size_in_byte":4507,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"31125420886","text":"class Node:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n \nclass BTree:\n def __init__(self):\n self.root = None\n \n def insert(self, value):\n if not self.root:\n self.root = Node(value)\n return\n q = [self.root]\n while q:\n temp: Node = q.pop(0)\n if not temp.left:\n temp.left = Node(value)\n break\n else:\n q.append(temp.left)\n if not temp.right:\n temp.right = Node(value)\n break\n else:\n q.append(temp.right)\n \n def inorderWrap(self, temp):\n if temp:\n self.inorderWrap(temp.left)\n print(temp.value, end=\" \")\n self.inorderWrap(temp.right)\n def inorder(self):\n self.inorderWrap(self.root)\n print()\n \n # sum of tree elements\n def tree_sum_wrap(self, temp):\n if temp is None:\n return 0\n else:\n left_sum = self.tree_sum_wrap(temp.left)\n right_sum = self.tree_sum_wrap(temp.right)\n return temp.value + left_sum + right_sum\n def tree_sum(self):\n x = self.tree_sum_wrap(self.root)\n print(\"sum of all elements\", x)\n \n #max in tree\n def tree_max_wrap(self, temp):\n if temp is None:\n return float(\"-inf\")\n else:\n left_max = self.tree_max_wrap(temp.left)\n right_max = self.tree_max_wrap(temp.right)\n return max(temp.value, left_max, right_max)\n def tree_max(self):\n x = self.tree_max_wrap(self.root)\n print(\"max: \", x)\n \n def tree_min_wrap(self, temp):\n if temp is None:\n return float(\"inf\")\n else:\n left_max = self.tree_min_wrap(temp.left)\n right_max = self.tree_min_wrap(temp.right)\n return min(temp.value, left_max, right_max)\n def tree_min(self):\n x = self.tree_min_wrap(self.root)\n print(\"max: \", x)\n \n def tree_height_wrap(self, temp: Node):\n if temp is None:\n return 0\n else:\n left_h = self.tree_height_wrap(temp.left)\n right_h = self.tree_height_wrap(temp.right)\n return 1 + max(left_h, right_h)\n def tree_height(self):\n x = self.tree_height_wrap(self.root)\n print(\"height: \", x)\n \n def is_exist_wrap(self, temp, value):\n if temp is None:\n return False\n else:\n in_left = self.is_exist_wrap(temp.left, value)\n in_right = self.is_exist_wrap(temp.right, value)\n return value == temp.value or in_left or in_right\n def is_exist(self, value):\n x = 
\"exist\" if self.is_exist_wrap(self.root, value) else \"not exist\"\n        print(value, x)\n    \n    #reverse the tree\n    def reverse_tree_wrap(self, temp):\n        if temp is None:\n            return\n        else:\n            self.reverse_tree_wrap(temp.left)\n            self.reverse_tree_wrap(temp.right)\n            temp.left, temp.right = temp.right, temp.left\n    def reverse_tree(self):\n        self.reverse_tree_wrap(self.root)\n    \n    \n    # tree traversal\n    \n    def inorder_wrap(self, temp):\n        if temp:\n            self.inorder_wrap(temp.left)\n            print(temp.value, end=\" \")\n            self.inorder_wrap(temp.right)\n\n    def preorder_wrap(self, temp):\n        if temp:\n            print(temp.value, end=\" \")\n            self.preorder_wrap(temp.left)\n            self.preorder_wrap(temp.right)\n    def postorder_wrap(self, temp):\n        if temp:\n            self.postorder_wrap(temp.left)\n            self.postorder_wrap(temp.right)\n            print(temp.value, end=\" \")\n    def inorder_iterative_wrap(self, temp):\n        s = []\n        curr: Node = temp\n        while s or curr:\n            if curr:\n                s.append(curr)\n                curr = curr.left\n            else:\n                curr = s.pop()\n                print(curr.value, end=\" \")\n                curr = curr.right\n    \n    def postorder_iterative_wrap(self, temp):\n        pass\n    \n    def traverse(self, ino=False, preo=False, posto=False, iter=False):\n        if iter:\n            if ino:\n                self.inorder_iterative_wrap(self.root)\n                print()\n        else:\n            if ino:\n                self.inorder_wrap(self.root)\n                print()\n            elif preo:\n                self.preorder_wrap(self.root)\n            elif posto:\n                self.postorder_wrap(self.root)\n    \n    \nif __name__ == \"__main__\":\n    bt = BTree()\n    bt.insert(1)\n    bt.insert(2)\n    bt.insert(3)\n    bt.insert(4)\n    bt.insert(5)\n    bt.insert(6)\n    bt.insert(7)\n    bt.inorder()\n    bt.tree_sum()\n    bt.tree_max()\n    bt.tree_min()\n    bt.tree_height()\n    bt.is_exist(4)\n    bt.is_exist(14)\n    bt.inorder()\n    print(\"tree is reversed\")\n    bt.reverse_tree()\n    bt.inorder()\n    print(\"inorder traversal\")\n    bt.traverse(ino=True)\n    print(\"inorder iterative traversal\")\n    bt.traverse(ino=True, iter=True)\n    \n","repo_name":"Vinaypatil-Ev/DataStructure-and-Algorithms","sub_path":"6.Tree/Binary Tree/Binary_Tree_.py","file_name":"Binary_Tree_.py","file_ext":"py","file_size_in_byte":5172,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"14996213165","text":"# https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoDemo.ipynb\n\nfrom pycocotools.coco import COCO\nimport numpy as np\nimport skimage.io as io\nimport matplotlib.pyplot as plt\nfrom mxnet import nd, sym, gluon\nfrom mxnet.gluon import nn\n\ndataDir = '/media/cunyuan/TRANS-128/COCO'\ndataType = 'val2017'\n\nannFile = '{}/annotations_trainval2017/' \\\n          'annotations/instances_{}.json'.format(dataDir, dataType)  # modify in need\n\n\n# coco=COCO(annFile)\n\n\n# # get all images containing given categories, select one at random\n# catIds = coco.getCatIds(catNms=['person','dog','skateboard']);\n# imgIds = coco.getImgIds(catIds=catIds );\n# imgIds = coco.getImgIds(imgIds = [324158])\n# img = coco.loadImgs(imgIds[np.random.randint(0,len(imgIds))])[0]\n\n\n# # load and display image\n# # I = io.imread('%s/images/%s/%s'%(dataDir,dataType,img['file_name']))\n# # use url to load image\n# I = io.imread(img['coco_url'])\n# plt.axis('off')\n# plt.imshow(I)\n# plt.show()\n\ndef iou(x, ys):\n    \"\"\"\n    Calculate intersection-over-union overlap\n    Params:\n    ----------\n    x : numpy.array\n        single box [xmin, ymin ,xmax, ymax]\n    ys : numpy.array\n        multiple box [[xmin, ymin, xmax, ymax], [...], ]\n    Returns:\n    -----------\n    numpy.array\n        [iou1, iou2, ...], size == ys.shape[0]\n    \"\"\"\n    if len(ys.shape) > 1:\n        ixmin = np.maximum(ys[:, 0], x[0])\n        iymin = 
np.maximum(ys[:, 1], x[1])\n ixmax = np.minimum(ys[:, 2], x[2])\n iymax = np.minimum(ys[:, 3], x[3])\n else:\n ixmin = np.maximum(ys[0], x[0])\n iymin = np.maximum(ys[1], x[1])\n ixmax = np.minimum(ys[2], x[2])\n iymax = np.minimum(ys[3], x[3])\n iw = np.maximum(ixmax - ixmin, 0.)\n ih = np.maximum(iymax - iymin, 0.)\n inters = iw * ih\n if len(ys.shape) > 1:\n uni = (x[2] - x[0]) * (x[3] - x[1]) + (ys[:, 2] - ys[:, 0]) * \\\n (ys[:, 3] - ys[:, 1]) - inters\n else:\n uni = (x[2] - x[0]) * (x[3] - x[1]) + (ys[2] - ys[0]) * \\\n (ys[3] - ys[1]) - inters\n ious = inters / uni\n if len(ys.shape) > 1:\n ious[uni < 1e-12] = 0 # in case bad boxes\n else:\n ious = ious * (uni > 1e-12)\n return ious\n\n\ndef iou_coco(lbx, rbxs):\n \"\"\"\n IOU for coco dataset.\n lbx: bbox, often for gt\n rbx: multiple bboxes.\n \"\"\"\n x = np.array([lbx[0], lbx[1], lbx[0] + lbx[2], lbx[1] + lbx[3]])\n ys = rbxs.copy() # .copy() is fatal !!!\n if len(ys.shape) > 1:\n ys[:, 2] = rbxs[:, 0] + rbxs[:, 2]\n ys[:, 3] = rbxs[:, 1] + rbxs[:, 3]\n else:\n ys[2] = rbxs[0] + rbxs[2]\n ys[3] = rbxs[1] + rbxs[3]\n ious = iou(x, ys)\n return ious\n\n\n# len(anns)\n# anns[0]['bbox'] # in [x, y, dw, dh]\n# if IOU(GT, anchor) > 0:\n# if sqrt(w*h) < a or sqrt(w*h) > b:\n# lbl(anchor) = -1\n# else:\n# lbl(anchor) = 1\n# else:\n# lbl(anchor) = 0\n\ndef affine_fmap2gt(fmap_size, input_size, gts, lthres, rthres):\n \"\"\"\n affine fmap grid to original image and calc iou with gt\n :param fmap_size: (x, y), 2-sdim size of the output image,\n size of feature map generated\n :param input_size: size of original image, e.g. 224*224\n :param gts: (batch_size, 4, num_boxes) ground truth boxes, numpy array of arrays\n :param lthres: lower threshold for iou\n :param rthres: upper threshold for iou\n :return: segmented labels for focus pixel generation\n \"\"\"\n\n out_lbl = np.zeros(fmap_size) # (b, c, y, x)\n scal_fact = (input_size[0] / fmap_size[-1], input_size[1] / fmap_size[-2])\n b, c, h, w = fmap_size # you cannot be too careful. 
c = 1 in default\n    for eb in range(b):\n        gt = gts[eb, :, :]\n        for eh in range(h):\n            for ew in range(w):\n                affined_box = np.array([scal_fact[0] * eh, scal_fact[1] * ew, scal_fact[0], scal_fact[1]])\n                affined_ious = iou_coco(affined_box, gt)\n                if max(affined_ious) > 0:\n                    assigned_gt = gt[np.argmax(affined_ious), :]  # [x, y, w, h]\n                    gt_area = assigned_gt[2] * assigned_gt[3]\n                    if gt_area > lthres and gt_area < rthres:\n                        out_lbl[eb, :, ew, eh] = 1.\n                    else:\n                        out_lbl[eb, :, ew, eh] = -1.\n                else:\n                    out_lbl[eb, :, ew, eh] = 0.\n    return out_lbl\n\n\ndef fmap_lossfunc(fmap, focus_lbl):\n    \"\"\"\n    calculate loss among feature-map generated by focus branch\n    :param fmap: mxnet ndarray, feature map\n    :param focus_lbl: mxnet ndarray, label map generated by affine_fmap-n-gt\n    :return: loss.\n    \"\"\"\n    res = nd.abs(nd.sum(focus_lbl - fmap) / nd.sum(focus_lbl))\n    return res\n\n\ndef lstlbl2bbox(lbls, IF_COCO=False, orig_size=None):\n    \"\"\"\n    convert mxnet lst file labelling to bounding box\n    :param lbls: [lbl, xmin, ymin, xmax, ymax]\n    :return:\n    \"\"\"\n    xxyy = lbls.asnumpy()[:, :, 1:]  # miniminmaxmax\n    ## for tuple\n    # xxyy[:, 0] *= orig_size[0]\n    # xxyy[:, 1] *= orig_size[1]\n    # xxyy[:, 2] *= orig_size[0]\n    # xxyy[:, 3] *= orig_size[1]\n    xxyy *= orig_size\n    if IF_COCO:  # cvt to minminwidthheight\n        xywh = xxyy.copy()\n        xywh[:, :, 2] = -xxyy[:, :, 0] + xxyy[:, :, 2]\n        xywh[:, :, 3] = -xxyy[:, :, 1] + xxyy[:, :, 3]\n        return xywh\n    else:\n        return xxyy\n\n\ndef test():\n    input_size = [640, 640]\n\n    # affine_fmap2gt expects a 4-dim fmap (batch, channel, h, w) and 3-dim gts (batch, num_boxes, 4)\n    fmap = nd.random.normal(0, 1, shape=(1, 1, 16, 16))\n\n    fmap_size = fmap.shape\n    gts = np.array([[[100, 110, 10, 10], [300, 500, 100, 100]]])\n    lthres, rthres = 9 ** 2, 64 ** 2\n    lbls = affine_fmap2gt(fmap_size, input_size, gts, lthres, rthres)\n\n    fmap_loss = fmap_lossfunc(fmap, nd.array(lbls))\n\n    plt.imshow(lbls[0, 0])\n    plt.show()\n","repo_name":"pigtamer/autofocus","sub_path":"utils/coco_af.py","file_name":"coco_af.py","file_ext":"py","file_size_in_byte":5729,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"28074256040","text":"import random\nimport math\nimport game_framework\nfrom BehaviorTree import BehaviorTree, SelectorNode, SequenceNode, LeafNode\nfrom bubble_destroy import Bubble_destroy\nfrom pico2d import *\nfrom boy_death import Death\n\nimport game_world\n\n\n# zombie Run Speed\nPIXEL_PER_METER = (10.0 / 0.3) # 10 pixel 30 cm\nRUN_SPEED_KMPH = 10.0 # Km / Hour\nRUN_SPEED_MPM = (RUN_SPEED_KMPH * 1000.0 / 60.0)\nRUN_SPEED_MPS = (RUN_SPEED_MPM / 60.0)\nRUN_SPEED_PPS = (RUN_SPEED_MPS * PIXEL_PER_METER)\n\n# zombie Action Speed\nTIME_PER_ACTION = 0.2\nACTION_PER_TIME = 1.0 / TIME_PER_ACTION\nFRAMES_PER_ACTION = 4\n\nanimation_names = ['Attack', 'Dead', 'Idle', 'Walk']\n\nclass Zombie:\n    images = None\n    font = None\n\n    def __init__(self, name='NONAME', x=0, y=0, size=1):\n        self.name = name\n        self.x, self.y = x * PIXEL_PER_METER, y * PIXEL_PER_METER\n        self.size = size\n        self.images = load_image(\"resource/bubble_121.png\")\n        if Zombie.font is None:\n            Zombie.font = load_font('ENCR10B.TTF', 16)\n        self.dir = random.random()*2*math.pi # random moving direction\n        self.speed = 0\n        self.timer = 1.0 # change direction every 1 sec when wandering\n        self.survive_timer=500\n        self.frame = 0\n        self.build_behavior_tree()\n\n    def __getstate__(self):\n        state = {'x': self.x, 'y': self.y, 'dir': self.dir, 'name': self.name, 'size': self.size}\n        return state\n\n    def __setstate__(self, state):\n        self.__init__()\n        self.__dict__.update(state)\n\n\n    def 
wander(self):\n self.speed = RUN_SPEED_PPS\n self.timer -= game_framework.frame_time\n if self.timer < 0:\n self.timer += 1.0\n self.dir = random.random()*2*math.pi\n\n return BehaviorTree.SUCCESS\n\n\n def find_player(self):\n boy = world_build_state.get_boy()\n distance = (boy.x - self.x)**2 + (boy.y - self.y)**2\n if distance < (PIXEL_PER_METER * 10)**2:\n self.dir = math.atan2(boy.y - self.y, boy.x - self.x)\n return BehaviorTree.SUCCESS\n else:\n self.speed = 0\n return BehaviorTree.FAIL\n\n def move_to_player(self):\n self.speed = RUN_SPEED_PPS\n return BehaviorTree.SUCCESS\n\n def build_behavior_tree(self):\n wander_node = LeafNode(\"Wander\", self.wander)\n find_player_node = LeafNode(\"Find Player\", self.find_player)\n move_to_player_node = LeafNode(\"Move to Player\", self.move_to_player)\n chase_node = SequenceNode(\"Chase\")\n chase_node.add_children(find_player_node, move_to_player_node)\n wander_chase_node = SelectorNode(\"WanderChase\")\n wander_chase_node.add_children(chase_node, wander_node)\n self.bt = BehaviorTree(wander_chase_node)\n\n\n def get_bb(self):\n return self.x - 30, self.y - 30, self.x + 30, self.y + 30\n\n def update(self):\n self.bt.run()\n #self.frame = (self.frame + FRAMES_PER_ACTION * ACTION_PER_TIME * game_framework.frame_time) % FRAMES_PER_ACTION\n self.x += self.speed * math.cos(self.dir)* game_framework.frame_time\n self.y += self.speed * math.sin(self.dir)* game_framework.frame_time\n self.x = clamp(50, self.x, get_canvas_width() - 50)\n self.y = clamp(50, self.y, get_canvas_height() - 50)\n self.survive_timer-=1\n\n if self.survive_timer==0 :\n global bubble_destroys\n bubble_destroys=Bubble_destroy(self.x,self.y)\n game_world.add_object(bubble_destroys,4)\n game_world.remove_object(self)\n\n bubble_destroyList = game_world.get_layer(4)\n boyList = game_world.get_layer(1)\n\n\n\n def draw(self):\n self.frame = (self.frame + FRAMES_PER_ACTION * ACTION_PER_TIME * game_framework.frame_time) % 4\n self.images.clip_draw(int(self.frame) * 40, 0, 40, 68, self.x, self.y)\n draw_rectangle(*self.get_bb())\n\n def handle_event(self, event):\n pass\n\n def collide(self,a, b):\n left_a, bottom_a, right_a, top_a = a.get_bb()\n left_b, bottom_b, right_b, top_b = b.get_bb()\n if left_a > right_b: return False\n if right_a < left_b: return False\n if top_a < bottom_b: return False\n if bottom_a > top_b: return False\n return True\n","repo_name":"tlfqk13/2DGP_TermProject","sub_path":"zombie.py","file_name":"zombie.py","file_ext":"py","file_size_in_byte":4252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1577166929","text":"from detectron2.data import DatasetMapper\nfrom detectron2.data import detection_utils as utils\n# from detectron2.data import transforms as T\n\nfrom .. 
import transforms as T\n\n\n__all__ = [\"RGBADatasetMapper\"]\n\n\ndef build_rgba_augmentation(cfg, is_train):\n \"\"\"\n Create a list of default :class:`Augmentation` from config.\n Now it includes resizing and flipping.\n\n Returns:\n list[Augmentation]\n \"\"\"\n if is_train:\n min_size = cfg.INPUT.MIN_SIZE_TRAIN\n max_size = cfg.INPUT.MAX_SIZE_TRAIN\n sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING\n else:\n min_size = cfg.INPUT.MIN_SIZE_TEST\n max_size = cfg.INPUT.MAX_SIZE_TEST\n sample_style = \"choice\"\n augmentation = [T.ResizeShortestEdgeRGBA(min_size, max_size, sample_style)]\n if is_train and cfg.INPUT.RANDOM_FLIP != \"none\":\n augmentation.append(\n T.RandomFlip(\n horizontal=cfg.INPUT.RANDOM_FLIP == \"horizontal\",\n vertical=cfg.INPUT.RANDOM_FLIP == \"vertical\",\n )\n )\n return augmentation\n\n\nclass RGBADatasetMapper(DatasetMapper):\n @classmethod\n def from_config(cls, cfg, is_train: bool = False):\n augs = build_rgba_augmentation(cfg, is_train)\n if cfg.INPUT.CROP.ENABLED and is_train:\n augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))\n recompute_boxes = cfg.MODEL.MASK_ON\n else:\n recompute_boxes = False\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n \"use_instance_mask\": cfg.MODEL.MASK_ON,\n \"instance_mask_format\": cfg.INPUT.MASK_FORMAT,\n \"use_keypoint\": cfg.MODEL.KEYPOINT_ON,\n \"recompute_boxes\": recompute_boxes,\n }\n\n if cfg.MODEL.KEYPOINT_ON:\n ret[\"keypoint_hflip_indices\"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)\n\n if cfg.MODEL.LOAD_PROPOSALS:\n ret[\"precomputed_proposal_topk\"] = (\n cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN\n if is_train\n else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST\n )\n return ret","repo_name":"tydpan/OpenPartSeg","sub_path":"mask2former/data/dataset_mappers/rgba_dataset_mapper.py","file_name":"rgba_dataset_mapper.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"69986566545","text":"from .schedule_dev_tow import sched_dev_tow\nfrom .schedule_dev_deck import sched_dev_deck\n\nimport logging\nmodule_logger = logging.getLogger(__name__)\n\ndef sched_dev(seq, ind_sol, install, log_phase, site, entry_point, device, sub_device,\n layout, sched_sol):\n \"\"\"sched_dev determines the duration of each individual logistic operations\n for the installtion of ocean energy devices following a common methodology:\n - the time value duration can be extracted from a direct average\n default value\n - the time value duration can result from a specialized function\n - the time value duration can be derived from other sources, mostly by\n making use of values available in the database or provided from the\n end-user\n Parameters\n ----------\n seq: integer\n index of the operation sequencing strategy under consideration\n ind_sol: integer\n index representing the feasible logistic solution under consideration\n log_phase: class\n class containing all data relevant to the characterization of the feasible\n logistic solutions\n user_inputs : dict\n dictionnary containing all required inputs to WP5 coming from WP1/end-user.\n ...\n\n Returns\n -------\n sched_sol : dict\n ...\n \"\"\"\n\n \"\"\"\n Time assessment for the installation of ocean energy devices\n \"\"\"\n# \"\"\"\n# On-deck device transportation\n# \"\"\"\n if log_phase.op_ve[seq].description == 'On-deck transportation':\n sched_sol = sched_dev_deck(seq, ind_sol, install, 
log_phase, site, entry_point,\n                                   device, sub_device, layout, sched_sol)\n# \"\"\"\n# Towing device transportation\n# \"\"\"\n    elif log_phase.op_ve[seq].description == 'Towing transportation':\n        sched_sol = sched_dev_tow(seq, ind_sol, install, log_phase, site, entry_point,\n                                  device, sub_device, layout, sched_sol) \n    else:\n        \n        msg = (\"Unknown device transportation method: {}. Only 'On-deck \"\n               \"transportation' or 'Towing transportation' accepted.\".format(\n                   log_phase.op_ve[seq].description))\n        module_logger.warning(msg)\n\n    return sched_sol\n","repo_name":"DTOcean/dtocean-logistics","sub_path":"dtocean_logistics/performance/schedule/install/install_dev/schedule_dev.py","file_name":"schedule_dev.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"}
{"seq_id":"14122635602","text":"# export FLASK_ENV=development\n# flask run\n\nfrom flask import Flask, g, render_template, request\n\nimport sqlite3, string\n\napp = Flask(__name__)\n\n@app.route('/')\ndef main():\n\treturn render_template(\"main.html\")\n\n@app.route('/submit/', methods=['POST', 'GET'])\ndef submit():\n\tif request.method == 'GET':\n\t\treturn render_template('submit.html')\n\telif request.method == 'POST':\n\t\tinsert_message(request)\n\t\treturn render_template('submit.html')\n\telse:\n\t\ttry:\n\t\t\treturn render_template('submit.html', message=request.form['message'], name=request.form['name'])\n\t\texcept:\n\t\t\treturn render_template('submit.html')\n\n@app.route('/view/', methods=['POST', 'GET'])\ndef view():\n\treturn render_template('view.html', results = random_messages(3))\n\n# create database for message\ndef get_message_db():\n\t#check whether the database message_db exists\n\tif 'message_db' not in g:\n\t\t# if not connect to the database\n\t\tg.message_db = sqlite3.connect(\"messages_db.sqlite\")\n\t\tc = g.message_db.cursor()\n\t\tc.execute('''CREATE TABLE IF NOT EXISTS messages (id integer, handle text, message text)''')\n\t\t#commit the changes to db\n\t\tg.message_db.commit()\n\n\treturn g.message_db\n\ndef insert_message(request):\n\tmessage = request.form['message']\n\tname = request.form['name']\n\tdb = get_message_db()\n\tcursor = db.cursor()\n\tcursor.execute(\"select count(*) from messages\")\n\tcount = cursor.fetchone()[0] + 1\n\tdb.commit()\n\tdb.execute(\n\t\t\t\t'INSERT INTO messages (id, handle, message) VALUES (?, ?, ?)',\n\t\t\t\t(count, name, message)\n\t)\n\tdb.commit()\n\treturn message, name\n\ndef random_messages(n):\n\tdb = get_message_db()\n\tcursor = db.cursor()\n\tcursor.execute(\"select count(*) from messages\")\n\tcount = cursor.fetchone()[0]\n\tn = min(count, n)\n\tcommand = \"SELECT * FROM messages ORDER BY RANDOM() LIMIT \" + str(n)\n\tcursor.execute(command)\n\ts = \"\"\n\tfor row in cursor:\n\t\ts = s + row[2] + \"<br>\" + \"- \"+ row[1] + \"<br>\" + \"<br>
    \"\n\t#close the connection\n\tg.message_db.close()\n\treturn s\n\n","repo_name":"lchen777-cl/Blog-Post-3","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37539015804","text":"\nfrom dash import Dash, dash_table\nimport pandas as pd\nimport numpy as np\nimport pandas as pd\nfrom dash import Dash, dcc, html, Input, Output\n\napp = Dash(__name__)\n\ndf = pd.DataFrame({\n 'student_id' : range(1, 11),\n 'score' : [1, 5, 2, 5, 2, 3, 1, 5, 1, 5]\n})\n\napp.layout = html.Div([\n dcc.Dropdown(list(range(1, 6)), 1, id='score'),\n\t'was scored by this many students:',\n\thtml.Div(id='output'),\n])\n\n@app.callback(Output('output', 'children'), Input('score', 'value'))\ndef update_output(value):\n\tfiltered_df = df[df['score'] == value]\n\treturn len(filtered_df)\n\nif __name__ == \"__main__\":\n app.run_server(debug=True)","repo_name":"abubakar12/Plotly_dash_gothrough","sub_path":"call_back_basics.py","file_name":"call_back_basics.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33194609310","text":"\"\"\"This code returns the pair of employees that have the longest common\nworking time across different projects. Pandas is used to read the .txt file\nand format it. It is then exported to a .CSV file (which is better for data\nmanagement). After that with the use of default dictionaries the needed data is\nobtained and printed as a result.\n\"\"\"\n\nimport csv\nimport pandas as pd\nimport operator\n\nfrom collections import defaultdict\nfrom itertools import combinations\nfrom datetime import datetime\n\n# global variables, using default dicts rather than normal dictionaries\nd = defaultdict(list)\npairs = defaultdict(list)\n\ndf = pd.read_csv('data.txt') # reading the text data file with pandas\n\n# replacing NULL with the value of today\ndf['DateTo'] = df['DateTo'].fillna(datetime.today().strftime('%Y-%m-%d'))\n\n# sorting by EmpID, because it will be easier to calculate the output\ndf = df.sort_values(by=['EmpID'])\n\ndf.set_index(\"EmpID\", inplace=True) # creating EmpID as the index\ndf.to_csv('new_data.csv') # exporting to a new .CSV file\n\n\nwith open(\"new_data.csv\") as f:\n next(f) # skip header\n r = csv.reader(f)\n # unpack, use projectID as key and append empID, start_date, finish_date\n for EmpID, ProjectID, FromDate, ToDate in r:\n d[int(ProjectID)].append((EmpID, FromDate, ToDate))\n\n\nfor project, aref in d.items():\n # only projects with 2 or more assigned employees\n if len(aref) >= 2:\n for ref in combinations(aref, 2):\n # using lambda operator to create an in-line function\n # mapping start and finish dates with the iterable\n start_date = max(map(lambda x: x[1], ref))\n finish_date = min(map(lambda x: x[2], ref))\n # finding the duration of a project in days\n delta = datetime.strptime(finish_date, '%Y-%m-%d') \\\n - datetime.strptime(start_date, '%Y-%m-%d')\n dd = delta.days\n if dd > 0:\n # appending to the dictionary pairs as keys with the working\n # times from different projects as values\n pairs[ref[0][0] + ' and ' + ref[1][0]].append(dd)\n # adding the working times from different projects for all\n # the possible pairs\n new_dict = {k: sum(v) for k, v in pairs.items()}\n # and finding key with highest value\n longest_working_pair = max(new_dict.items(),\n key=operator.itemgetter(1))[0]\n # highest value from the 
dictionary, which is the total days\n # for a single pair of employees\n max_value = max(new_dict.values())\n\nprint(\"Employees that have the longest working time together are employees \"\n \"with ID: \" + str(longest_working_pair) + \". The total working time\"\n \" of them across different projects is \" + str(max_value) + \" days\")\n\n\n","repo_name":"gerganzh/Gergan-Zhekov-employees","sub_path":"employees.py","file_name":"employees.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16714369963","text":"import albumentations as A\nimport cv2\nimport numpy as np\nimport pandas as pd\nfrom albumentations.pytorch import ToTensorV2\nfrom sklearn import preprocessing\nfrom torch.utils.data import Dataset\n\nfrom config.config import Config\n\n\nclass WhaleDataset(Dataset):\n def __init__(\n self,\n df: pd.DataFrame,\n cfg: Config,\n image_dir: str,\n val_bbox_name: str,\n data_aug: bool,\n ):\n super().__init__()\n self.index = df.index\n self.x_paths = np.array(df.image)\n self.ids = np.array(df.individual_id, dtype=int) if hasattr(df, \"individual_id\") else np.full(len(df), -1)\n self.species = np.array(df.species, dtype=int) if hasattr(df, \"species\") else np.full(len(df), -1)\n self.cfg = cfg\n self.image_dir = image_dir\n self.df = df\n self.val_bbox_name = val_bbox_name\n self.data_aug = data_aug\n augments = []\n if data_aug:\n aug = cfg.aug\n augments = [\n A.Affine(\n rotate=(-aug.rotate, aug.rotate),\n translate_percent=(0.0, aug.translate),\n shear=(-aug.shear, aug.shear),\n p=aug.p_affine,\n ),\n A.RandomResizedCrop(\n self.cfg.image_size[0],\n self.cfg.image_size[1],\n scale=(aug.crop_scale, 1.0),\n ratio=(aug.crop_l, aug.crop_r),\n ),\n A.ToGray(p=aug.p_gray),\n A.GaussianBlur(blur_limit=(3, 7), p=aug.p_blur),\n A.GaussNoise(p=aug.p_noise),\n A.Downscale(scale_min=0.5, scale_max=0.5, p=aug.p_downscale),\n A.RandomGridShuffle(grid=(2, 2), p=aug.p_shuffle),\n A.Posterize(p=aug.p_posterize),\n A.RandomBrightnessContrast(p=aug.p_bright_contrast),\n A.Cutout(p=aug.p_cutout),\n A.RandomSnow(p=aug.p_snow),\n A.RandomRain(p=aug.p_rain),\n A.HorizontalFlip(p=0.5),\n ]\n augments.append(A.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]))\n augments.append(ToTensorV2()) # HWC to CHW\n self.transform = A.Compose(augments)\n\n def __len__(self):\n return len(self.ids)\n\n def get_original_image(self, i: int):\n bgr = cv2.imread(f\"{self.image_dir}/{self.x_paths[i]}\")\n rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)\n return rgb\n\n def __getitem__(self, i: int):\n image = self.get_original_image(i)\n # crop\n if self.data_aug:\n bbox_name = np.random.choice(list(self.cfg.bboxes.keys()), p=list(self.cfg.bboxes.values()))\n else:\n bbox_name = self.val_bbox_name\n bbox = None if bbox_name == \"none\" else self.df[bbox_name].iloc[i]\n if bbox is not None:\n xmin, ymin, xmax, ymax = bbox\n image = image[ymin:ymax, xmin:xmax]\n # resize\n image = cv2.resize(image, self.cfg.image_size, interpolation=cv2.INTER_CUBIC)\n # data augmentation\n augmented = self.transform(image=image)[\"image\"]\n return {\n \"original_index\": self.index[i],\n \"image\": augmented,\n \"label\": self.ids[i],\n \"label_species\": self.species[i],\n }\n\n\ndef load_bbox(cfg: Config, in_base_dir: str, bbox_name: str, is_train: bool) -> pd.Series:\n if bbox_name == \"detic\":\n filename = \"train2.csv\" if is_train else \"test2.csv\"\n tmp_df = pd.read_csv(f\"{in_base_dir}/{filename}\")\n low_conf = 
pd.Series([False for _ in range(len(tmp_df))])\n bbox = tmp_df.box.map(lambda s: list(map(int, s.split())) if s == s else None)\n elif bbox_name == \"fullbody\":\n filename = \"fullbody_train.csv\" if is_train else \"fullbody_test.csv\"\n tmp_df = pd.read_csv(f\"{in_base_dir}/{filename}\")\n low_conf = tmp_df.conf.map(lambda s: float(s[1:-1]) if s == s else -1) < cfg.bbox_conf_threshold\n bbox = tmp_df.bbox.map(lambda s: list(map(int, s[2:-2].split())))\n elif bbox_name == \"fullbody_charm\":\n filename = \"fullbody_train_charm.csv\" if is_train else \"fullbody_test_charm.csv\"\n tmp_df = pd.read_csv(f\"{in_base_dir}/{filename}\")\n low_conf = tmp_df.conf.map(lambda s: float(s[1:-1]) if s == s else -1) < cfg.bbox_conf_threshold\n bbox = tmp_df.bbox.map(lambda s: list(map(int, s[2:-2].split())) if s == s else None)\n elif bbox_name == \"backfin\":\n filename = \"train_backfin.csv\" if is_train else \"test_backfin.csv\"\n tmp_df = pd.read_csv(f\"{in_base_dir}/{filename}\")\n low_conf = tmp_df.conf.map(lambda s: float(s[1:-1]) if s == s else -1) < cfg.bbox_conf_threshold\n bbox = tmp_df.bbox.map(lambda s: list(map(int, s[2:-2].split())) if s == s else None)\n else:\n raise AssertionError()\n print(f\"{bbox_name} low conf: {low_conf.sum()} / {len(tmp_df)}\")\n bbox[low_conf] = None\n return bbox\n\n\ndef load_df(in_base_dir: str, cfg: Config, filename: str, is_train: bool) -> pd.DataFrame:\n df = pd.read_csv(f\"{in_base_dir}/{filename}\")\n\n # bbox\n for bbox_name in [\"detic\", \"fullbody\", \"fullbody_charm\", \"backfin\"]:\n df[bbox_name] = load_bbox(cfg, in_base_dir, bbox_name, is_train)\n\n # label encoder\n if hasattr(df, \"individual_id\"):\n label_encoder = preprocessing.LabelEncoder()\n label_encoder.classes_ = np.load(f\"{in_base_dir}/individual_id.npy\", allow_pickle=True)\n df.individual_id = label_encoder.transform(df.individual_id)\n assert cfg.num_classes == len(label_encoder.classes_)\n if hasattr(df, \"species\"):\n df.species.replace(\n {\n \"globis\": \"short_finned_pilot_whale\",\n \"pilot_whale\": \"short_finned_pilot_whale\",\n \"kiler_whale\": \"killer_whale\",\n \"bottlenose_dolpin\": \"bottlenose_dolphin\",\n },\n inplace=True,\n ) # https://www.kaggle.com/c/happy-whale-and-dolphin/discussion/305574\n label_encoder_species = preprocessing.LabelEncoder()\n label_encoder_species.classes_ = np.load(f\"{in_base_dir}/species.npy\", allow_pickle=True)\n df.species = label_encoder_species.transform(df.species)\n assert cfg.num_species_classes == len(label_encoder_species.classes_)\n return df\n","repo_name":"knshnb/kaggle-happywhale-1st-place","sub_path":"src/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":6359,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"48"} +{"seq_id":"11962525252","text":"import builtins\nfrom unittest.mock import patch\nfrom src.buffer import Buffer, Text\nimport pytest\n\n\nclass TestBuffer:\n @pytest.fixture\n def set_buffer_list_for_test(self, mocker):\n self.data = mocker.patch.object(\n Buffer,\n \"data\",\n [\n Text(\n \"If you are smartest person in the room, then you are in the wrong room\",\n 13,\n \"Decrypted\",\n )\n ],\n )\n\n @pytest.fixture\n def set_list_empty_list_for_test(self, mocker):\n self.data = mocker.patch.object(Buffer, \"data\", [])\n\n def test_show_buffer_call_with_mock_print(self, set_buffer_list_for_test):\n with patch(\"builtins.print\") as mock_print:\n Buffer.show_buffer()\n\n mock_print.assert_called_with(\n \"1. 
Text(contents='If you are smartest person in the room,\"\n \" then you are in the wrong room', rot_type=13, status='Decrypted')\\n\"\n )\n\n def test_show_buffer_with_set_buffer_list_for_test(self, set_buffer_list_for_test):\n assert len(self.data) == 1\n\n def test_add_func_by_append_str_to_set_buffer_list_for_test_logic(\n self, set_buffer_list_for_test\n ):\n Buffer.add(\n Text(\"I don't now you then you are in the wrong room\", 13, \"Decrypted\")\n )\n assert len(self.data) == 2\n\n def test_add_call(self, set_buffer_list_for_test):\n with patch.object(Buffer, \"add\") as mock_add:\n Buffer.add(\n Text(\"I don't now you then you are in the wrong room\", 13, \"Decrypted\")\n )\n\n mock_add.assert_called_once()\n\n def test_convert_with_set_buffer_list_for_test_logic(\n self, set_buffer_list_for_test\n ):\n assert Buffer.convert() == [\n {\n \"contents\": \"If you are smartest person in the room, then you are in the wrong room\",\n \"rot_type\": 13,\n \"status\": \"Decrypted\",\n }\n ]\n\n def test_load_from_dict_call_with_logic(self, set_list_empty_list_for_test):\n test_data = {\n \"data\": [\n {\n \"contents\": \"If you are smartest person in the room, then you are in the wrong room\",\n \"rot_type\": 47,\n \"status\": \"Decrypted\",\n }\n ]\n }\n with patch.object(Buffer, \"load_from_dict\") as mock_load_from_dict:\n Buffer.load_from_dict(test_data)\n\n mock_load_from_dict.assert_called_once_with(test_data)\n","repo_name":"GrzesiekKRK/Cipher","sub_path":"src/test/test_buffer.py","file_name":"test_buffer.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2103099588","text":"\"\"\"\r\n@Author : TW!\r\n\r\n@Time : 2021/5/9 18:06\r\n\"\"\"\r\nimport time\r\nfrom socket import *\r\n\r\nserverPort = 12000\r\nserverName = '192.168.233.128'\r\ncounter = 0\r\nsums = 0\r\nreceived, loss = 0, 0\r\nmaximum, minimum = 0.0, 0.0\r\nwhile counter < 10:\r\n clientSocket = socket(AF_INET, SOCK_DGRAM)\r\n message = 'ping ' + str(counter)\r\n counter += 1\r\n try:\r\n clientSocket.settimeout(1)\r\n begin = time.time()\r\n clientSocket.sendto(message.encode(), (serverName, serverPort))\r\n modifyMessage, severAddress = clientSocket.recvfrom(1024)\r\n end = time.time()\r\n rtt = end - begin\r\n maximum = max(rtt, maximum)\r\n minimum = min(rtt, minimum)\r\n sums += rtt\r\n received += 1\r\n except timeout:\r\n print('Sequence %d: Request time out' % counter)\r\n loss += 1\r\n clientSocket.close()\r\n else:\r\n print('Sequence %d: Reply from %s RTT = %.3fs' % (counter, serverName, rtt))\r\n\r\n clientSocket.close()\r\nprint('%s 的 Ping 统计信息: ' % serverName)\r\nprint('\\t数据包: 已发送 = 10,已接收 = %d,丢失 = %d (%d%% 丢失),' % (received, loss, loss / 10 * 100))\r\nprint('往返行程的估计时间(以秒为单位):')\r\nprint('\\t最短 = %.3fs, 最长 = %.3fs, 平均 = %.3fs' % (minimum, maximum, sums / received))\r\n\r\n","repo_name":"tw888666/Computer-Network","sub_path":"SocketProgramLab/lab2-UDpping/source/udpPingClient-1.py","file_name":"udpPingClient-1.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"71605386706","text":"from typing import Callable\nimport numpy as np\nimport torch\nfrom torch import optim, nn\nimport deepxde as dde\nfrom src.utils.utils import cart2pol_pt, lineseg_dists\n\nclass LFunctionBase:\n \"\"\"\n Lambda function.\n \"\"\"\n def __init__(self, X, geom) -> None:\n self.X = X\n self.geom = geom\n self.alpha = None\n\n 
def get_dist(self, x):\n '''\n Calculate the extended distance to x (minibatch).\n '''\n del x\n\n def get_alpha(self):\n r'''\n Calculate the parameter $\\alpha$.\n '''\n dists = self.get_dist(torch.tensor(self.X).float()).detach().cpu().numpy()\n dists_ = dists[~self.geom.on_boundary(self.X)]\n # make sure when it comes to the other nearest boundary\n # the coefficient goes down to exp(-5)\n self.alpha = 5 / np.min(dists_)\n \n def __call__(self, x):\n return torch.exp(-self.alpha * self.get_dist(x))\n\n\nclass LFunctionDisk(LFunctionBase):\n \"\"\"\n Lambda function for a 2D disk.\n \"\"\"\n def __init__(self, X, disk, inner=True) -> None:\n super().__init__(X, disk)\n self.center = torch.tensor(disk.center)\n self.radius = disk.radius\n self.inner = inner\n self.get_alpha()\n \n def get_dist(self, x):\n x = x[:, :2]\n if self.inner:\n return torch.linalg.norm(x - self.center, dim=1, keepdim=True) - self.radius\n else:\n return self.radius - torch.linalg.norm(x - self.center, dim=1, keepdim=True)\n\n\nclass LFunctionRectangle(LFunctionBase):\n \"\"\"\n Lambda function for a 2D rectangle.\n \"\"\"\n def __init__(self, X, rec, m_function: Callable) -> None:\n super().__init__(X, rec)\n self.xmin = rec.xmin\n self.xmax = rec.xmax\n self.m_function = m_function\n self.get_alpha()\n \n def get_dist(self, x):\n dist = torch.stack([\n x[:, 0] - self.xmin[0], - x[:, 0] + self.xmax[0],\n x[:, 1] - self.xmin[1], - x[:, 1] + self.xmax[1],\n ], dim=1)\n return self.m_function(dist)\n\n\nclass LFunctionOpenRectangle(LFunctionBase):\n \"\"\"\n Lambda function for a 2D (right) open rectangle.\\n\n |------------------\\n\n |\\n\n |------------------\n \"\"\"\n def __init__(self, X, rec, m_function: Callable) -> None:\n super().__init__(X, rec)\n self.xmin = rec.xmin\n self.xmax = rec.xmax\n self.m_function = m_function\n self.get_alpha()\n \n def get_dist(self, x):\n dist = torch.stack([\n x[:, 0] - self.xmin[0],\n x[:, 1] - self.xmin[1], - x[:, 1] + self.xmax[1],\n ], dim=1)\n return self.m_function(dist)\n\n\nclass LFunctionAxisLine(LFunctionBase):\n \"\"\"\n Lambda function for a line perpendicular to the axis.\n \"\"\"\n def __init__(self, X, geom, x_0, j, is_left=True) -> None:\n '''\n x_0 - intersection point\n j - axis number (start from zero)\n is_left - left or right boundary\n '''\n super().__init__(X, geom)\n self.x_0 = x_0\n self.j = j\n self.is_left = is_left\n self.get_alpha()\n \n def get_dist(self, x):\n if self.is_left:\n return x[:, self.j:self.j+1] - self.x_0\n else:\n return -x[:, self.j:self.j+1] + self.x_0\n\n\nclass DistNet(nn.Module):\n \"\"\"\n Network to produce a prediction of distance.\n \"\"\"\n def __init__(self, reference_points) -> None:\n super(DistNet, self).__init__()\n self.reference_points = [\n torch.tensor(reference_point) \n for reference_point in reference_points\n ]\n self.net = dde.nn.FNN([2 * len(reference_points)] + 3 * [30] + [1], \n \"tanh\", \"Glorot normal\")\n\n def forward(self, x) -> torch.Tensor:\n x_polars = []\n for reference_point in self.reference_points:\n delta_x = x - reference_point\n x_polars.extend(cart2pol_pt(delta_x[:, 0:1], delta_x[:, 1:]))\n x_polars = torch.cat(x_polars, dim=1)\n dist_pred = self.net(x_polars)\n return dist_pred\n\n\nclass LFunctionPolygon(LFunctionBase):\n \"\"\"\n Lambda function for a 2D polygon.\n \"\"\"\n def __init__(self, X, polygon, spatial_domain) -> None:\n super().__init__(X, polygon)\n self.polygon = polygon\n self.vertices_left = polygon.vertices\n self.vertices_right = np.roll(polygon.vertices, 1, 
axis=0)\n center_1 = np.mean(polygon.vertices[polygon.vertices[:,0]<0.5,:], axis=0)\n center_2 = np.mean(polygon.vertices[polygon.vertices[:,0]>=0.5,:], axis=0)\n # sample points\n eps = 0.01\n self.bbox = dde.geometry.CSGDifference(\n dde.geometry.Rectangle(\n xmin=[np.min(polygon.vertices[:, 0]) - eps, np.min(polygon.vertices[:, 1]) - eps],\n xmax=[np.max(polygon.vertices[:, 0]) + eps, np.max(polygon.vertices[:, 1]) + eps]\n ), polygon)\n self.spatial_domain = spatial_domain\n X, dists = self.sample_points(1024 * 6)\n self.model = DistNet([center_1, center_2])\n X = torch.tensor(X).float()\n dists = torch.tensor(dists).float()\n self.train(X, dists)\n self.get_alpha()\n \n def sample_points(self, n):\n points = np.concatenate((\n self.bbox.random_points(n * 5 // 6),\n self.spatial_domain.random_points(n * 1 // 6)\n )) \n dists = []\n for point in points:\n dists.append(\n [np.min(lineseg_dists(point, self.vertices_left, self.vertices_right))]\n )\n return points, np.array(dists)\n\n def loss_fn(self, dists, Y):\n loss = Y - dists\n return torch.mean(torch.abs(loss))\n\n def train(self, X, dists):\n print(\"Training extended dist for 2D polygon...\")\n n_epochs = 10000\n optimizer = optim.Adam(self.model.parameters(), lr=1e-3)\n scheduler = optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, patience=100, factor=0.75, min_lr=1e-5\n )\n for i in range(n_epochs):\n Y = self.model(X)\n loss = self.loss_fn(dists, Y)\n # Backpropagation\n optimizer.zero_grad() \n loss.backward()\n optimizer.step()\n scheduler.step(loss.item())\n if (i+1) % 1000 == 0 or i == 0:\n print(f\"[Epoch {i+1}/{n_epochs}] loss: {loss.item():>7f}\")\n # test\n X, dists = self.sample_points(1024)\n Y = self.get_dist(torch.tensor(X).float()).detach().cpu().numpy()\n print(f\"Finish training!\\nTesting loss: {np.mean(np.abs(Y - dists)):>7f}\")\n\n def get_dist(self, x):\n x = x[:, :2]\n return self.model(x)\n","repo_name":"csuastt/HardConstraint","sub_path":"src/HC/l_functions.py","file_name":"l_functions.py","file_ext":"py","file_size_in_byte":6699,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"33884798606","text":"i=1\r\nwhile i<=5:\r\n if i==3 :\r\n i+=1\r\n continue\r\n print('媳妇儿,我错了')\r\n i+=1\r\nelse :\r\n print('媳妇儿原谅我了')\r\n #与break不同的,只终止第三次,循环任然正常结束,故可以打印else后的代码\r\n","repo_name":"ldy-2001/Study1","sub_path":"27while...else之continue.py","file_name":"27while...else之continue.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43630313670","text":"\"\"\"\nUse bulk_xor to speed up xor calculate.\nLet f(x) = 1 ^ 2 ^ 3 ^ ... ^ x\nThen, xor all integers in the range from m to n = f(n) ^ f(m-1)\n\"\"\"\n\n\ndef answer(start, length):\n\n def bulk_xor(m, n):\n # f(x) = 1 ^ 2 ^ 3 ^ ... ^ x\n # bulk_xor(m, n) = m ^ m+1 ^ m+2 ... 
^ n = f(n) ^ f(m-1)\n m -= 1\n f_m = [m, 1, m + 1, 0][m % 4]\n f_n = [n, 1, n + 1, 0][n % 4]\n return f_m ^ f_n\n\n xor = 0\n for i in range(length):\n xor ^= bulk_xor(start, start + length - i - 1)\n start += length\n\n return xor\n\n\nprint(answer(0, 3))\nprint(answer(17, 4))\n","repo_name":"oneshan/foobar","sub_path":"queue_to_do/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"2896127795","text":"from django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom django.db.models import Count, Q\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom help_pages.models import ChamadoBlog\nfrom notifications.models import Notification\n\nfrom solicitation.decorators import complete_tutorial\nfrom solicitation.forms import (RespostaUsuarioForm, SerResponsavelForm,\n SolicitacoesForm, SolicitacoesUpdateForm)\nfrom solicitation.models import (RespostaSolicitacao, RespostaUsuario,\n Solicitacoes)\n\n\n@login_required\ndef index(request):\n template_name = 'solicitation/index.html'\n re = request.session['re']\n\n dashboard = Solicitacoes.objects.only('id').filter(\n ReSolicitante=re).aggregate(\n analise=Count('id', Q(StatusAtual=2)),\n resolvidos=Count('id', Q(StatusAtual__in=(3, 4))),\n pendentes=Count('id', Q(StatusAtual__in=(1, 5)))\n )\n\n admindash = Solicitacoes.objects.only('id').all().aggregate(\n analise=Count('id', Q(StatusAtual=2)),\n resolvidos=Count('id', Q(StatusAtual__in=(3, 4))),\n pendentes=Count('id', Q(StatusAtual__in=(1, 5)))\n )\n \n request_user = request.user\n users_notification = Notification.objects.filter(to_user=request_user, user_has_seen=0, user_has_archive=0).only('id').exists() \n\n context = { \n #USER SECTION\n 'dashboard': dashboard,\n 'admindash': admindash,\n 'users_notification': users_notification\n }\n return render(request, template_name, context)\n\n@login_required\ndef make_request(request):\n template_name = 'solicitation/make_request.html'\n \n request_user = request.user\n users_notification = Notification.objects.filter(to_user=request_user, user_has_seen=0, user_has_archive=0).only('id').exists() \n\n form = SolicitacoesForm(\n request.POST or None,\n request.FILES or None\n )\n if request.method == \"POST\":\n if form.is_valid():\n data = form.save(commit=False)\n data.PostoGradSolicitante = request.POST.get('nposto')\n data.ReSolicitante = request.POST['nre']\n data.NomeGuerraSolicitante = request.POST['nnomeguerra']\n data.CpfSolicitante = request.POST['ncpf']\n data.FuncaoSolicitante = request.POST['nfuncao']\n data.CodOpmSolicitante = request.POST['nopmcod']\n data.GrandeComandoSolicitante = request.POST['nopmcod1']\n data.UnidadeSolicitante = request.POST['nopmcod2']\n data.DepartamentoSolicitante = request.POST['nopmcod3']\n data.DivisaoSolicitante = request.POST['nopmcod4']\n data.SecaoSolicitante = request.POST['nopmcod5']\n data.save()\n\n for user in User.objects.filter(is_staff=True): \n Notification.objects.create(\n TipoNotificacao_id=1, from_user=request.user, to_user_id=user.id, to_admin=1, Solicitacao_id=data.id) \n\n messages.success(\n request,\n f'Solicitação realizada com sucesso'\n )\n return redirect('solicitation:user_list_request')\n\n context = {\n 'form': form,\n 'users_notification': users_notification,\n }\n return render(request, template_name, context)\n\n@login_required\ndef 
details_request(request, id):\n template_name = 'solicitation/details_request.html'\n\n request_user = request.user\n users_notification = Notification.objects.filter(to_user=request_user, user_has_seen=0, user_has_archive=0).only('id').exists() \n\n username = request.user.username\n obj = Solicitacoes.objects.get(id=id)\n his = Solicitacoes.Historico.all().filter(id=id).order_by('history_date')\n\n if obj.DataDoEncerramento == None:\n tempo_atendimento = '24 Horas'\n else:\n tempo_atendimento = obj.DataDoEncerramento - obj.DataDaSolicitacao\n tempo_atendimento = str(tempo_atendimento).split(\".\")[0]\n\n respostas = RespostaSolicitacao.objects.select_related(\n 'RespostaResponsavel').select_related(\n 'Solicitacao').filter(\n Solicitacao=obj.id).order_by('RespostaData')\n\n\n respostasUsuarios = RespostaUsuario.objects.all().select_related(\n 'RespostaUsuarioResponsavel').select_related(\n 'RespostaDaSolicitacao')\n\n if request.user.is_authenticated and username != obj.CpfSolicitante:\n return redirect('solicitation:user_list_request')\n\n form_resposta_user = RespostaUsuarioForm(request.POST or None)\n if request.method == \"POST\" and 'resposta_user' in request.POST:\n resposta_user_id = int(request.POST.get('resposta_id'))\n respostaSolicitacaoid = RespostaSolicitacao.objects.get(\n id=resposta_user_id)\n\n if form_resposta_user.is_valid():\n add_resposta_user = form_resposta_user.save(commit=False)\n add_resposta_user.RespostaUsuarioResponsavel_id = request.user.id\n add_resposta_user.RespostaDaSolicitacao_id = respostaSolicitacaoid.id\n add_resposta_user.save()\n\n solicitante = obj.CpfSolicitante\n solicitante_user = User.objects.get(username=solicitante)\n resposta = RespostaUsuario.objects.last().id \n responsavel = obj.Responsavel \n \n Notification.objects.create(\n TipoNotificacao_id=8, from_user=solicitante_user, to_user=responsavel, to_admin=1, Solicitacao_id=id, RespostaUser_id=resposta)\n\n return redirect('solicitation:datails_requests', id=id)\n\n form = SerResponsavelForm(request.POST or None, instance=obj)\n if request.method == \"POST\" and 'resolvidoo' in request.POST:\n if form.is_valid():\n add_status = form.save(commit=False)\n add_status.Responsavel_id = request.user.id\n add_status.StatusAtual_id = 3\n add_status.save()\n\n solicitacao = Solicitacoes.objects.first().id\n\n for user in User.objects.filter(is_staff=True): \n Notification.objects.create(\n TipoNotificacao_id=10, from_user=request.user, to_user_id=user.id, to_admin=1, Solicitacao_id=solicitacao) \n \n return redirect('solicitation:datails_requests', id=id)\n\n try:\n notificationid = Notification.objects.get(TipoNotificacao_id=7, Solicitacao_id=id)\n getrespostaid = notificationid.RespostaAdmin_id\n visualizou_a_resposta = Notification.objects.get(TipoNotificacao_id=7, RespostaAdmin_id=getrespostaid)\n verificaresposta = RespostaSolicitacao.objects.get(\n Solicitacao_id=id).id \n except:\n visualizou_a_resposta = None\n verificaresposta = None\n\n ultimostatus = obj.StatusAtual\n\n ultres = None\n testeultres = RespostaSolicitacao.objects.only('id').filter(Solicitacao=obj.id).exists()\n if testeultres == False:\n ultres = False\n else:\n ultres = RespostaSolicitacao.objects.filter(\n Solicitacao=obj.id).order_by(\"-id\")[0]\n\n re = request.session['re']\n dashboard = Solicitacoes.objects.only('id').filter(\n ReSolicitante=re).aggregate(\n analise=Count('id', Q(StatusAtual=2)),\n resolvidos=Count('id', Q(StatusAtual__in=(3, 4))),\n pendentes=Count('id', Q(StatusAtual__in=(1, 5)))\n )\n\n 
respostaresponsavel = None\n    try:\n        respostaresponsavel = RespostaSolicitacao.objects.get(Solicitacao=id)\n    except:\n        pass\n\n\n    if obj.DataDoEncerramento is None:\n        tempo_atendimento = '24 Horas'\n        dias = 0\n        horas = 0\n        minutos = 0\n    else:\n        tempo_atendimento = obj.DataDoEncerramento - obj.DataDaSolicitacao\n        # use total_seconds(); .seconds alone drops whole days from the duration\n        tempo_atendimento = int(tempo_atendimento.total_seconds())\n        dias, tempo_atendimento = divmod(tempo_atendimento, 86400)\n        horas, tempo_atendimento = divmod(tempo_atendimento, 3600)\n        minutos, tempo_atendimento = divmod(tempo_atendimento, 60)\n\n    context = {\n        'details': obj,\n        'historico': his,\n        'respostas': respostas,\n        'respostaresponsavel': respostaresponsavel,\n        'form_resposta_user': form_resposta_user,\n        'respostasUsuarios': respostasUsuarios,\n        'tempo_atendimento': tempo_atendimento,\n        'ultimostatus': ultimostatus,\n        'visualizou_a_resposta': visualizou_a_resposta,\n        'verificaresposta': verificaresposta,\n        'dia': dias,\n        'hora': horas,\n        'minuto': minutos,\n        'segundo': tempo_atendimento,\n        'users_notification': users_notification,\n\n        'ultres': ultres,\n        # DASH\n        'dashboard': dashboard,\n\n\n    }\n    return render(request, template_name, context)\n\n@login_required\ndef update_request(request, id):\n    template_name = 'solicitation/update_request.html'\n    obj = get_object_or_404(Solicitacoes, id=id)\n\n    request_user = request.user\n    users_notification = Notification.objects.filter(to_user=request_user, user_has_seen=0, user_has_archive=0).only('id').exists()\n\n    username = request.user.username\n\n    if request.user.is_authenticated and username != obj.CpfSolicitante:\n        return redirect('solicitation:user_list_request')\n\n    form = SolicitacoesUpdateForm(request.POST or None, request.FILES or None, instance=obj)\n    if form.is_valid():\n        add_user = form.save(commit=False)\n        add_user.usuario_id = request.user.id\n        add_user.save()\n\n        return redirect('solicitation:user_list_request')\n\n    context = {\n        'form': form,\n        'users_notification': users_notification,\n    }\n    return render(request, template_name, context)\n\n@login_required\ndef user_list_request(request):\n    template_name = 'solicitation/user_list_requests.html'\n\n    request_user = request.user\n    users_notification = Notification.objects.filter(to_user=request_user, user_has_seen=0, user_has_archive=0).only('id').exists()\n\n    re = request.session['re']\n\n    chamados = Solicitacoes.objects.filter(\n        ReSolicitante=re).only(\n        'TituloDaSolicitacao', 'CategoriaDaSolicitacao',\n        'DataDaSolicitacao', 'StatusAtual')\n    \n    dashboard = Solicitacoes.objects.only('id').filter(\n        ReSolicitante=re).aggregate(\n        analise=Count('id', Q(StatusAtual=2)),\n        resolvidos=Count('id', Q(StatusAtual__in=(3, 4))),\n        pendentes=Count('id', Q(StatusAtual__in=(1, 5)))\n    )\n\n    context = {\n        'chamados': chamados,\n        'dashboard': dashboard,\n        'users_notification': users_notification,\n    }\n    return render(request, template_name, context)\n","repo_name":"sidneymarcelofranco/ChamadoDP","sub_path":"solicitation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"70409331345","text":"#! /usr/bin/env python\n\nimport numpy as np\n\n\n\n\nclass RadMod:\n\n    def __init__ (self, sexcat1: str, sexcat2: str, newcat: str, tol=1, red=1, \n                 minrad=10,scalecor=1):\n        \"\"\"\n        This routine corrects wrongly estimated, overly large radii in SExtractor catalogs. \n        It does so by comparing the radii of two catalogs of the same image. 
It \n        keeps the smaller radius of the two catalogs.\n        The object radii in sexcat1 will be replaced with the ones from sexcat2 if the\n        tolerance is exceeded. newcat will be the modified catalog\n        \"\"\"\n\n        #first cat\n        N,Alpha,Delta,X,Y,Mg,Kr,Fluxr,Isoa,Ai,E,Theta,Bkgd,Idx,Flg=np.genfromtxt(sexcat1,delimiter=\"\",unpack=True)\n\n\n        #second cat\n        N2,Alpha2,Delta2,X2,Y2,Mg2,Kr2,Fluxr2,Isoa2,Ai2,E2,Theta2,Bkgd2,Idx2,Flg2=np.genfromtxt(sexcat2,delimiter=\"\",unpack=True)\n\n\n        dmax= 3 # max distance to match the same object in different catalogs \n\n        count = 0\n        count2 = 0\n\n        for idx, item in enumerate(N):\n\n            foundflag = False\n\n            dx = X2 - X[idx]\n            dy = Y2 - Y[idx]\n            dist = np.sqrt(dx**2 + dy**2)\n\n            distmin = dist.min()\n            idx2 = dist.argmin()\n\n            if distmin <= dmax:\n\n                foundflag = True\n                #cflag1 = self.CheckFlag(Flg[idx],4) #check for saturated flag \n                #cflag2 = self.CheckFlag(Flg2[idx2],4) #check for saturated flag \n\n                cflag1 = False \n                cflag2 = False \n\n\n                #the comparison is based on the Fluxr parameter\n\n                #rad = Fluxr[idx]\n                #rad2 = Fluxr2[idx2]\n\n                # or you can use kr * ai\n\n                rad = Kr[idx] * Ai[idx]\n                rad2 = Kr2[idx2] * Ai2[idx2]\n\n                kr1 = Kr[idx]\n                kr2 = Kr2[idx2]\n\n                ai1 = Ai[idx]\n                ai2 = Ai2[idx2]\n\n\n                if rad2 < .1: \n                    rad2 = .1 #avoids division by zero\n\n\n                den = rad2 \n\n                num = rad - rad2\n\n                comp = num/den\n\n                if ((cflag1 and cflag2) == False): \n\n                    if comp > tol:\n\n                        count +=1 \n\n                        # reduction factor included\n                        #Kr[idx],Fluxr[idx],Isoa[idx],Ai[idx] = red * Kr2[idx2],Fluxr2[idx2],Isoa2[idx2],Ai2[idx2]\n                        Kr[idx],Fluxr[idx],Isoa[idx],Ai[idx] = Kr2[idx2],Fluxr2[idx2],Isoa2[idx2],Ai2[idx2]\n\n                        #Ai[idx] = Ai2[idx2]\n                        #Kr[idx] = Kr2[idx2]\n\n\n                        #this is to avoid galaxies with a very low radius after interchanging\n                        #radii. However we don't want to increase it to a larger \n                        #radius than the previous one:\n\n                        #kr2 is the smaller radius of the two \n                        \n\n                        comp = rad/(scalecor*rad2) - 1\n                        if comp > tol:\n                            Kr[idx] = scalecor*Kr[idx]\n                            #Ai[idx] = scalecor*Ai[idx]\n                        \n\n            if foundflag == False:\n\n                # If the object was not found in the other catalog, then\n                # the Kr is reduced by a factor \"red\". 
Final radius\n                # cannot be less than minrad\n                # This is done to avoid faint large galaxies.\n                \n\n                Kr[idx]= red * Kr[idx]\n\n                if Kr[idx]*Ai[idx] < minrad: \n\n                    Kr[idx] = minrad/Ai[idx]\n                \n                count2 +=1 \n\n        \n\n        line = \"catalog {}: {} objects with modified radius \".format(sexcat1,count)\n        print(line)\n\n\n        line = \"catalog {}: {} objects with reduced radius \".format(sexcat1,count2)\n        print(line)\n\n\n        #writing catalogs\n\n        fout = open(newcat, \"w\")\n\n        for idx, item in enumerate(N):\n\n            line=\"{0:.0f} {1} {2} {3} {4} {5} {6} {7} {8:.0f} {9} {10} {11} {12} {13} {14:.0f} \\n\".format(N[idx], Alpha[idx], Delta[idx], X[idx], Y[idx], Mg[idx], Kr[idx], Fluxr[idx], Isoa[idx], Ai[idx], E[idx], Theta[idx], Bkgd[idx], Idx[idx], Flg[idx])\n\n            fout.write(line)\n\n        fout.close()\n\n\n\n    def CheckFlag(self,val: int, check: int) -> bool:\n        \"Check for flag contained in val, returns True if found \"\n\n        flag = False\n        mod = 1\n        maxx=128\n\n\n        while (mod != 0):\n\n            res = int(val / maxx)\n\n            if (maxx == check and res == 1):\n\n                flag = True\n\n            mod = val % maxx\n\n            val = mod\n            maxx = maxx / 2\n\n        return flag\n\n\n","repo_name":"canorve/CluSex","sub_path":"src/clusex/lib/radcor.py","file_name":"radcor.py","file_ext":"py","file_size_in_byte":4648,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"}
{"seq_id":"22049169972","text":"from django.shortcuts import render\nfrom django.http.response import JsonResponse\nfrom main.models import *\nimport datetime\nimport json\n\n\ndef home(request):\n    return render(request, 'main/home.html')\n\n\ndef cdd(request):\n    # defined here so the render() in the POST branch below can use it\n    template_name = 'main/cdd.html'\n    if request.method == \"POST\":\n        try:\n            # get the date and bring the data and send again\n            date = request.POST.get('date')\n            prev_r = PrevReport.objects.filter(date=date).first()\n            if not prev_r:\n                prev_r = PrevReport()\n\n            interv_r = IntervReport.objects.filter(date=date).first()\n            if not interv_r:\n                interv_r = IntervReport()\n\n            env_r = EnvReport.objects.filter(date=date).first()\n            if not env_r:\n                env_r = EnvReport()\n\n            medic_r = MedicReport.objects.filter(date=date).first()\n            if not medic_r:\n                medic_r = MedicReport()\n\n            adv_r = AdvReport.objects.filter(date=date).first()\n            if not adv_r:\n                adv_r = AdvReport()\n\n            prev_a = PrevActivity.objects.filter(report=prev_r)\n            interv_a = IntervActivity.objects.filter(report=interv_r)\n            env_a = EnvActivity.objects.filter(report=env_r)\n            medic_a = MedicActivity.objects.filter(report=medic_r)\n            adv_a = AdvActivity.objects.filter(report=adv_r)\n\n\n            # values that were not stored are not zeros; make them zeros where needed\n            try:\n                if prev_r.so_sc_24_prev:\n                    prev_rso_sc_24_prev = int(prev_r.so_sc_24_prev)\n                else : \n                    prev_rso_sc_24_prev = 0\n\n                if interv_r.so_sc_24:\n                    interv_rso_sc_24 = int(interv_r.so_sc_24)\n                else : \n                    interv_rso_sc_24 = 0\n\n                if env_r.so_sc_24:\n                    env_rso_sc_24 = int(env_r.so_sc_24)\n                else : \n                    env_rso_sc_24 = 0\n\n                if medic_r.so_sc_24:\n                    medic_rso_sc_24 = int(medic_r.so_sc_24)\n                else : \n                    medic_rso_sc_24 = 0\n\n                if prev_r.so_sc_24_jv:\n                    prev_rso_sc_24_jv = int(prev_r.so_sc_24_jv)\n                else : \n                    prev_rso_sc_24_jv = 0\n                \n\n                total_24 = prev_rso_sc_24_prev + interv_rso_sc_24 + \\\n                    env_rso_sc_24 + medic_rso_sc_24 + \\\n                    prev_rso_sc_24_jv\n                \n\n                if prev_r.so_sc_mtd_prev:\n                    prev_rso_sc_mtd_prev = int(prev_r.so_sc_mtd_prev)\n                else : \n                    prev_rso_sc_mtd_prev = 0\n\n                if interv_r.so_sc_mtd:\n                    interv_rso_sc_mtd = int(interv_r.so_sc_mtd)\n                else : \n                    interv_rso_sc_mtd = 0\n\n                if env_r.so_sc_mtd:\n                    env_rso_sc_mtd = int(env_r.so_sc_mtd)\n                else : \n                    env_rso_sc_mtd = 0\n\n                if 
medic_r.so_sc_mtd:\n                    medic_rso_sc_mtd = int(medic_r.so_sc_mtd)\n                else : \n                    medic_rso_sc_mtd = 0\n\n                if prev_r.so_sc_mtd_jv:\n                    prev_rso_sc_mtd_jv = int(prev_r.so_sc_mtd_jv)\n                else : \n                    prev_rso_sc_mtd_jv = 0\n\n                total_mtd = prev_rso_sc_mtd_prev + interv_rso_sc_mtd \\\n                    + env_rso_sc_mtd + medic_rso_sc_mtd + prev_rso_sc_mtd_jv\n\n            except Exception as e:\n                total_mtd = \"Invalid inputs!\"\n                total_24 = \"Invalid inputs!\"\n\n            context = {\n                'date': date,\n                'prev_r': prev_r,\n                'interv_r': interv_r,\n                'env_r': env_r,\n                'medic_r': medic_r,\n                'adv_r': adv_r,\n                'prev_a': prev_a,\n                'interv_a': interv_a,\n                'env_a': env_a,\n                'medic_a': medic_a,\n                'adv_a': adv_a,\n                'total_24': total_24,\n                'total_mtd': total_mtd\n            }\n\n            print(context['env_r'].solid_waste)\n            return render(request, template_name, context)\n        except Exception as e:\n            print(f'Error while building the cdd report context: {e}')\n    else:\n        context = {\n            'date': '...',\n            'prev_r': '...',\n            'interv_r': '...',\n            'env_r': '...',\n            'medic_r': '...',\n            'adv_r': '...',\n            'prev_a': '...',\n            'interv_a': '...',\n            'env_a': '...',\n            'medic_a': '...',\n            'adv_a': '...',\n            'total_24': '...',\n            'total_mtd': '...'\n        }\n        return render(request, 'main/cdd.html', context)\n\n\ndef medic(request):\n    return render(request, 'main/medic.html')\n\n\ndef prev(request):\n    return render(request, 'main/prev.html')\n\n\ndef adv(request):\n    return render(request, 'main/adv.html')\n\n\ndef env(request):\n    return render(request, 'main/env.html')\n\n\ndef interv(request):\n    return render(request, 'main/interv.html')\n\n\ndef submit_adv(request):\n    data = json.loads(request.GET.get('data'))\n    date = data['date']\n    name = data['name']\n    sosc_24 = data['sosc_24h']\n    sosc_mtd = data['sosc_mtd']\n    activities = data['activities']\n\n    report = AdvReport(date=date, name=name,\n                       so_sc_24=sosc_24, so_sc_mtd=sosc_mtd)\n    report.save()\n\n    for r in activities:\n        a = AdvActivity(report=report, name=r['name'],\n                        day=r['day'], number=r['number'], act_type=r['type'])\n        a.save()\n\n    return JsonResponse({\n        'state': 'good'\n    })\n\n\ndef submit_prev(request):\n    data = json.loads(request.GET.get('data'))\n\n    date = data['date']\n    name = data['name']\n    so_sc_24_prev = data['so_sc_24_prev']\n    so_sc_24_jv = data['so_sc_24_jv']\n    so_sc_mtd_prev = data['so_sc_mtd_prev']\n    so_sc_mtd_jv = data['so_sc_mtd_jv']\n    incident_24 = data['incident_24']\n    incident_mtd_rec = data['incident_mtd_rec']\n    incident_mtd_all = data['incident_mtd_all']\n    incident_ytd_rec = data['incident_ytd_rec']\n    incident_ytd_all = data['incident_ytd_all']\n    manpower_cds = data['manpower_cds']\n    manpower_eng = data['manpower_eng']\n    manpower_insp = data['manpower_insp']\n    manpower_tech = data['manpower_tech']\n    inspection_audit_prev = data['inspection_audit_prev']\n    inspection_audit_jv = data['inspection_audit_jv']\n    num_ptw_cpf = data['num_ptw_cpf']\n    num_ptw_projdeb = data['num_ptw_projdeb']\n    num_ptw_log = data['num_ptw_log']\n    num_ptw_total = data['num_ptw_total']\n    gasdet_avail = data['gasdet_avail']\n    gasdet_issued = data['gasdet_issued']\n    gasdet_outofserv = data['gasdet_outofserv']\n\n    activities = data['activities']\n\n    report = PrevReport(\n        date=date,\n        name=name,\n        so_sc_24_prev=so_sc_24_prev,\n        so_sc_24_jv=so_sc_24_jv,\n        so_sc_mtd_prev=so_sc_mtd_prev,\n        so_sc_mtd_jv=so_sc_mtd_jv,\n        incident_24=incident_24,\n        incident_mtd_rec=incident_mtd_rec,\n        incident_mtd_all=incident_mtd_all,\n        incident_ytd_rec=incident_ytd_rec,\n        incident_ytd_all=incident_ytd_all,\n        manpower_cds=manpower_cds,\n        manpower_eng=manpower_eng,\n        manpower_insp=manpower_insp,\n        
manpower_tech=manpower_tech,\n inspection_audit_prev=inspection_audit_prev,\n inspection_audit_jv=inspection_audit_jv,\n num_ptw_cpf=num_ptw_cpf,\n num_ptw_projdeb=num_ptw_projdeb,\n num_ptw_log=num_ptw_log,\n num_ptw_total=num_ptw_total,\n gasdet_avail=gasdet_avail,\n gasdet_issued=gasdet_issued,\n gasdet_outofserv=gasdet_outofserv\n )\n\n report.save()\n\n for r in activities:\n a = PrevActivity(report=report, name=r['name'],\n day=r['day'], number=r['number'], act_type=r['type'])\n a.save()\n\n return JsonResponse({\n 'state': 'good'\n })\n\n\ndef submit_medic(request):\n data = json.loads(request.GET.get('data'))\n\n date = data['date']\n so_sc_24 = data['so_sc_24']\n so_sc_mtd = data['so_sc_mtd']\n incident = data['incident']\n doc = data['doc']\n cpf = data['cpf']\n bdv_jour = data['bdv_jour']\n bdv_nuit = data['bdv_nuit']\n iacp_jour = data['iacp_jour']\n iacp_nuit = data['iacp_nuit']\n toyota = data['toyota']\n renault = data['renault']\n mercedes = data['mercedes']\n mercedes4x4 = data['mercedes4x4']\n equip_hors_serv = data['equip_hors_serv']\n cons_jv = data['cons_jv']\n cons_extra = data['cons_extra']\n embauche_jv = data['embauche_jv']\n embauche_extra = data['embauche_extra']\n periodique = data['periodique']\n soins_genereux = data['soins_genereux']\n\n activities = data['activities']\n\n report = MedicReport(\n date=date,\n so_sc_24=so_sc_24,\n so_sc_mtd=so_sc_mtd,\n incident=incident,\n doc=doc,\n cpf=cpf,\n bdv_jour=bdv_jour,\n bdv_nuit=bdv_nuit,\n iacp_jour=iacp_jour,\n iacp_nuit=iacp_nuit,\n toyota=toyota,\n renault=renault,\n mercedes=mercedes,\n mercedes4x4=mercedes4x4,\n equip_hors_serv=equip_hors_serv,\n cons_jv=cons_jv,\n cons_extra=cons_extra,\n embauche_jv=embauche_jv,\n embauche_extra=embauche_extra,\n periodique=periodique,\n soins_genereux=soins_genereux\n )\n\n report.save()\n\n for r in activities:\n a = MedicActivity(report=report, name=r['name'],\n day=r['day'], number=r['number'], act_type=r['type'])\n a.save()\n\n return JsonResponse({\n 'state': 'good'\n })\n\n\ndef submit_interv(request):\n data = json.loads(request.GET.get('data'))\n\n date = data['date']\n so_sc_24 = data['so_sc_24']\n so_sc_mtd = data['so_sc_mtd']\n etat_rescue = data['etat_rescue']\n etat_fire_fighting = data['etat_fire_fighting']\n etat_vma45 = data['etat_vma45']\n etat_silvani = data['etat_silvani']\n etat_landrover = data['etat_landrover']\n etat_others = data['etat_others']\n pers_cmt = data['pers_cmt']\n pers_ce = data['pers_ce']\n pers_tech = data['pers_tech']\n pers_cond = data['pers_cond']\n pers_agt = data['pers_agt']\n pers_total = data['pers_total']\n inspec_cpf = data['inspec_cpf']\n inspec_bdv = data['inspec_bdv']\n inspec_cc = data['inspec_cc']\n inspec_iacp = data['inspec_iacp']\n inspec_airstrip = data['inspec_airstrip']\n perm_jour = data['perm_jour']\n perm_nuit = data['perm_nuit']\n alarms = data['alarms']\n\n activities = data['activities']\n\n report = IntervReport(\n date=date,\n so_sc_24=so_sc_24,\n so_sc_mtd=so_sc_mtd,\n etat_rescue=etat_rescue,\n etat_fire_fighting=etat_fire_fighting,\n etat_vma45=etat_vma45,\n etat_silvani=etat_silvani,\n etat_landrover=etat_landrover,\n etat_others=etat_others,\n pers_cmt=pers_cmt,\n pers_ce=pers_ce,\n pers_tech=pers_tech,\n pers_cond=pers_cond,\n pers_agt=pers_agt,\n pers_total=pers_total,\n inspec_cpf=inspec_cpf,\n inspec_bdv=inspec_bdv,\n inspec_cc=inspec_cc,\n inspec_iacp=inspec_iacp,\n inspec_airstrip=inspec_airstrip,\n perm_jour=perm_jour,\n perm_nuit=perm_nuit,\n alarms=alarms\n )\n\n report.save()\n\n for r in 
activities:\n        a = IntervActivity(report=report, name=r['name'],\n                           day=r['day'], number=r['number'], act_type=r['type'])\n        a.save()\n\n    return JsonResponse({\n        'state': 'good'\n    })\n\n\ndef submit_env(request):\n    data = json.loads(request.GET.get('data'))\n\n    date = data['date']\n    name = data['name']\n    so_sc_24 = data['so_sc_24']\n    so_sc_mtd = data['so_sc_mtd']\n    incident_24 = data['incident_24']\n    incident_mtd = data['incident_mtd']\n    incident_ytd = data['incident_ytd']\n    solid_waste = data['solid_waste']\n    domestic_water_quality = data['domestic_water_quality']\n    waste_water_quality_cpf = data['waste_water_quality_cpf']\n    waste_water_quality_iacp = data['waste_water_quality_iacp']\n    waste_water_quality_sarpi = data['waste_water_quality_sarpi']\n    waste_water_quality_military = data['waste_water_quality_military']\n    swimming_pool = data['swimming_pool']\n\n    activities = data['activities']\n\n    report = EnvReport(\n        date=date,\n        name=name,\n        so_sc_24=so_sc_24,\n        so_sc_mtd=so_sc_mtd,\n        incident_24=incident_24,\n        incident_mtd=incident_mtd,\n        incident_ytd=incident_ytd,\n        solid_waste=solid_waste,\n        domestic_water_quality=domestic_water_quality,\n        waste_water_quality_cpf=waste_water_quality_cpf,\n        waste_water_quality_iacp=waste_water_quality_iacp,\n        waste_water_quality_sarpi=waste_water_quality_sarpi,\n        waste_water_quality_military=waste_water_quality_military,\n        swimming_pool=swimming_pool\n    )\n\n    report.save()\n\n    for r in activities:\n        a = EnvActivity(report=report, name=r['name'],\n                        day=r['day'], number=r['number'], act_type=r['type'])\n        a.save()\n\n    return JsonResponse({\n        'state': 'good'\n    })\n","repo_name":"glennyrenner/dailyreports","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"13271783284","text":"\"\"\"\n# Definition for a Node.\nclass Node:\n    def __init__(self, val: int = 0, left: 'Node' = None, right: 'Node' = None, next: 'Node' = None):\n        self.val = val\n        self.left = left\n        self.right = right\n        self.next = next\n\"\"\"\n\nclass Solution:\n    def connect(self, root: 'Node') -> 'Node':\n        # 1 BFS\n        # if root is None:\n        #     return None\n        # q = [root]\n        # while q:\n        #     n = len(q)\n        #     for i in range(n):\n        #         node = q.pop(0)\n        #         if i + 1 < n:\n        #             node.next = q[0]\n        #         else:\n        #             node.next = None\n        #         if node.left:\n        #             q.append(node.left)\n        #         if node.right:\n        #             q.append(node.right)\n        # return root\n\n        # 2 DFS\n        if root is None:\n            return None\n        leftmost = root\n        while leftmost.left:\n            head = leftmost\n            while head:\n                head.left.next = head.right\n                if head.next:\n                    head.right.next = head.next.left\n                head = head.next\n            leftmost = leftmost.left\n        return root","repo_name":"maxwang967/kick-start","sub_path":"leetcode/116.py","file_name":"116.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"33376950884","text":"'''\n\nDesign: read the deposit for each day of the week, display each day's deposit amount, and the total deposited for the week: \n\n1. This case has a fixed number of passes (7 days), so a for loop fits well\n2. Use the loop to step the day number from 1 to 7\n3. Build the list\n4. 
Build a loop and add up all the elements\n\n'''\n\ndeposit = []\ninMoney = total = 0 # name the current element of the list deposit inMoney, declare the variable total for the running sum, and initialize both to 0\n\nfor i in range(1 , 8): # loop that shows the input prompt\n    inMoney = int(input(\"请输入第{}天的存款:\".format(i))) \n    deposit.append(inMoney)\n\nfor money in deposit: # a second loop accumulates the total, using another variable money\n    total += money\n\nprint(\" 总额为:{}\".format(total))\n\n\n\n\n\n'''\nfor i in range(1 , 8):\n    money = int(input(\"请输入第{}天的存款:\".format(i)))   # show the input prompt\n    for j in range( 1 , i + 1 ):    # use a nested loop \n        deposit.append(money)\n        total += money\nprint(\"第{}天的存款金额为{} , 总存款额为{}\".format(j , i , total) , end = \"\")\n\n'''","repo_name":"alankowabunga/Python","sub_path":"python-textbook/Ch05串列與元組/drill_forloop.py","file_name":"drill_forloop.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"36572173314","text":"# -*-coding:utf-8-*-\n\nimport serial\nimport serial.tools.list_ports\nimport time\nimport tool\nimport config\nimport json\n\nser = serial.Serial(config.serial_port, 19200, timeout=0.1)  # on Windows, connect through the COM1 serial port\n\n\ndef deal230(response):\n    # split the data (serial data arrives as bytes, so decode it before splitting)\n    array = response.decode(errors='ignore').split(' ')\n    try:\n        base = {\"PHONE\": array[1], \"SEX\": array[2], \"AGE\": array[4], \"HEIGHT\": array[3], \"TEST_TIME\": array[7]}\n\n        # Body composition analysis\n        bodys = {}\n        bodys['WT'] = array[5]  # weight\n        bodys['BFM'] = array[17]  # body fat mass\n        bodys['BFM_MIN'] = array[152]  # Lower Limit(BFM Normal Range)\n        bodys['BFM_MAX'] = array[151]  # Upper Limit(BFM Normal Range)\n        bodys['TBW'] = array[18]  # total body water\n        bodys['TBW_MIN'] = array[173]  # Lower Limit(TBW Normal Range)\n        bodys['TBW_MAX'] = array[174]  # Upper Limit(TBW Normal Range)\n        bodys['FFM'] = array[19]  # fat-free mass\n        bodys['FFM_MIN'] = array[175]  # Lower Limit(FFM Normal Range)\n        bodys['FFM_MAX'] = array[176]  # Upper Limit(FFM Normal Range)\n        bodys['WT_MIN'] = array[154]  # Lower Limit(Weight Normal Range)\n        bodys['WT_MAX'] = array[153]  # Upper Limit(Weight Normal Range)\n        bodys['SMM'] = array[20]  # skeletal muscle mass\n        bodys['SMM_MIN'] = array[156]  # Lower Limit(SMM Normal Range)\n        bodys['SMM_MAX'] = array[155]  # Upper Limit(SMM Normal Range)\n        bodys['LLA'] = array[104]  # LBM of Left Arm (segmental lean)\n        bodys['LRA'] = array[103]  # LBM of Right Arm\n        bodys['LT'] = array[105]  # LBM of Trunk\n        bodys['LLL'] = array[107]  # LBM of Left Leg\n        bodys['LRL'] = array[106]  # LBM of Right Leg\n        bodys['PBFLA'] = array[120]  # PBF of Left Arm (segmental fat)\n        bodys['FLA'] = array[104]  # FFM of Left Arm\n        bodys['PBFRA'] = array[119]  # PBF of Right Arm\n        bodys['FRA'] = array[103]  # FFM of Right Arm\n        bodys['PBFT'] = array[121]  # PBF of Trunk\n        bodys['FT'] = array[105]  # FFM of Trunk\n        bodys['PBFLL'] = array[123]  # PBF of Left Leg\n        bodys['FLL'] = array[107]  # FFM of Left Leg\n        bodys['PBFRL'] = array[122]  # PBF of Right Leg\n        bodys['FRL'] = array[106]  # FFM of Right Leg\n        bodys['BMI'] = array[22]  # body mass index\n        bodys['BMI_MIN'] = array[158]\n        bodys['BMI_MAX'] = array[157]\n        bodys['PBF'] = array[191]  # percent body fat\n        bodys['PBF_MIN'] = array[160]\n        bodys['PBF_MAX'] = array[159]\n        bodys['WHR'] = array[24]  # waist-hip ratio\n        bodys['WHR_MIN'] = array[162]\n        bodys['WHR_MAX'] = array[161]\n        bodys['BMR'] = array[27]  # basal metabolic rate\n        bodys['BMR_MIN'] = array[171]\n        bodys['BMR_MAX'] = array[172]\n        bodys['MC'] = array[25]  # muscle control\n        bodys['FC'] = array[206]  # FFM Control (fat control)\n        bodys['FS'] = array[207]  # InBody Score (health assessment)\n        bodys['RBMR'] = ''  # daily calorie intake\n\n        return {\"basic_data\": base, \"hbca_data\": bodys}\n    except Exception as e:\n        return False\n\n\ndef upload_body_data(data):\n    succ 
= False\n    phone = data['basic_data']['PHONE']\n    data['bus_id'] = config.bus_id\n    data['license'] = config.license\n    data['phone'] = phone\n\n    dt = {\"bus_id\": data['bus_id'], \"license\": data['license'], \"phone\": phone,\n          \"basic_data\": json.dumps(data['basic_data']),\n          \"hbca_data\": json.dumps(data['hbca_data'])}\n    url = config.pre_url + \"/Web/Inbody/body_data\"\n    res = tool.post2(url, dt)\n    if res != \"\":\n        r = json.loads(res)\n        errorcode = r['errorcode']\n        if errorcode == 0:\n            succ = True\n    return succ\n\n\ndef main():\n    check_status = False\n    # autostart location: C:\\Users\\Administrator\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup\n\n    # check the license at startup\n    if not check_status:\n        succ = tool.check_license(config.bus_id, config.license)\n        if succ:\n            check_status = True\n    if not check_status:\n        exit(\"check status error\")\n\n    if config.device_version == 230:\n        while True:\n            data = {}\n            response = ser.readline()\n            if len(response) > 0:\n                data = deal230(response)\n                if data:\n                    upload_body_data(data)\n            time.sleep(1)\n\n    else:\n        exit('no device code')\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Glorydaqin/inbody","sub_path":"new/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":4456,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"}
{"seq_id":"17128718817","text":"import urllib.request, urllib.parse, urllib.error\nimport json\n\ntry:\n    url = input('Enter the url: ')\nexcept:\n    print('The url is not valid')\n    quit()\n\ntesturl1 = 'http://py4e-data.dr-chuck.net/comments_42.json'\ntesturl2 = 'http://py4e-data.dr-chuck.net/comments_101407.json'\n\nurlhandle = urllib.request.urlopen(url)\ndata = urlhandle.read()\n\n#print(data)\njs = json.loads(data)\n\nprint(js['note'])\n\nsum_count = 0\n\n#Printing every value of count\nfor item in js['comments']:\n    print(item['count'])\n\nprint('==================================')\n\n#Summing the counts\nfor item in js['comments']:\n    sum_count = sum_count + item['count']\n\nprint('The sum is ', sum_count)\n","repo_name":"mmdfaisal1/PythonProjects","sub_path":"py4e/ex_13_02.py","file_name":"ex_13_02.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"23673679802","text":"from configs.buildings import *\nfrom configs.units import *\ndescription = {\n\t'background':\t'./resources/images/level0_0.png',\n\t'hero' : \t{\n\t\t\t'construction': yellow_hero,\n\t\t\t'position':\t\t(14, 10),\n\t\t\t'direction': \t(0, 0),\n\t\t},\n\t'enemies': [\n\n\t\t{\n\t\t\t'construction': red_enemy,\n\t\t\t'position':\t\t(1, 1),\n\t\t\t'direction': \t(1, 0),\n\t\t\t'patrol_points': [(1,1),(14,14),(20,3)],\t\n\t\t},\n\t\t{\n\t\t\t'construction': blue_enemy,\n\t\t\t'position':\t\t(9, 7),\n\t\t\t'direction': \t(1, 0),\n\t\t\t'patrol_points': [(1,1),(14,14),(20,3)],\t\n\t\t},\n\t],\n\t'citizens': [\n\n\t\t{\n\t\t\t'construction': green_citizen,\n\t\t\t'position':\t\t(6,6),\n\t\t\t'direction': \t(0, 0),\n\t\t\t'home': \t(20, 4),\n\t\t\t'patrol_points': [(6,6), (11,7)],\n\t\t},\n\t\t{\n\t\t\t'construction': green_citizen,\n\t\t\t'position':\t\t(6,6),\n\t\t\t'direction': \t(0, 0),\n\t\t\t'home': \t(20, 4),\n\t\t\t'patrol_points': [(1,13), (2,13)],\n\t\t},\n\t\t{\n\t\t\t'construction': green_citizen,\n\t\t\t'position':\t\t(6,6),\n\t\t\t'direction': \t(0, 0),\n\t\t\t'home': \t(20, 4),\n\t\t\t'patrol_points': [(16,0), (19,0)],\n\t\t},\n\t],\n\t'walls': 
[\n\t\t{\n\t\t\t'construction':\tsmall_house,\n\t\t\t'position':\t\t(19, 11),\n\t\t},\n\t\t{\n\t\t\t'construction':\tsmall_house,\n\t\t\t'position':\t\t(3, 2),\n\t\t},\n\t\t{\n\t\t\t'construction':\tlarge_house,\n\t\t\t'position':\t\t(15, 7),\t\t\n\t\t},\n\t\t{\n\t\t\t'construction':\tblinking_stone,\n\t\t\t'position':\t\t(24,10),\n\t\t},\n\t\t{\n\t\t\t'construction':\tborder_horisontal,\n\t\t\t'position':\t\t(5, 3),\n\t\t},\n\t\t{\n\t\t\t'construction': border_horisontal,\n\t\t\t'position':\t\t(13, 2)\n\t\t},\n\t\t{\n\t\t\t'construction':\tborder_left_up,\n\t\t\t'position':\t\t(12, 2),\n\t\t},\n\t\t{\n\t\t\t'construction':\tborder_3_horisontal,\n\t\t\t'position':\t\t(7, 5),\n\t\t},\n\t\t{\n\t\t\t'construction':\tborder_3_horisontal,\n\t\t\t'position':\t\t(9, 5),\n\t\t},\n\t\t{\n\t\t\t'construction':\tborder_3_horisontal,\n\t\t\t'position':\t\t(11, 5),\n\t\t},\n\t\t{\n\t\t\t'construction':\tborder_3_horisontal,\n\t\t\t'position':\t\t(13, 5),\n\t\t},\n\t\t{\n\t\t\t'construction':\tborder_3_horisontal,\n\t\t\t'position':\t\t(4, 6),\n\t\t},\n\t\t{\n\t\t\t'construction': large_house_L_style,\n\t\t\t'position':\t\t(21, 9)\n\n\t\t},\n\t\t{\n\t\t\t'construction': border_horisontal,\n\t\t\t'position':\t\t(15, 5)\n\t\t},\n\t\t{\n\t\t\t'construction': border_left_down,\n\t\t\t'position':\t\t(20, 2)\n\t\t},\n\t\t{\n\t\t\t'construction': border_right_down,\n\t\t\t'position':\t\t(6, 5)\n\t\t},\n\t\t{\n\t\t\t'construction': border_right_up,\n\t\t\t'position':\t\t(3, 5)\n\t\t},\n\t]\n}","repo_name":"wannadie/ideaman","sub_path":"levels/level0_0.py","file_name":"level0_0.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15983088160","text":"def soma(s, w): # função que a faz a soma das parcelas\n s = s + w\n return s\n\n\nresultado = 0 # porque nada foi somado ainda\n# entrada da quantidade de n° a serem somados\nx = int(input('Quantidade de numeros a serem somados: '))\n\n# entrada do numeros por meio de um for e soma dos mesmos\nprint('Entre com os valores: ')\nfor y in range(x):\n z = float(input())\n resultado = soma(resultado, z)\n\n# Saida do resultado da soma dos valores entrados\nprint('A soma dos valores é: ', resultado)\n\n\"\"\"\nRESULTADO:\n Quantidade de numeros a serem somados: 3\n Entre com os valores: \n 6\n 4\n 10\n A soma dos valores é: 20.0\n\"\"\"\n","repo_name":"matheusAFONSECA/Python-codes","sub_path":"Matheus_Henrique_Fonseca_Afonso_exercicios/Questao_2.py","file_name":"Questao_2.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26127034979","text":"#ephucle@VN-00000267:/mnt/c/cygwin/home/ephucle/tool_script/python$ python3 count_file_extension.py './'\nimport sys\nfolder_path = sys.argv[1]\nfrom os import listdir\nfrom os.path import isfile, join\nonlyfiles = [f for f in listdir(folder_path) if isfile(join(folder_path, f))]\n\ndict1 = dict()\nfor f in onlyfiles:\n\tsplit_names = f.split('.')\n\textension = split_names[-1]\n\tif extension not in dict1:\n\t\tdict1[extension] = 1\n\telse:\n\t\tdict1[extension] += 1\n\nprint(dict1)\n#{'csv': 13, 'py': 132, 'log': 4, 'sh': 1, 'html': 2, 'txt': 16, 'xlsx': 4, 'zip': 2, 'png': 2, 'jpg': 1, 'xml': 2, 'pyc': 1, 'json': 1, 'amos': 1, 'db': 1}\n\nitems1 = dict1.items()\nfor item in 
items1:\n\tprint(item[1],item[0])","repo_name":"ecaohuy/pythonlab","sub_path":"count_file_extension.py","file_name":"count_file_extension.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12645217657","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom cadastro.models import *\nfrom cadastro.forms import *\n\n\ndef index(request):\n\n return render(request, \"index.html\")\n\n\n\ndef registrar_pessoa(request):\n\n form = PessoaForm()\n\n if request.method == \"POST\":\n form = PessoaForm(request.POST)\n\n if form.is_valid():\n pessoa = form.save(commit=False)\n\n pessoa.save()\n\n messages.success(request, \"Pessoa registrada com sucesso.\")\n\n return redirect(\"index\")\n\n context = {\n \"nome_pagina\": \"Registrar Pessoa\",\n \"form\": form\n }\n\n return render(request, \"registrar_pessoa.html\", context)\n\n\ndef registrar_votacao(request):\n\n form = VotacaoForm()\n\n if request.method == \"POST\":\n form = VotacaoForm(request.POST)\n\n if form.is_valid():\n votacao = form.save(commit=False)\n\n votacao.save()\n\n messages.success(request, \"Votação registrada com sucesso.\")\n\n return redirect(\"index\")\n\n context = {\n \"nome_pagina\": \"Registrar Votação\",\n \"form\": form\n }\n\n return render(request, \"registrar_votacao.html\", context)\n\n\ndef registrar_opvoto(request):\n\n form = OpcaoVotoForm()\n\n if request.method == \"POST\":\n form = OpcaoVotoForm(request.POST)\n\n if form.is_valid():\n opvoto = form.save(commit=False)\n\n opvoto.save()\n\n messages.success(request, \"Opcão de voto registrada com sucesso.\")\n\n return redirect(\"index\")\n\n context = {\n \"nome_pagina\": \"Registrar Opção de Voto\",\n \"form\": form\n }\n\n return render(request, \"registrar_opvoto.html\", context)\n\ndef index(request):\n\n pessoas = Pessoa.objects.all()\n context = {\n \"nome_pagina\": \"Controle Votação\",\n \"pessoas\": pessoas\n }\n print(context)\n return render(request, \"index.html\", context)","repo_name":"YaraSilvst/ControleVotacao","sub_path":"cadastro/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5396907753","text":"L=[]\nn=int(input('n='))\nfor i in range(n):\n a=int(input())\n L=L+[a]\nd=0\nH=[]\nfor j in L:\n if j>0:\n d+=1\n if j%2==0:\n H=H+[j]\ntong=0\nfor k in H:\n tong=tong+k\nc=len(H)\nif c==0:tbc=0\nelse:tbc=tong/c\nprint('SND=',d,sep='')\nprint('TBC=',tbc,sep='')","repo_name":"phamhung116/CoSoLapTrinh","sub_path":"Chuong5/BaiTap/5-3.py","file_name":"5-3.py","file_ext":"py","file_size_in_byte":272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37044814239","text":"import frappe\nimport random,json\nimport requests\nfrom one_fm.api.v1.utils import response\n\n\n\n@frappe.whitelist()\ndef fetch_quote(direct_response = False):\n \"\"\"\n Fetch a quote from zenquotes.io \n \n \"\"\"\n base_url = \"https://zenquotes.io/api/quotes/\"\n keyword = fetch_keyword().lower()\n api_key = fetch_key()\n url = base_url+api_key+\"&?keyword={}\".format(keyword)\n try:\n res = requests.get(url)\n if res.status_code == 200:\n \n json_response = json.loads(res.text)\n #Check if the response contains an author tag, this authenticates that a quote was returned and not an error\n if json_response[0].get('a'):\n data = {\n 
'quote':json_response[0].get('q'), \n                    'author':json_response[0].get('a'),\n                    'html':json_response[0].get('h')\n                }\n                if not direct_response:\n                    return response(\"Success\",200,data)\n                else:\n                    return (data)\n            else:\n                return get_cached_quote()\n    except Exception as error:\n        frappe.log_error(frappe.get_traceback(),\"Error fetching Quote\")\n        return response(\"Internal Server Error\", 500, None, error)\n    \n    \n\n\ndef get_cached_quote():\n    # fetch the cached quote. This is the fallback in case any error occurs while fetching quotes\n    quote = frappe.cache().get_value('daily_quote')\n    return json.loads(quote)\n    \n\n\ndef set_cached_quote():\n    # Set a daily quote in the cache every day\n    base_url = \"https://zenquotes.io/api/quotes/\"\n    keyword = 'inspiration'\n    api_key = fetch_key()\n    url = base_url+api_key+\"&?keyword={}\".format(keyword)\n    try:\n        res = requests.get(url)\n        if res.status_code == 200:\n            json_response = json.loads(res.text)\n            #Check if the response contains an author tag, this authenticates that a quote was returned and not an error\n            if json_response[0].get('a'):\n                quote_dict = json.dumps({'quote':json_response[0].get('q'), \n                    'author':json_response[0].get('a'),\n                    'html':json_response[0].get('h')\n                    })\n                frappe.cache().set_value('daily_quote',quote_dict)\n                return\n        \n    except Exception as error:\n        frappe.log_error(frappe.get_traceback(),\"Error Setting Quote in cache\")\n        return response(\"Internal Server Error\", 500, None, error)\n    \n    \n    \n    \n    \ndef fetch_key():\n    \"\"\"\n    Fetch the API key from frappe conf and set it in the cache\n    \"\"\"\n    cached_api = frappe.cache().get_value('zenquotes_api')\n    \n    if not cached_api:\n        cached_api = frappe.local.conf.zenquotes_api_a or frappe.get_doc(\"ONEFM General Setting\",None).get_password('zenquotes_api_key')\n        if not cached_api:\n            return response(\"Bad Request\", 400, None, f\"Zenquotes API Key not found\") \n        frappe.cache().set_value('zenquotes_api',cached_api)\n    return cached_api\n\n\ndef fetch_keyword():\n    # Fetch the appropriate keyword to be used in generating the quotes. \n    # The full list of available keywords can be found in the Zenquotes documentation\n    keywords = frappe.get_all(\"Zenquote Keyword Category\",{'parent':'ONEFM General Setting'},['keyword'])\n    if not keywords:\n        return 'inspiration'\n    # randomly return a choice based on the approved keywords\n    return random.choice(keywords).keyword\n    \n@frappe.whitelist()\ndef run_quotes():\n    try:\n        return fetch_quote(direct_response=True).get('html')\n    except:\n        return False","repo_name":"ONE-F-M/One-FM","sub_path":"one_fm/api/v2/zenquotes.py","file_name":"zenquotes.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"48"}
{"seq_id":"69987036305","text":"# -*- coding: utf-8 -*-\n\"\"\"py.test tests on main.py\n\n.. 
moduleauthor:: Mathew Topper \n\"\"\"\n\nimport os\nimport pytest\n\nimport dtocean_dummy\n\n# Using a py.test fixture to reduce boilerplate and test times.\n@pytest.fixture(scope=\"module\")\ndef sheet():\n '''Share a Spreadsheet object'''\n return dtocean_dummy.Spreadsheet()\n\ndef test_array_size(sheet):\n '''Test that the random array size is as requested.'''\n \n assert len(sheet._get_random_array(10)) == 10\n\ndef test_array_low(sheet):\n '''Test that the minimum of the array is greater than or equal to \n Spreadsheet.low.'''\n \n assert min(sheet._get_random_array(10)) >= sheet.low\n \ndef test_array_high(sheet):\n '''Test that the maximum of the array is less than Spreadsheet.high.'''\n \n assert max(sheet._get_random_array(10)) < sheet.high\n \ndef test_table_length():\n \n sheet = dtocean_dummy.Spreadsheet()\n array = sheet._get_random_array(10)\n sheet._array_2_df(array)\n \n assert len(sheet.table) == 10\n \ndef test_table_cumsum_length():\n \n '''Check that the length of the cumulative sum is OK'''\n \n sheet = dtocean_dummy.Spreadsheet()\n array = sheet._get_random_array(10)\n sheet._array_2_df(array)\n sheet._add_cumsum()\n \n cumsum = sheet.table['Cumulative']\n \n assert len(cumsum) == 10\n \ndef test_table_cumsum_total():\n \n '''Check that the cumulative sum in the pandas table is OK'''\n \n sheet = dtocean_dummy.Spreadsheet()\n array = sheet._get_random_array(10)\n sheet._array_2_df(array)\n sheet._add_cumsum()\n \n cumsum = sheet.table['Cumulative']\n \n assert cumsum.irow(-1) == sum(array)\n \n\ndef test_create_csv(tmpdir):\n \n '''Test if csv file is written'''\n \n locd = tmpdir.mkdir(\"sub\")\n p = locd.join(\"test.csv\")\n \n sheet = dtocean_dummy.Spreadsheet()\n sheet.make_table(10)\n sheet.write_csv(str(p))\n \n assert len(locd.listdir()) == 1\n assert os.path.basename(str(locd.listdir()[0])) == \"test.csv\"\n \ndef test_create_xls(tmpdir):\n \n '''Test if excel file is written'''\n \n locd = tmpdir.mkdir(\"sub\")\n p = locd.join(\"test.xls\")\n \n sheet = dtocean_dummy.Spreadsheet()\n sheet.make_table(10)\n sheet.write_xls(str(p))\n \n assert len(locd.listdir()) == 1\n assert os.path.basename(str(locd.listdir()[0])) == \"test.xls\"\n \ndef test_spreadsheet_call(tmpdir):\n \n '''Test if Spreadsheet.call works for all file types'''\n \n locd = tmpdir.mkdir(\"sub\")\n dirp = str(locd)\n \n sheet = dtocean_dummy.Spreadsheet()\n \n # Check the error for a bad file type\n filep = os.path.join(dirp, \"test.docx\")\n \n with pytest.raises(ValueError):\n sheet(10, filep, out_fmt='docx')\n \n for ftype in sheet._valid_formats:\n \n filep = os.path.join(dirp, \"test.{}\".format(ftype))\n \n sheet(10, filep, out_fmt=ftype)\n \n assert len(locd.listdir()) == len(sheet._valid_formats)\n\n@pytest.mark.xfail\ndef test_fail(tmpdir):\n \n '''Generate a test failure'''\n \n assert 0 == 1\n","repo_name":"DTOcean/dtocean-dummy-module","sub_path":"tests/test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25848162681","text":"from enum import Flag\nimport itertools as iter\nimport collections as col\nimport hashlib as hash\nimport math\nimport json\nimport re\nimport os\nimport sys\n\nfrom autoInput import getInput, startInputClock\nfrom autoSubmit import submitAnswer\n\ndef main(lines):\n # print(lines)\n part1, part2 = 0, 0\n jmps = [int(x) for x in lines]\n jmps2 = [int(x) for x in lines]\n\n # PART 1\n ecx = 0\n eip = 0\n while True:\n 
try:\n jmps[eip] += 1\n eip += jmps[eip] - 1\n ecx += 1\n except:\n break\n part1 = ecx\n\n # PART 2 NOTE: dont reuse the same tampered input for part 2 :)\n ecx = 0\n eip = 0\n while eip >= 0 and eip < len(jmps2):\n if jmps2[eip] >= 3:\n jmps2[eip] -= 1\n eip = eip + jmps2[eip] + 1\n else:\n jmps2[eip] += 1\n eip = eip + jmps2[eip] - 1\n ecx += 1\n part2 = ecx\n\n return part1, part2\n\nif __name__ == '__main__':\n PART_1 = False # NOTE: FLIP FOR PART 2 SUBMISSIONS\n\n year, day = os.path.basename(os.getcwd()), re.findall(r'^.*day(\\d+).py$', __file__)[0]\n if not os.path.isfile(f'{os.getcwd()}/day{day}input.txt'):\n getInput(year, day)\n\n lines = [l.strip() for l in open(f'day{day}input.txt').readlines()]\n\n part1, part2 = main(lines)\n print(part1, part2)\n\n SUBMIT = True if input(\"Submit? (Y: / N: NOT )\") == \"\" else False\n if PART_1 and SUBMIT:\n resp = submitAnswer(year, day, 1, part1)\n print(resp)\n if resp == \"CORRECT\": PART_1 = False # NOTE: This is where we update the db file\n elif not PART_1 and SUBMIT:\n resp = submitAnswer(year, day, 2, part2)\n print(resp)\n if resp == \"CORRECT\": print(\"Problem complete.\") # NOTE: This is where we update the db file","repo_name":"chasecolford/AdventOfCode","sub_path":"2017/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15794731528","text":"import socket\nimport pickle as pkl\nimport time\nfrom train_head_model import train_model\nimport os\n \nHEADERSIZE = 10\nWEIGHTS_FOLDER = 'models'\n\nIP = \"127.0.0.1\"\nPORT = 1234\n\nprint('---------------------Starting Client---------------------')\nprint('- Connecting to server on IP: {}, Port: {}'.format(IP, PORT))\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.connect((IP, PORT))\n\nmessage_header = s.recv(HEADERSIZE)\nmessage_length = int(message_header.decode('utf-8').strip())\nversion = s.recv(message_length).decode('utf-8')\nversion = int(version)\nprint('- Getting weights version: {}'.format(version))\n\nwith open(os.path.join(WEIGHTS_FOLDER, \"weights_{:04d}.h5\".format(version)),'wb') as file:\n message_header = s.recv(HEADERSIZE)\n message_length = int(message_header.decode('utf-8').strip())\n weights = s.recv(message_length)\n file.write(weights)\nprint(' - Weights received')\nprint(' -- Saving weights to file: {}'.format(os.path.join(WEIGHTS_FOLDER, \"weights_{:04d}.h5\".format(version))))\n\nprint(' - Training with latest weights')\ntrain_model(weights=\"weights_{:04d}.h5\".format(version))\n\nprint(' - Sending weights version {} back to server'.format(version+1))\nfilename = os.path.join(WEIGHTS_FOLDER, \"weights_{:04d}.h5\".format(version+1))\nwith open(filename, 'rb') as file:\n sendfile = file.read()\nsendfile = bytes(f\"{len(sendfile):<{HEADERSIZE}}\",'utf-8')+sendfile\ns.send(sendfile)\nprint(' - Weights sent')\n\nprint('')\nprint('- Getting new weights version: {}'.format(version+1))\n\nwith open(os.path.join(WEIGHTS_FOLDER, \"weights_{:04d}.h5\".format(version+1)),'wb') as file:\n message_header = s.recv(HEADERSIZE)\n message_length = int(message_header.decode('utf-8').strip())\n weights = s.recv(message_length)\n file.write(weights)\nprint(' - New weights received')\nprint(' -- Saving weights to file: {}'.format(os.path.join(WEIGHTS_FOLDER, 
\"weights_{:04d}.h5\".format(version+1))))","repo_name":"ghanbarzadeh/Course_IoT_2021","sub_path":"2/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"19421817234","text":"import unittest\nimport os\nimport logging\nimport shutil\nimport glob\nfrom pathlib import Path\nfrom python.configuration_manager.configurations_manager import ConfigurationsManager\nfrom python.configuration_manager.default_directories_enum import DefaultDirectories\n\n\nclass ConfigurationsManagerTest(unittest.TestCase):\n \"\"\"Tests the behavior of ConfigurationsManager class.\"\"\"\n @classmethod\n def setUpClass(cls) -> None:\n \"\"\"set-up the common resources.\"\"\"\n cls.configurations_manager = ConfigurationsManager()\n # may be better to use mock\n # a workaround for now\n # find xml file for the settings\n root = Path(__file__).parent.parent.parent\n files = glob.glob(os.path.join(os.path.join(root, '**/global_settings.xml')), recursive=True)\n try:\n cls.settings_file = files[0]\n except IndexError as e:\n raise e # TODO: add fallback workaround\n\n @classmethod\n def tearDownClass(cls) -> None:\n \"\"\"clean up the common resources.\"\"\"\n # TODO: clean up -- directories (or may be using mock an idea)\n # e.g shutil.rmtree(cls.configurations_manager.get_directory('output'))\n # to prevent the side effects such as memory leaks\n del cls.configurations_manager\n\n def test_make_directory(self):\n \"\"\"Case: whether a directory is successfully created.\"\"\"\n # make a tests directory\n temp_dir = self.configurations_manager.make_directory('temp_dir', '')\n # tests: whether the directory is successfully created\n self.assertTrue(os.path.isdir(temp_dir))\n shutil.rmtree(temp_dir) # clean up\n\n def test_make_directory_location_correctness(self):\n \"\"\"Case: whether the directory is created at the target location.\"\"\"\n # make a tests directory\n temp_dir = self.configurations_manager.make_directory('temp_dir_location_test', '')\n # tests: whether the location is correct\n temp_dir_path = Path(self.configurations_manager.get_directory('temp_dir_location_test'))\n self.assertTrue(Path.exists(temp_dir_path))\n shutil.rmtree(temp_dir) # clean up\n\n def test_get_directory_exists(self, target_directory=None):\n \"\"\"Case: the target_directory exists.\n It should return the path to that.\"\"\"\n if target_directory is None:\n self.configurations_manager.setup_default_directories('')\n target_directory = DefaultDirectories.OUTPUT # default directory\n self.assertTrue(os.path.isdir(self.configurations_manager.\n get_directory(target_directory)))\n\n def test_get_directory_not_exists(self):\n \"\"\"Case: the target_directory does not exist.\n It should raise an exception.\"\"\"\n target_dir = 'not_exists'\n # tests: it should raise an exception if directory does not exist\n with self.assertRaises(Exception) as context:\n self.configurations_manager.get_directory(target_dir)\n self.assertTrue('directory not found' in str(context.exception))\n\n def test_get_component_configuration_settings_exists(self):\n \"\"\"Case: the target ``component configuration settings`` exists.\n It should return the configuration settings for that.\"\"\"\n target_component_configuration_settings = 'log_configurations'\n # Test: it returns the configurations settings that exist for the target component\n self.assertIsNotNone(self.configurations_manager.\n 
get_configuration_settings(target_component_configuration_settings, self.settings_file))\n\n def test_get_component_configuration_settings_data_type_correctness(self):\n \"\"\"Case: the return data type for the existing target\n configuration settings is 'correct'.\"\"\"\n target_component_configuration_settings = 'log_configurations'\n target_data_type = dict\n # Test: it returns the correct data type\n self.assertIsInstance(self.configurations_manager.\n get_configuration_settings(target_component_configuration_settings,\n self.settings_file), target_data_type)\n\n def test_get_component_configuration_settings_not_exists(self):\n \"\"\"Case: the target ``component configuration settings``\n does not exist. It should raise an exception.\"\"\"\n target_component = 'not_exists'\n # Test: it should raise an 'LookupError' exception when the\n # target component configuration settings do not exist.\n with self.assertRaises(LookupError) as context:\n self.configurations_manager.get_configuration_settings(target_component, self.settings_file)\n # Test: the captured exception is the one that is raised.\n self.assertTrue(\"configuration settings not found!\" in str(context.exception))\n\n def test_load_log_configurations(self):\n \"\"\"Case: the logger is 'properly' configured. It should return\n the instance of Logging.Logger class with user defined name and\n settings, and should emit the logs at the user specified levels.\"\"\"\n test_logger_name = __name__\n target_component = 'log_configurations'\n self.configurations_manager.setup_default_directories('')\n log_configurations = self.configurations_manager.get_configuration_settings(target_component, self.settings_file)\n with self.assertLogs(__name__, level='INFO') as context:\n logger = self.configurations_manager.load_log_configurations(test_logger_name, log_configurations)\n # tests: whether the logger is created\n self.assertIsNotNone(logger)\n # tests: whether the logger is an instance of class ``Logger``\n self.assertIsInstance(logger, logging.Logger)\n # tests: whether the logger is created with user defined name\n self.assertEqual(logger.name, test_logger_name)\n # emit two tests log messages\n logger.info(\"TEST INFO log\")\n logger.error(\"TEST ERROR log\")\n # tests: whether all log messages are emitted successfully\n self.assertEqual(len(context.records), 2)\n # tests: correctness of the log messages i.e. 
whether the captured log\n # messages are the ones that were emitted.\n self.assertEqual(context.records[0].getMessage(), \"TEST INFO log\")\n self.assertEqual(context.records[1].getMessage(), \"TEST ERROR log\")\n # tests: correctness of the level of emitted log messages\n self.assertEqual(context.records[0].levelno, logging.INFO)\n self.assertEqual(context.records[1].levelno, logging.ERROR)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"multiscale-cosim/EBRAINS_RichEndpoint","sub_path":"test/python/Test_configurations_manager.py","file_name":"Test_configurations_manager.py","file_ext":"py","file_size_in_byte":6733,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"11661820908","text":"# Standard Library\nimport functools\nimport time\nfrom contextlib import contextmanager\nfrom typing import Callable\n\n\nclass TimeUtil:\n @staticmethod\n @contextmanager\n def timer(name: str):\n start_time = time.time()\n yield\n end_time = time.time()\n print(f\"[{name}] done in {end_time - start_time:.4f} s\")\n\n @staticmethod\n def timer_wrapper(func: Callable):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n start_time = time.time()\n print(f\"[{func.__name__}] start\")\n result = func(*args, **kwargs)\n end_time = time.time()\n print(f\"[{func.__name__}] done in {end_time - start_time:.4f} s\")\n\n return result\n\n return wrapper\n","repo_name":"taka-kawa/kaggle-template","sub_path":"src/utils/time_util.py","file_name":"time_util.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"43147130347","text":"class Array(object):\n def __init__(self, initialSize):\n self.__a = [None] * initialSize\n self.__nItems = 0\n\n def __len__(self):\n return self.__nItems\n\n def get(self, n):\n if 0 <= n < self.__nItems:\n return self.__a[n]\n\n def set(self, n, value):\n if 0 <= n < self.__nItems:\n self.__a[n] = value\n\n def swap(self, j, k):\n if 0 <= j < self.__nItems and 0 <= k < self.__nItems:\n self.__a[j], self.__a[k] = self.__a[k], self.__a[j]\n\n def insert(self, item):\n if self.__nItems >= len(self.__a):\n raise Exception(\"Array overflow\")\n self.__a[self.__nItems] = item\n self.__nItems += 1\n\n def find(self, item):\n for j in range(self.__nItems):\n if self.__a[j] == item:\n return j\n raise ValueError(\"Item not found\")\n\n def search(self, item):\n try:\n return self.get(self.find(item))\n except ValueError:\n return None\n\n def delete(self, item):\n try:\n j = self.find(item)\n except ValueError:\n return False\n self.__nItems -= 1\n for k in range(j, self.__nItems):\n self.__a[k] = self.__a[k + 1]\n return True\n\n def traverse(self, function=print):\n for j in range(self.__nItems):\n function(self.__a[j])\n\n def __str__(self):\n ans = \"[\"\n for i in range(self.__nItems):\n if len(ans) > 1:\n ans += \", \"\n ans += str(self.__a[i])\n ans += \"]\"\n return ans\n\n def bubbleSort(self):\n for last in range(self.__nItems - 1, 0, -1):\n for inner in range(last):\n if self.__a[inner] > self.__a[inner + 1]:\n self.swap(inner, inner + 1)\n\n def selectionSort(self):\n for outer in range(self.__nItems - 1):\n min = outer\n for inner in range(outer + 1, self.__nItems):\n if self.__a[inner] < self.__a[min]:\n min = inner\n self.swap(outer, min)\n\n def insertionSort(self):\n for outer in range(1, self.__nItems):\n temp = self.__a[outer]\n inner = outer\n while inner > 0 and temp < self.__a[inner - 1]:\n 
self.__a[inner] = self.__a[inner - 1]\n                inner -= 1\n            self.__a[inner] = temp\n    \n    def insertionSortAndDedupe(self):\n        # assumes -inf is not a legitimate array value; it is used as a removal marker\n        count = 0\n        for i in range(1, self.__nItems):\n            temp = self.__a[i]\n            j = i - 1\n            # use <= so duplicates are detected while shifting\n            while j >= 0 and temp <= self.__a[j]:\n                if temp == self.__a[j] and temp != -float(\"inf\"):\n                    # mark the duplicate as -inf so it sinks to the front\n                    temp = -float(\"inf\")\n                    count += 1\n                self.__a[j+1] = self.__a[j]\n                j -= 1\n            self.__a[j+1] = temp\n        # remove the -inf markers, shifting the remaining items left\n        for i in range(self.__nItems - 1, -1, -1):\n            if self.__a[i] == -float(\"inf\"):\n                for j in range(i, self.__nItems - 1):\n                    self.__a[j] = self.__a[j+1]\n                self.__nItems -= 1\n        return count\n\na = Array(10)\na.insert(5)\na.insert(3)\na.insert(8)\na.insert(2)\na.insert(7)\na.insert(1)\n\nprint(\"Unsorted array: \", a)\n\n# Bubble sort\na.bubbleSort()\nprint(\"Bubble sorted array: \", a)\n\n# Selection sort\na.selectionSort()\nprint(\"Selection sorted array: \", a)\n\n# Insertion sort\na.insertionSort()\nprint(\"Insertion sorted array: \", a)\n\n# Insertion sort with deduplication\na.insertionSortAndDedupe()\nprint(\"Deduplicated and sorted array: \", a)\n\n\n","repo_name":"cielobuezo/Algoritmos-","sub_path":"Cap_3/3.6/sortArray.py","file_name":"sortArray.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"29584500110","text":"from utime import sleep, sleep_ms\n\nclass limpieza:\n    \n    def __init__ (self,v1,v2,v3,v4,cantidad):\n        self.v1 = v1\n        self.v2 = v2\n        self.v3 = v3\n        self.v4 = v4\n        self.cantidad = cantidad\n    \n    def pasos(self, v1, v2, v3, v4):\n        # in1..in4 are assumed to be machine.Pin outputs defined at module level\n        in1.value(v1)\n        in2.value(v2)\n        in3.value(v3)\n        in4.value(v4)\n        sleep(0.02)\n    \n    def secuencia_una(self):\n        for i in range (self.cantidad):\n            self.pasos(1,0,0,0)\n            self.pasos(0,1,0,0)\n            self.pasos(0,0,1,0)\n            self.pasos(0,0,0,1)\n\n    def secuencia_dos(self):\n        for i in range (self.cantidad):\n            self.pasos(1,1,0,0)\n            self.pasos(0,1,1,0)\n            self.pasos(0,0,1,1)\n            self.pasos(1,0,0,1)\n    \n    def secuencia_dos_relog(self):\n        for i in range (self.cantidad):\n            self.pasos(1,1,0,0)\n            self.pasos(0,1,1,0)\n            self.pasos(0,0,1,1)\n            self.pasos(1,0,0,1)\n    \nprint(__name__)","repo_name":"luismongui/Diplomado_python","sub_path":"arenero/limpieza.py","file_name":"limpieza.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"12499508573","text":"import matplotlib.pyplot as plt\nimport yfinance as yf\nimport numpy as np\nimport os\nimport random\nimport pandas as pd\nimport torch\nimport torch.optim as optim\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import (Dataset, TensorDataset, DataLoader, Subset)\nfrom collections import OrderedDict\nfrom chapter_10_utils import create_input_data, custom_set_seed\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.preprocessing import MinMaxScaler\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, mean_absolute_percentage_error\n\n\n# 8. Define the model\nclass RNN(nn.Module):\n    def __init__(self,\n                 input_size,\n                 hidden_size,\n                 n_layers,\n                 output_size):\n        super(RNN, self).__init__()\n        self.rnn = nn.RNN(input_size, hidden_size,\n                          n_layers, batch_first=True,\n                          nonlinearity='relu')\n        self.fc = nn.Linear(hidden_size, output_size)\n\n    def forward(self, x):\n        x = x.view(x.size(0), -1, N_LAGS)\n        output, _ = self.rnn(x)\n        output = self.fc(output[:, -1, :])\n        return output\n\n\nif __name__ == '__main__':\n    print(torch.__version__)\n    device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n    # 2. 
Define Parameters\n TICKER = '^GSPC'\n START_DATE = '2017-01-01' # '2020-03-01'\n END_DATE = '2023-05-01' # '2020-03-01'\n VALID_START = '2023-03-01' # '2020-01-01'\n N_LAGS = 30\n\n # neural network =\n BATCH_SIZE = 16\n N_EPOCHS = 100\n\n # 3. Download and prepare the data\n\n df = yf.download(TICKER.strip(),\n start=START_DATE,\n end=END_DATE,\n progress=False)\n\n print(df)\n valid_size = df.loc[VALID_START:END_DATE].shape[0]\n prices = df['Adj Close'].values.reshape(-1, 1)\n\n fig, ax = plt.subplots()\n ax.plot(df.index, prices)\n ax.set(title=f\"{TICKER}'s Stock Price\",\n xlabel='Time',\n ylabel='Price ($)')\n\n plt.show()\n\n # 4. Scale the time series of Prices\n\n valid_ind = len(prices) - valid_size\n minmax = MinMaxScaler(feature_range=(0, 1))\n\n prices_train = prices[:valid_ind - N_LAGS]\n prices_valid = prices[valid_ind - N_LAGS:]\n\n minmax.fit(prices_train)\n\n prices_train = minmax.transform(prices_train)\n prices_valid = minmax.transform(prices_valid)\n\n prices_scaled = np.concatenate((prices_train,\n prices_valid)).flatten()\n\n # plt.plot(prices_scaled)\n\n # 5. Transform the time series into input for RNN\n X, y = create_input_data(prices_scaled, N_LAGS)\n\n # 6. Obtain the naive forecast:\n naive_pred = prices[len(prices) - valid_size - N_LAGS: -N_LAGS]\n y_valid = prices[len(prices) - valid_size:]\n\n naive_mse = mean_squared_error(y_valid, naive_pred)\n naive_rmse = np.sqrt(naive_mse)\n print(f\"Naive forecast - MSE: {naive_mse:.4f} | RMSE: {naive_rmse:.4f}\")\n\n # 7. Prepare the data loader objects\n custom_set_seed(42)\n\n valid_ind = len(X) - valid_size\n\n X_tensor = torch.from_numpy(X).float().unsqueeze(2)\n y_tensor = torch.from_numpy(y).float()\n\n dataset = TensorDataset(X_tensor, y_tensor)\n\n train_dateset = Subset(dataset, list(range(valid_ind)))\n valid_dataset = Subset(dataset, list(range(valid_ind, len(X))))\n\n train_loader = DataLoader(dataset=train_dateset,\n batch_size=BATCH_SIZE,\n shuffle=True)\n valid_loader = DataLoader(dataset=valid_dataset,\n batch_size=BATCH_SIZE)\n\n # Check the size of the dataset\n print(f'Size of Dataset : training - {len(train_loader.dataset)} | Validation: {len(valid_loader.dataset)}')\n\n # 9. Instantiate the model, the loss function and the optimizer:\n model = RNN(input_size=N_LAGS, hidden_size=6, n_layers=1, output_size=1).to(device)\n loss_fn = nn.MSELoss()\n optimizer = optim.Adam(model.parameters(), lr=0.001)\n\n # 10. 
Train the network\n    PRINT_EVERY = 10\n    train_losses, valid_losses = [], []\n\n    for epoch in range(N_EPOCHS):\n        running_loss_train = 0\n        running_loss_valid = 0\n\n        model.train()\n\n        for x_batch, y_batch in train_loader:\n            optimizer.zero_grad()\n\n            x_batch = x_batch.to(device)\n            y_batch = y_batch.to(device)\n            y_hat = model(x_batch)\n            loss = torch.sqrt(loss_fn(y_batch, y_hat))\n            loss.backward()\n            optimizer.step()\n            running_loss_train += loss.item() * x_batch.size(0)\n\n        # average per sample, consistent with the validation loss below\n        epoch_loss_train = running_loss_train / len(train_loader.dataset)\n        train_losses.append(epoch_loss_train)\n\n        with torch.no_grad():\n            model.eval()\n            for x_val, y_val in valid_loader:\n                x_val = x_val.to(device)\n                y_val = y_val.to(device)\n                y_hat = model(x_val)\n                loss = torch.sqrt(loss_fn(y_val, y_hat))\n                running_loss_valid += loss.item() * x_val.size(0)\n\n            epoch_loss_valid = running_loss_valid / len(valid_loader.dataset)\n\n            # save a checkpoint on the first epoch as well, so best_epoch is\n            # always defined and a checkpoint always exists for torch.load below\n            if epoch == 0 or epoch_loss_valid < min(valid_losses):\n                best_epoch = epoch\n                torch.save(model.state_dict(), './rnn_checkpoint.pth')\n\n            valid_losses.append(epoch_loss_valid)\n\n        if epoch % PRINT_EVERY == 0:\n            print(f\"<{epoch}> - Train. Loss: {epoch_loss_train:.4f}, \\t Valid. Loss: {epoch_loss_valid:.4f}\")\n    print(f\"Lowest loss recorded in epoch: {best_epoch}\")\n\n    # 11. Plot the losses over epochs:\n    train_losses = np.array(train_losses)\n    valid_losses = np.array(valid_losses)\n\n    fig, ax = plt.subplots()\n\n    ax.plot(train_losses, color='blue', label=\"Training loss\")\n    ax.plot(valid_losses, color='red', label=\"Validation Loss\")\n\n    ax.set(title=\"Loss over epochs\",\n           xlabel='Epoch',\n           ylabel='Loss')\n    ax.legend()\n\n    plt.tight_layout()\n    plt.show()\n\n    # 12. Load the best model (with the lowest validation loss):\n    state_dict = torch.load('rnn_checkpoint.pth')\n    model.load_state_dict(state_dict)\n\n    # 13. Obtain the predictions:\n    y_pred = []\n\n    with torch.no_grad():\n        model.eval()\n        for x_batch, y_batch in valid_loader:\n            x_batch = x_batch.to(device)\n            y_batch = y_batch.to(device)\n            y_hat = model(x_batch)\n            y_pred.extend(y_hat.cpu().detach().numpy())\n\n    y_pred = np.array(y_pred).reshape(-1, 1)\n\n    # 14. Transform the predictions back to the original scale:\n    y_valid_scaled = minmax.transform(y_valid)\n    y_pred = minmax.inverse_transform(y_pred)\n\n    # 15. Calculate evaluation metrics:\n    mse = mean_squared_error(y_valid, y_pred)\n    rmse = np.sqrt(mse)\n    mape = mean_absolute_percentage_error(y_valid, y_pred)\n    print(f\"MSE: {mse:.4f} | RMSE: {rmse:.4f} | MAPE: {mape:.2%}\")\n\n    # 16. Plot the predicted values against the true values:\n    fig, ax = plt.subplots()\n    ax.plot(df.index[valid_ind:valid_ind+len(y_valid)], y_valid, label='True')\n    ax.plot(df.index[valid_ind:valid_ind+len(y_valid)], y_pred.flatten(), label='Predicted')\n    ax.set(title=f\"Stock Price Prediction for {TICKER}\",\n           xlabel='Time',\n           ylabel='Price ($)')\n    ax.legend()\n\n    plt.tight_layout()\n    plt.show()\n\n    # 17. 
Forecast for the next 2 months\n last_prices = prices[-N_LAGS:]\n last_prices_scaled = minmax.transform(last_prices.reshape(-1, 1))\n last_prices_tensor = torch.from_numpy(last_prices_scaled).float().unsqueeze(2).to(device)\n last_prices_tensor = last_prices_tensor.view(1, N_LAGS, 1)\n forecast = []\n\n with torch.no_grad():\n model.eval()\n for _ in range(44): # 22 trading days * 2 months\n output = model(last_prices_tensor)\n forecast.append(output.item())\n last_prices_tensor = torch.cat((last_prices_tensor[:, 1:, :], output.unsqueeze(2)), dim=1)\n\n forecast = np.array(forecast).reshape(-1, 1)\n forecast = minmax.inverse_transform(forecast)\n\n # 18. Plot the forecasted values\n fig, ax = plt.subplots()\n ax.plot(df.index[-valid_size:], y_valid, label='True')\n ax.plot(pd.date_range(start=df.index[-valid_size], periods=len(forecast)), forecast, label='Forecast')\n ax.set(title=f\"Stock Price Forecast for {TICKER}\",\n xlabel='Time',\n ylabel='Price ($)')\n ax.legend()\n\n plt.tight_layout()\n plt.show()\n","repo_name":"yavuzBahceci/MsWorks","sub_path":"msCodes/snp500/SNPFutureRNN.py","file_name":"SNPFutureRNN.py","file_ext":"py","file_size_in_byte":8490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16207864561","text":"\"\"\"This is the implementation of vectors\"\"\"\nclass Vector:\n \"\"\"A class to use to make use of vectors\"\"\"\n def __init__(self, dimensions):\n \"\"\"initilize the vectors and decide on the dimension\"\"\"\n if not int(dimensions): raise ValueError('invalid value for dimensions of the vector')\n self._coordinates = [0]*dimensions\n\n def __len__(self):\n \"\"\"Return the dimension of the vector.\"\"\"\n return len(self._coordinates)\n\n def __getitem__(self, j):\n \"\"\"Return jth coordinate of vector.\"\"\"\n return self._coordinates[j]\n\n def __setitem__(self, j, val):\n \"\"\"Set jth coordinate of vector to given value.\"\"\"\n self._coordinates[j] = val\n\n def __add__(self, other):\n \"\"\"Return sum of two vectors.\"\"\"\n if len(self) != len(other):\n raise ValueError(\"Dimensions must be equal\")\n result = Vector(len(self))\n for j in range(len(self)):\n result[j] = self[j] + other[j]\n return result\n\n def __radd__(self, other):\n \"\"\"Return sum of two vectors, when vector is on thr right of the operation\"\"\"\n if len(self) != len(other):\n raise ValueError(\"Dimensions must be equal\")\n result = Vector(len(self))\n for j in range(len(self)):\n result[j] = self[j] + other[j]\n return result\n\n def __neg__(self):\n \"\"\"Return negative/reverse of the vector.\"\"\"\n result = Vector(len(self))\n for j in range(len(self)):\n result[j] = -self[j]\n return result\n\n def __mul__(self, other):\n \"\"\"Return multiplication of two vectors.\"\"\"\n self_lenght = len(self)\n result = Vector(self_lenght)\n if type(other) is int:\n for j in range(self_lenght):\n result[j] = self[j] * other\n else:\n if self_lenght == len(other):\n for item in range(self_lenght):\n result[item] = self[item] * other[item]\n else:\n raise ValueError(\"Dimensions must be equal\")\n return result\n\n def __rmul__(self, other):\n \"\"\"Return multiplication of two vectors, when vector is on thr right of the operation\"\"\"\n result = Vector(len(self))\n for j in range(len(self)):\n result[j] = self[j] * other\n return result\n\n def __eq__(self, other):\n \"\"\"Return True if vector has same coordinates as other.\"\"\"\n return self._coordinates == other._coordinates\n\n def __ne__(self, other):\n \"\"\"Return 
True if vector differs from other.\"\"\"\n        return not self == other\n\ntest_vector = Vector(3)\ntest_vector_other = Vector(3)\ntest_vector[0] = test_vector_other[2] = 4\ntest_vector[1] = test_vector_other[1] = 5\ntest_vector[2] = test_vector_other[0] = 6\ntest_vector = 3*test_vector\nprint(test_vector[0])\nprint(test_vector[1])\nprint(test_vector[2])\n","repo_name":"nvk681/Python-Practice","sub_path":"python/vectors.py","file_name":"vectors.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10333248806","text":"import math\nimport numpy as np\nimport logging\nimport pygsvd\nimport struct\nimport sys\nimport time\nimport numba.cuda as cuda\nimport cupy as cp\nimport os\nimport scipy\nfrom scipy.sparse import csr_matrix\nfrom scipy.sparse.linalg import svds\nfrom scipy.sparse.linalg import inv\nfrom sortedcontainers import SortedSet\n\nmapping_f = {}\nmapping_b = {}\nnum_vertices = 0\ncount = 0\nnp.set_printoptions(threshold=20000, precision=10)\n\ndef svdUpdate(U, S, V, a, b, rank):\n    \"\"\"\n    Update SVD of an (m x n) matrix `X = U * S * V^T` so that\n    `[X + a * b^T] = U' * S' * V'^T`\n    and return `U'`, `S'`, `V'`.\n    \n    The original matrix X is not needed at all, so this function implements one-pass\n    streaming rank-1 updates to an existing decomposition. \n    \n    `a` and `b` are (m, 1) and (n, 1) matrices.\n    \n    You can set V to None if you're not interested in the right singular\n    vectors. In that case, the returned V' will also be None (saves memory).\n    \n    The blocked merge algorithm in LsiModel.addDocuments() is much faster; I keep this fnc here\n    purely for backup reasons.\n    This is the rank-1 update as described in\n    **Brand, 2006: Fast low-rank modifications of the thin singular value decomposition**,\n    but without separating the basis from rotations.\n    \"\"\"\n    # convert input to matrices (no copies of data made if already numpy.ndarray or numpy.matrix)\n    S = S[:rank]\n    \n    U = U[:,:rank]\n    if V is not None:\n        V = V[:,:rank]\n\n    # eq (6)\n    m = np.dot(U.T, a)\n    p = a - np.dot(U, m)\n    Ra = np.linalg.norm(p, 2)\n    if float(Ra) < 1e-10:\n        print(\"input already contained in a subspace of U; skipping update\")\n        return U, S, V\n    P = (1.0 / float(Ra)) * p\n\n    if V is not None:\n        # eq (7)\n        n = np.dot(V.T, b)\n        q = b - np.dot(V, n)\n        Rb = np.linalg.norm(q, 2)\n        if float(Rb) < 1e-10:\n            print(\"input already contained in a subspace of V; skipping update\")\n            return U, S, V\n        Q = (1.0 / float(Rb)) * q\n    else:\n        n = np.zeros((rank, 1))\n        Rb = 1.0\n    \n    if float(Ra) > 1.0 or float(Rb) > 1.0:\n        print(\"insufficient target rank (Ra=%.3f, Rb=%.3f); this update will result in major loss of information\"\n          % (float(Ra), float(Rb)))\n    \n    # eq (8)\n    # form the new matrix\n    K_1 = np.diag(np.append(S, 0.0))\n    Raa = np.reshape(Ra, (1,1))\n    print(np.shape(Raa))\n    print(np.shape(m))\n    #K_2_1 = np.append(m, Raa)\n    K_2_1 = np.bmat([[m],[Raa]])\n    Rbb = np.reshape(Rb, (1,1))\n    print(np.shape(Rbb))\n    #K_2_2 = np.append(n, Rbb)\n    K_2_2 = np.bmat([[n],[Rbb]])\n    K_2 = np.outer(K_2_1, K_2_2)\n    \n    K = K_1 + K_2\n    \n    # eq (5)\n    uP, sP, vtP = np.linalg.svd(K, full_matrices = True)\n    p_l = len(P)\n    q_l = len(Q)\n    \n    Up = np.dot(np.bmat([U, np.reshape(P, (p_l, 1))]), uP)\n    if V is not None:\n        Vp = np.dot(np.bmat([V, np.reshape(Q, (q_l, 1))]), vtP.T)\n    else:\n        Vp = None\n\n    print(\"IncrSVD completed\")\n    return Up[:,0:rank], sP[0:rank], Vp[:,0:rank]\n\ndef GenSvdUpdate(U, S, V, a, b, rank):\n    \"\"\"\n    Update SVD of an (m x n) 
matrix `X = U * S * V^T` so that\n `[X + a * b^T] = U' * S' * V'^T`\n and return `U'`, `S'`, `V'`.\n \n The original matrix X is not needed at all, so this function implements one-pass\n streaming rank-c updates to an existing decomposition. \n \n `a` and `b` are (m, 1) and (n, 1) matrices.\n \"\"\"\n\n # convert input to matrices (no copies of data made if already numpy.ndarray or numpy.matrix)\n S = S[:rank]\n \n U = U[:,:rank]\n if V is not None:\n V = V[:,:rank]\n\n print(\"Inside low-rank incr-SVD\")\n print(\"U shape : \", U.shape)\n print(\"S shape : \", S.shape)\n print(\"V shape : \", V.shape)\n print(\"a shape : \", a.shape)\n print(\"b shape : \", b.shape)\n\n I = np.eye(len(U))\n\n QR1 = np.matmul(I-(np.matmul(U,U.T)),a)\n print(\"QR1 :\", QR1.shape)\n start = time.time()\n P,R = np.linalg.qr(QR1)\n\n end = time.time()\n print(\"QR decomposition time: \", end-start)\n print(\"P :\", P.shape)\n print(\"R :\", R.shape)\n Ra = np.matmul(P.T, QR1)\n end = time.time()\n print(\"Ra :\", Ra.shape)\n\n\n QR2 = np.matmul(I-(np.matmul(V,V.T)),b)\n Q,_ = np.linalg.qr(QR2)\n print(\"Q : \", Q.shape)\n Rb = np.matmul(Q.T, QR2)\n print(\"Rb :\", Rb.shape)\n\n # eq (8)\n # form the new matrix\n K_1 = np.diag(np.append(S, 0.0))\n print(\"K_1 :\", K_1.shape)\n\n K_2_1_1 = np.matmul(U.T,a)\n print(\"K_2_1_1 : \", K_2_1_1.shape)\n K_2_1 = np.bmat([[K_2_1_1], [Ra]])\n print(\"K_2_1 : \", K_2_1.shape)\n\n K_2_2_1 = np.matmul(b.T,V)\n print(\"K_2_2_1 : \", K_2_2_1.shape)\n\n K_2_2 = np.bmat([K_2_2_1, Rb.T])\n print(\"K_2_2 : \", K_2_2.shape)\n\n K_2 = np.matmul(K_2_1, K_2_2)\n print(\"K_2 : \", K_2.shape)\n \n K = K_1 + K_2\n \n # eq (5)\n s = time.time()\n uP, sP, vtP = np.linalg.svd(K, full_matrices = True)\n print(\"Incr-SVD time :\" , time.time()-s)\n p_l = len(P)\n q_l = len(Q)\n \n Up = np.dot(np.bmat([U, P]), uP)\n if V is not None:\n Vp = np.dot(np.bmat([V, Q]), vtP.T)\n else:\n Vp = None\n\n \"\"\"\n print(\"Up shape : \", Up.shape)\n print(\"sP shape : \", sP.shape)\n print(\"Vp shape : \", Vp.shape)\n print(\"IncrSVD completed\")\n \"\"\"\n \n return Up[:,0:rank], sP[0:rank], Vp[:,0:rank]\n\ndef map_node_ids(old_ids):\n global mapping_f\n global mapping_b\n i = int(0)\n for node in old_ids:\n mapping_f[node]=i\n mapping_b[i]=node\n i += 1\n\ndef load_mutation_data(filename):\n raw_data = np.fromfile(filename, dtype=np.uint64, count = -1)\n length = int(len(raw_data)/2)\n A = raw_data[0:length]\n B = raw_data[length:len(raw_data)]\n return A, B\n\n#add comments on functionality\ndef load_graph(filename):\n global num_vertices\n raw_data = np.fromfile(filename, dtype=np.uint64, count = -1)\n \n total_len = len(raw_data)\n\n graph_data = {}\n old_ids = SortedSet()\n\n i = int(0);\n while i < total_len:\n begin = int(i+2)\n added = int(raw_data[i+1])\n end = int(i) + int(2) + added\n\n graph_data[int(raw_data[i])] = raw_data[begin:end]\n old_ids.add(int(raw_data[i]))\n for j in graph_data[int(raw_data[i])]:\n old_ids.add(j)\n \n i = int(end)\n if(i >= total_len):\n break;\n\n map_node_ids(old_ids)\n num_vertices = len(old_ids)\n\n adj_matrix = np.zeros((num_vertices,num_vertices) , dtype=np.float64)\n #use csr/csc format - scipy svd\n for vertex in graph_data.keys():\n for nghbr in graph_data[vertex]:\n adj_matrix[mapping_f[vertex]][mapping_f[nghbr]]=1;\n\n return adj_matrix\n\ndef load_graph_with_mapping(filename):\n global num_vertices\n global mapping_b\n global mapping_f\n\n raw_data = np.fromfile(filename, dtype=np.uint64, count = -1)\n total_len = len(raw_data)\n\n graph_data = {}\n i = 
int(0);\n while i < total_len:\n begin = int(i+2)\n added = int(raw_data[i+1])\n end = int(i) + int(2) + added\n\n graph_data[int(raw_data[i])] = raw_data[begin:end]\n i = int(end)\n if(i >= total_len):\n break;\n\n adj_matrix = np.zeros((num_vertices,num_vertices) , dtype=np.float64)\n #use csr/csc format - scipy svd\n for vertex in graph_data.keys():\n for nghbr in graph_data[vertex]:\n adj_matrix[mapping_f[vertex]][mapping_f[nghbr]]=1;\n\n return adj_matrix\n\n#Load the graph. Map the vertices in adj_matrix using already available mapping(created while loading a superset graph). Create dict of in-nghbrs called src_dict\ndef load_graph_with_mapping_in_nghbrs(filename):\n global num_vertices\n global mapping_b\n global mapping_f\n\n raw_data = np.fromfile(filename, dtype=np.uint64, count = -1)\n total_len = len(raw_data)\n\n graph_data = {}\n src_dict = {}\n i = int(0);\n while i < total_len:\n begin = int(i+2)\n added = int(raw_data[i+1])\n end = int(i) + int(2) + added\n\n graph_data[int(raw_data[i])] = raw_data[begin:end]\n i = int(end)\n if(i >= total_len):\n break;\n\n adj_matrix = np.zeros((num_vertices,num_vertices) , dtype=np.float64)\n #use csr/csc format - scipy svd\n for vertex in graph_data.keys():\n for nghbr in graph_data[vertex]:\n if nghbr not in src_dict.keys():\n src_dict[nghbr] = []\n src_dict[nghbr].append(vertex)\n adj_matrix[mapping_f[vertex]][mapping_f[nghbr]]=1;\n\n return adj_matrix, src_dict\n\ndef calculate_vectors_alt(U,S,VT,k):\n \"\"\"\n source = np.empty((len(U),k),dtype=np.float64)\n target = np.empty((len(U),k),dtype=np.float64)\n \"\"\"\n V = VT.T\n source = np.dot(U, np.diag(np.sqrt(S)))\n target = np.dot(V, np.diag(np.sqrt(S)))\n return source,target\n\ndef calculate_vectors(V1,S1,V2,S2,k):\n print(\"Received SVDs of proximity matrices, computing HOPE vectors now \\n\")\n source = np.empty((len(V1[0]),k),dtype=np.float64)\n target = np.empty((len(V2[0]),k),dtype=np.float64)\n \n v1 = V1.T\n v2 = V2.T\n print(\"Computed final v1 and v2 \\n\")\n\n sigmas = []\n for i in range(0,k):\n sigmas.append(np.float64(math.sqrt(S2[i]/S1[i])))\n \n source = np.dot(v1, np.diag(sigmas))\n target = np.dot(v2, np.diag(sigmas))\n return source,target\n\ndef ret_prox_param(X):\n eigvalue,eigvecs = scipy.sparse.linalg.eigs(X,k=1,which='LM')\n return abs(eigvalue)\n\ndef return_prox_mat_katz_full(X, prox_param):\n n = len(X)\n id_m = np.eye(n)\n\n Mg = csr_matrix(id_m - (prox_param*X))\n Ml = csr_matrix(prox_param*X)\n\n return Mg,Ml\n\ndef return_prox_mat_common(X):\n n = len(X)\n id_m = np.eye(n)\n\n Ml = csr_matrix(np.matmul(X,X))\n\n return Ml\n\ndef return_prox_mat_aa(X):\n n = len(X)\n id_m = np.eye(n)\n\n Mg = csr_matrix(id_m)\n D = np.zeros((n,n), dtype=np.float64)\n\n for i in range(0,n):\n ele = np.float64(0.0)\n for j in range(0,n):\n ele = ele + X[i][j] + X[j][i]\n D[i][i] = 1/ele\n\n D_csr = csr_matrix(D)\n X_csr = csr_matrix(X)\n Ml = np.matmul(X_csr, np.matmul(D_csr, X_csr))\n\n return Mg,Ml\n\ndef return_prox_mat_katz_partial(X, prox_param):\n n = len(X)\n id_m = np.eye(n)\n\n Mg = id_m - (prox_param*X)\n Ml = prox_param*X\n\n return Mg,Ml\n\ndef katz_full(X, dim, prox_param):\n Mg,Ml = return_prox_mat_katz_full(X, prox_param)\n Mg_inv = inv(Mg)\n \"\"\"Mg_inv_sparse = csr_matrix(Mg_inv)\n Ml_sparse = csr_matrix(Ml)\n S = Mg_inv_sparse.multiply(Ml_sparse)\n \"\"\"\n S_sparse=np.matmul(Mg_inv, Ml)\n\n U,S,VT = svds(S_sparse, k=dim, which='LM')\n return calculate_vectors_alt(U,S,VT,dim)\n\ndef katz_partial(X,dim,prox_param):\n Mg,Ml = 
return_prox_mat_katz_partial(X, prox_param)\n\n    print(\"Inside katz_partial, returned proximity matrices of size : Mg : \", Mg.shape, \" and Ml : \", Ml.shape)\n    t0 = time.time()\n    c,s,x,u,v = pygsvd.gsvd(np.linalg.inv(Mg), Ml, full_matrices=True,extras='uv')\n    print(\"Time taken for GSVD :\" , time.time() - t0)\n\n    print(\"c :\", c.shape)\n    print(\"s :\", s.shape)\n    print(\"u :\", u.shape)\n    print(\"v :\", v.shape)\n    c_list = c.tolist()\n    s_list = s.tolist()\n\n    return calculate_vectors(u[0:dim,:],c_list[0:dim],v[0:dim,:],s_list[0:dim],dim)\n\ndef incr_katz_full(X,A,B,dim,prox_param):\n    Mg,Ml = return_prox_mat_katz_full(X, prox_param)\n    Mg_inv = inv(Mg)\n    S_sparse = np.matmul(Mg_inv, Ml)\n    U, S, VT = svds(S_sparse, k=dim, which='LM')\n    print(\"S_dense :\" ,S_sparse.shape)\n    Up,Sp,Vp = svdUpdate(U, S, VT.T, ((-1)*prox_param)*A, B, dim)\n    print(\"Computed svd updates for S_sparse\")\n\n    return calculate_vectors_alt(Up,Sp,Vp.T,dim)\n\ndef svd_hope_full(X,dim, prox_param):\n    Mg,Ml = return_prox_mat_katz_full(X, prox_param)\n    Mg_inv = inv(Mg)\n    S_dense = np.matmul(Mg_inv, Ml)\n    S_sparse = csr_matrix(S_dense)\n    \n    U, S, VT = svds(S_sparse, k=dim, which='LM')\n    return U, S, VT.T\n\ndef incr_katz_full_series(U, S, V,A,B,dim,prox_param):\n    Up,Sp,Vp = svdUpdate(U, S, V, ((-1)*prox_param)*A, B, dim)\n    \n    print(\"Up :\",Up.shape)\n    print(\"Sp : \", Sp.shape)\n    print(\"Vp :\" ,Vp.shape)\n    print(\"Computed svd updates for S_dense\")\n\n    src, trgt = calculate_vectors_alt(Up,Sp,Vp.T,dim)\n\n    return src, trgt, Up, Sp, Vp\n\ndef incr_hope_partial(X,A,B,dim,prox_param):\n    Mg,Ml = return_prox_mat_katz_partial(X, prox_param)\n\n    U1, S1, VT1 = svds(Mg, k=dim, which='LM')\n    U2, S2, VT2 = svds(Ml, k=dim, which='LM')\n\n    print(\"Computed full SVD of Mg and Ml\\n\")\n\n    U1p,S1p,V1p = svdUpdate(U1, S1, VT1.T, ((-1)*prox_param)*A, B, len(S1))\n    print(\"Computed svd updates for Mg\")\n    U2p,S2p,V2p = svdUpdate(U2, S2, VT2.T, (prox_param)*A, B, len(S2))\n    print(\"Computed svd updates for Ml\")\n\n    return calculate_vectors(V1p.T,S1p,V2p.T,S2p,dim)\n\ndef common_nghbrs(X, dim):\n    Ml = return_prox_mat_common(X)\n    U,S,VT = svds(Ml, k=dim, which='LM')\n    return calculate_vectors_alt(U,S,VT,dim)\n\ndef incr_common_nghbrs_series(U, S, V, A, B, dim):\n    Up,Sp,Vp = GenSvdUpdate(U, S, V, A, B, dim)\n    \n    print(\"Up :\",Up.shape)\n    print(\"Sp : \", Sp.shape)\n    print(\"Vp :\" ,Vp.shape)\n\n    src, trgt = calculate_vectors_alt(Up,Sp,Vp.T,dim)\n\n    return src, trgt, Up, Sp, Vp\n\ndef common_nghbrs_factorize(X,Y, non_zero_idx):\n    \"\"\"\n    factorize the term (X*Y.T)**2 to the form of (P*Q.T) - given X, Y; find P and Q\n    assumption : X and Y are both n-by-1 vectors\n    \"\"\"\n    P = X\n    n = len(X)\n    Z = np.outer(X, Y)\n    ZZ = np.matmul(Z,Z)\n    Q = np.zeros((n, 1), dtype=np.float64)\n    for i in range(0, n):\n        Q[i][0] = ZZ[non_zero_idx][i] / P[non_zero_idx][0]\n\n    return P, Q\n\ndef adamic_adar(X, dim):\n    Mg,Ml = return_prox_mat_aa(X)\n    S_sparse = np.matmul(Mg, Ml)\n    U,S,VT = svds(S_sparse, k=dim, which='LM')\n    return calculate_vectors_alt(U,S,VT,dim)\n\n#write any N by d matrix to a text file\ndef save_matrix(output_graph_file, m):\n    N = len(m)\n    d = len(m[0])\n    for i in range(0,N):\n        m_st = \"\"\n        for j in range(0,d):\n            m_st = m_st + str(m[i][j]) + \" \"\n        m_st = m_st + \"\\n\"\n        with open(output_graph_file, \"a\") as f:\n            f.write(m_st)\n\ndef save_embeddings_bin(output_embs_prefix, source, target):\n    source_as_list = source.flatten()\n    target_as_list = target.flatten()\n    \n    if os.path.exists(output_embs_prefix + \"_source.bin\"):\n        
os.remove(output_embs_prefix + \"_source.bin\")\n if os.path.exists(output_embs_prefix + \"_target.bin\"):\n os.remove(output_embs_prefix + \"_target.bin\")\n \n source.tofile(output_embs_prefix+\"_source.bin\", sep='')\n target.tofile(output_embs_prefix+\"_target.bin\", sep='')\n\ndef save_mappings(output_embs_prefix):\n global mapping_b\n global num_vertices\n if os.path.exists(output_embs_prefix + \"_mapping.bin\"):\n os.remove(output_embs_prefix + \"_mapping.bin\")\n \n mapping_b_mat = np.asarray(list(mapping_b.values()) , dtype=np.uint64).reshape(num_vertices, 1)\n mapping_b_mat.tofile(output_embs_prefix + \"_mapping.bin\",sep='')\n\ndef compare_prox_mat_svd(X, A, B, dim, prox_param):\n X_hat = X + np.outer(A, B)\n Mg_up, Ml_up = return_prox_mat_katz_partial(X_hat, prox_param)\n print(\"Computed proximity matrices for updated matrix\\n\")\n\n U1_up, S1_up, VT1_up = svds(Mg_up, k=dim, which='LM')\n U2_up, S2_up, VT2_up = svds(Ml_up, k=dim, which='LM')\n print(\"Computed full SVD on proximity matrices of updated matrix\\n\")\n with open(\"S1_up\", \"w+\") as f:\n f.write(str(S1_up))\n with open(\"S2_up\", \"w+\") as f:\n f.write(str(S2_up))\n\n Mg_incr, Ml_incr = return_prox_mat_katz_partial(X, prox_param)\n print(\"Computed proximity matrices for initial matrix\\n\")\n U1, S1, VT1 = svds(Mg_incr, k=dim, which='LM')\n U2, S2, VT2 = svds(Ml_incr, k=dim, which='LM')\n print(\"Computed full SVD on proximity matrices of initial matrix\\n\")\n \n U1_incr,S1_incr,V1_incr = GenSvdUpdate(U1, S1, VT1.T, (-1)*prox_param*A, B, dim)\n U2_incr,S2_incr,V2_incr = GenSvdUpdate(U2, S2, VT2.T, prox_param*A, B, dim)\n print(\"Completed incr_svd on proximity matrices of updated matrix\\n\")\n with open(\"S1_incr\", \"w+\") as f:\n f.write(str(S1_incr))\n with open(\"S2_incr\", \"w+\") as f:\n f.write(str(S2_incr))\n print(\"Done comparison and saved results to file\")\n\ndef compare_svd(X, A, B, dim):\n X_hat = X + np.outer(A, B)\n U_hat, S_hat, VT_hat = svds(X_hat, k=dim, which='LM')\n print(\"Computed full SVD on updated matrix\\n\")\n with open(\"S_hat\", \"w+\") as f:\n f.write(str(S_hat))\n \n U, S, VT = svds(X, k=dim, which='LM')\n print(\"Computed full SVD on initial matrix\\n\")\n \n U_incr,S_incr,V_incr = GenSvdUpdate(U, S, VT.T, A, B, dim)\n print(\"Completed incr_svd on updated matrix\\n\")\n with open(\"S_incr\", \"w+\") as f:\n f.write(str(S_incr))\n print(\"Done comparison and saved results to file\")\n\ndef static_embs():\n # arguments : input graph as bin, file name prefix for output embedding files, embedding dim\n input_graph_file = str(sys.argv[1])\n # describe graph format\n output_embs_prefix = str(sys.argv[2])\n dimensions = int(sys.argv[3])\n \n X = load_graph(input_graph_file)\n beta = ret_prox_param(X)\n beta = 7\n print(\"beta :\", beta)\n\n #enter desired embedding function in the following line\n s = time.time()\n source, target = katz_full(X,dimensions, beta)\n print(time.time()-s)\n save_embeddings_bin(output_embs_prefix, source, target)\n save_mappings(output_embs_prefix)\n\ndef incr_svd_test():\n \"\"\"\n arguments : input smaller graph as bin, input larger graph with added edges as bin , embedding dimensions\n \"\"\"\n input_small_graph_file = str(sys.argv[1])\n input_large_graph_file = str(sys.argv[2])\n dim = int(sys.argv[3])\n \n X_hat = load_graph(input_large_graph_file)\n X = load_graph_with_mapping(input_small_graph_file)\n\n global num_vertices\n A = np.zeros((num_vertices,1), dtype=np.float64)\n B = np.zeros((num_vertices,1), dtype=np.float64)\n\n #assuming only 
single row has changed\n X_diff = X_hat - X\n diff_vertex = 0\n for i in range(0,num_vertices):\n for j in range(0,num_vertices):\n if(X_diff[i][j] != 0):\n diff_vertex = i\n break\n\n A[diff_vertex][0] = np.float64(1.0)\n Y = X_hat[diff_vertex]-X[diff_vertex]\n B = Y.reshape(num_vertices,1)\n\n compare_svd(X, A, B, dim)\n\ndef incr_hope_test():\n \"\"\"\n arguments : input smaller graph as bin, input larger graph with added edges as bin \n file name prefix for output embedding files, embedding dim\n \"\"\"\n input_small_graph_file = str(sys.argv[1])\n output_embs_prefix = str(sys.argv[2])\n dimensions = int(sys.argv[3])\n \n X = load_graph(input_small_graph_file)\n X_hat = np.copy(X)\n\n #delete some edges in a single row of X to create a finite diff\n for i in range(0,len(X[0])):\n if(i%2!=0):\n X[0][i] = 0\n\n A = np.zeros((len(X),1), dtype=np.float64)\n B = np.zeros((len(X),1), dtype=np.float64)\n A[0][0] = 1\n Y = X_hat[0]-X[0]\n B = Y.reshape(len(X),1)\n #beta = ret_prox_param(X)\n beta = 0.0001\n\n source, target = incr_katz_full(X,A,B,dimensions,beta)\n save_embeddings_bin(output_embs_prefix, source, target)\n save_mappings(output_embs_prefix)\n\ndef incr_hope():\n \"\"\"\n arguments : input smaller graph as bin, input larger graph with added edges as bin \n file name prefix for output embedding files, embedding dim\n \"\"\"\n input_small_graph_file = str(sys.argv[1])\n input_large_graph_file = str(sys.argv[2])\n output_embs_prefix = str(sys.argv[3])\n dimensions = int(sys.argv[4])\n \n X_hat = load_graph(input_large_graph_file)\n X = load_graph_with_mapping(input_small_graph_file)\n\n global num_vertices\n A = np.zeros((num_vertices,1), dtype=np.float64)\n B = np.zeros((num_vertices,1), dtype=np.float64)\n\n #assuming only single row has changed\n X_diff = X_hat - X\n diff_vertex = 0\n for i in range(0,num_vertices):\n for j in range(0,num_vertices):\n if(X_diff[i][j] != 0):\n diff_vertex = i\n break\n\n A[diff_vertex][0] = np.float64(1)\n Y = X_hat[diff_vertex]-X[diff_vertex]\n B = Y.reshape(num_vertices,1)\n #beta = ret_prox_param(X)\n beta = 0.000001\n\n source, target = incr_katz_full(X,A,B,dimensions,beta)\n save_embeddings_bin(output_embs_prefix, source, target)\n save_mappings(output_embs_prefix)\n\n# calculate simple diff between adj matrices corresponding to initial and final graphs, also keep a record of which vertices already had some edges in initial graph and which have been added in the final one\ndef calc_diff(X1,X2):\n global num_vertices\n vert_added = []\n edge_added = []\n X_diff = X2-X1\n\n for r in range(0,num_vertices):\n for c in range(0,num_vertices):\n if X_diff[r][c] != 0:\n if np.any(X1[r]):\n edge_added.append(r)\n break\n else:\n vert_added.append(r)\n break\n\n return X_diff, edge_added, vert_added\n\n# the static SVD needs to be performed only once. 
Subsequently, the results of the previous incr-SVD are used\ndef saturate_edges_katz(X1, X_diff, edge_added, output_embs_prefix, dimensions):\n X_s = X1\n beta = 0.0001\n Ui, Si, Vi = svd_hope_full(X_s, dimensions, beta)\n global num_vertices\n global count\n\n for x in range(0,len(edge_added)):\n i = edge_added[x]\n A = np.zeros((num_vertices,1), dtype=np.float64)\n B = np.zeros((num_vertices,1), dtype=np.float64)\n A[i][0] = np.float64(1)\n B = X_diff[i].reshape(num_vertices,1)\n source, target, Up, Sp, Vp = incr_katz_full_series(Ui, Si, Vi, A, B, dimensions, beta)\n np.reshape(Up, np.shape(Ui))\n Ui = Up\n np.reshape(Sp, np.shape(Si))\n Si = Sp\n np.reshape(Vp, np.shape(Vi))\n Vi = Vp\n if(x == len(edge_added) - 1):\n save_embeddings_bin(output_embs_prefix + \"_\" + str(count), source, target)\n #save_embeddings_bin(output_embs_prefix + \"_\" + str(count), source, target)\n count = count + 1\n X_s = X_s + np.outer(A,B)\n\n return X_s\n\ndef saturate_vertices_katz(X1, X_diff, vert_added, output_embs_prefix, dimensions):\n X_s = X1\n global num_vertices\n global count\n beta = 0.0001\n Ui, Si, Vi = svd_hope_full(X_s, dimensions, beta)\n\n for x in range(0,len(vert_added)):\n i = vert_added[x]\n A = np.zeros((num_vertices,1), dtype=np.float64)\n B = np.zeros((num_vertices,1), dtype=np.float64)\n A[i][0] = np.float64(1)\n B = X_diff[i].reshape(num_vertices,1)\n # source, target = incr_katz_full(X_s,A,B,dimensions,beta)\n source, target, Up, Sp, Vp = incr_katz_full_series(Ui, Si, Vi, A, B, dimensions, beta)\n Ui = Up\n Si = Sp\n Vi = Vp\n if(x == len(vert_added) - 1):\n save_embeddings_bin(output_embs_prefix + \"_\" + str(count), source, target)\n count = count + 1\n X_s = X_s + np.outer(A,B)\n return X_s\n\ndef update_common_nghbrs(X1, X_diff, edge_added, output_embs_prefix, dim):\n X_s = X1\n global num_vertices\n global count\n Ml = return_prox_mat_common(X_s)\n s = time.time()\n Ui,Si,VTi = svds(Ml, k=dim, which='LM')\n print(\"Full SVD time : \", time.time()-s)\n Vi = VTi.T\n\n for x in range(0,len(edge_added)):\n print(\"count :\", count)\n i = edge_added[x]\n A = np.zeros((num_vertices,1), dtype=np.float64)\n B = np.zeros((num_vertices,1), dtype=np.float64)\n A[i][0] = np.float64(1)\n B = X_diff[i].reshape(num_vertices,1)\n A1 = np.matmul(X_s,A)\n B1 = B\n A2 = A\n B2 = np.matmul(X_s.T, B)\n A3,B3 = common_nghbrs_factorize(A,B,i)\n\n U1,S1,V1 = GenSvdUpdate(Ui, Si, Vi, A1, B1, dim)\n U2, S2, V2 = GenSvdUpdate(U1, S1, V1, A2, B2, dim)\n source, target, Up, Sp, Vp = incr_common_nghbrs_series(U2, S2, V2, A, B, dim)\n Ui = Up\n Si = Sp\n Vi = Vp\n if(x == len(edge_added) - 1):\n save_embeddings_bin(output_embs_prefix + \"_\" + str(count), source, target)\n count = count + 1\n X_s = X_s + np.outer(A,B)\n break\n return X_s \n\ndef update_common_nghbrs(X1, X_diff, src_dict, edge_added, output_embs_prefix, dim):\n X_s = X1\n global num_vertices\n global count\n Ml = return_prox_mat_common(X_s)\n s = time.time()\n Ui,Si,VTi = svds(Ml, k=dim, which='LM')\n print(\"Full SVD time : \", time.time()-s)\n Vi = VTi.T\n\n for x in range(0,len(edge_added)):\n print(\"count :\", count)\n u = edge_added[x]\n A = np.zeros((num_vertices,num_vertices), dtype=np.float64)\n B = np.zeros((num_vertices,num_vertices), dtype=np.float64)\n A[u][u] = 1.0\n for s in src_dict[u]:\n A[s][s] = 1.0\n for v in range(0,len(X_diff[u])):\n if(X_diff[u][v] != 0):\n B[s][v] = 1.0\n for v in range(0,len(X_diff[u])):\n if(X_diff[u][v] != 0):\n if v not in src_dict.keys():\n src_dict[v] = []\n src_dict[v].append(u)\n for d in 
range(0,len(X1[v])):\n if(X1[v][d] != 0):\n B[u][d] += 1.0\n source, target, Up, Sp, Vp = incr_common_nghbrs_series(Ui,Si,Vi,A,B,dim,)\n Ui = Up\n Si = Sp\n Vi = Vp\n if(x == len(edge_added) - 1):\n save_embeddings_bin(output_embs_prefix + \"_\" + str(count), source, target)\n count = count + 1\n X_s = X_s + np.outer(A,B)\n return X_s \n\n \"\"\"\n for x in range(0,len(edge_added)):\n print(\"count :\", count)\n i = edge_added[x]\n A = np.zeros((num_vertices,1), dtype=np.float64)\n B = np.zeros((num_vertices,1), dtype=np.float64)\n A[i][0] = np.float64(1)\n B = X_diff[i].reshape(num_vertices,1)\n A1 = np.matmul(X_s,A)\n B1 = B\n A2 = A\n B2 = np.matmul(X_s.T, B)\n A3,B3 = common_nghbrs_factorize(A,B,i)\n\n U1,S1,V1 = GenSvdUpdate(Ui, Si, Vi, A1, B1, dim)\n U2, S2, V2 = GenSvdUpdate(U1, S1, V1, A2, B2, dim)\n source, target, Up, Sp, Vp = incr_common_nghbrs_series(U2, S2, V2, A, B, dim)\n Ui = Up\n Si = Sp\n Vi = Vp\n if(x == len(edge_added) - 1):\n save_embeddings_bin(output_embs_prefix + \"_\" + str(count), source, target)\n count = count + 1\n X_s = X_s + np.outer(A,B)\n return X_s\n \"\"\" \n\n#this the primary function for checking incremental HOPE\n#has been run before and verified for correctness\n# runs a series of rank-1 svdUpdates and calculates embeddings after each update (currently : does not save any except the final stage, uses full hope)\ndef incr_hope_series():\n \"\"\"\n arguments : input smaller graph as bin, input larger graph with added edges as bin \n file name prefix for output embedding files, embedding dim\n \"\"\"\n input_small_graph_file = str(sys.argv[1])\n input_large_graph_file = str(sys.argv[2])\n output_embs_prefix = str(sys.argv[3])\n dimensions = int(sys.argv[4])\n \n # load final graph first to prepare a mapping of all vertices\n X2 = load_graph(input_large_graph_file)\n # initial(smaller) graph is loaded afterwards but padded with zero rows and columns to enable calculation of diff with final graph\n X1 = load_graph_with_mapping(input_small_graph_file)\n global num_vertices\n print(\"num_vertices :\",num_vertices)\n print(\"X2 :\" ,X2.shape)\n print(\"X1 :\",X1.shape)\n\n X_diff, edge_added, vert_added = calc_diff(X1,X2)\n\n print(\"edge_added :\",len(edge_added))\n print(\"vert_added :\", len(vert_added))\n\n edge_added.extend(vert_added)\n save_mappings(output_embs_prefix)\n # first update edge additions.\n X_hat = saturate_edges_katz(X1, X_diff, edge_added, output_embs_prefix, dimensions)\n # NOTE : currently, vertex additions also behave like edge additions, a row which was entirely zero in initial graph is populated with ones\n #X_hat = saturate_vertices_katz(X_intmdt, X_diff, vert_added, output_embs_prefix, dimensions)\n\n print(\"diff between X_hat and X2 :\" ,np.any(X_hat-X2))\n\n#buggy GenSVD\ndef incr_common_nghbrs():\n \"\"\"\n arguments : input smaller graph as bin, input larger graph with added edges as bin \n file name prefix for output embedding files, embedding dim\n \"\"\"\n input_small_graph_file = str(sys.argv[1])\n input_large_graph_file = str(sys.argv[2])\n output_embs_prefix = str(sys.argv[3])\n dimensions = int(sys.argv[4])\n\n # load final graph first to prepare a mapping of all vertices\n X2 = load_graph(input_large_graph_file)\n # initial(smaller) graph is loaded afterwards but padded with zero rows and columns to enable calculation of diff with final graph\n # X1 = load_graph_with_mapping(input_small_graph_file)\n X1,src_dict = load_graph_with_mapping_in_nghbrs(input_small_graph_file)\n global num_vertices\n print(\"num_vertices 
:\",num_vertices)\n print(\"X2 :\" ,X2.shape)\n print(\"X1 :\",X1.shape)\n\n X_diff, edge_added, vert_added = calc_diff(X1,X2)\n\n print(\"edge_added :\",len(edge_added))\n print(\"vert_added :\", len(vert_added))\n\n edge_added.extend(vert_added)\n save_mappings(output_embs_prefix)\n\n # first update edge additions.\n # NOTE : currently, vertex additions also behave like edge additions, a row which was entirely zero in initial graph is populated with ones\n X_hat = update_common_nghbrs(X1, X_diff, src_dict, edge_added, output_embs_prefix, dimensions)\n # X_hat = update_common_nghbrs(X_inmdt, X_diff, vert_added, output_embs_prefix, dimensions)\n \nif __name__ == \"__main__\":\n \n Y = np.random.rand(2000,2000)\n y = cp.asarray(Y)\n #y_d = cuda.to_device(y)\n #x = np.random.random(2000).astype(np.float32)\n s = time.time()\n q, r = cp.linalg.qr(y)\n print(time.time()-s)\n print(cp.matmul(q,r) - y)\n s = time.time()\n Q,R = np.linalg.qr(Y)\n print(time.time()-s)\n print(np.matmul(Q,R) - Y)\n #incr_common_nghbrs()\n #static_embs()\n #incr_hope_series()\n","repo_name":"AditiSingh97/CS759","sub_path":"FinalProject759/dyn_embed.py","file_name":"dyn_embed.py","file_ext":"py","file_size_in_byte":30211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33406091652","text":"import argparse\nimport copy\nimport glob\nimport json\nimport logging\nimport numpy as np\nimport os\n\nimport psutil\nimport ray\n\nfrom solvent.data.mmcif_parsing import parse\nfrom solvent.data.protein_utils import process_pdb\nfrom igfold.utils.pdb import cdr_indices\n\n@ray.remote\ndef parse_file(\n f,\n args,\n chain_cluster_size_dict\n):\n pdb_path = os.path.join(args.pdb_dir, f)\n full_name = os.path.splitext(os.path.basename(pdb_path))[0]\n \n out = {}\n out[full_name] = {}\n local_data = out[full_name]\n data = process_pdb(\n pdb_path=pdb_path,\n is_distillation=False,\n )\n\n cdr_index = np.zeros_like(data['residue_index'])\n chain_type = list(set(list(data['chain_index'])))\n if len(chain_type) == 2:\n cdr_names = [\"h1\", \"h2\", \"h3\", \"l1\", \"l2\", \"l3\"]\n else:\n if chain_type[0] == 7:\n cdr_names = [\"h1\", \"h2\", \"h3\"]\n else:\n cdr_names = [\"l1\", \"l2\", \"l3\"]\n \n cdr_h_index = cdr_index[data['chain_index'] == 7]\n cdr_l_index = cdr_index[data['chain_index'] == 11]\n for cdr in cdr_names:\n start_idx, end_idx = cdr_indices(pdb_path, cdr, offset_heavy=False)\n if 'h' in cdr:\n cdr_h_index[start_idx:end_idx+1] = 1\n else:\n cdr_l_index[start_idx:end_idx+1] = 1\n cdr_index = np.concatenate([cdr_h_index, cdr_l_index])\n\n data.update({'cdr_index': cdr_index})\n local_data['seq'] = data['sequence'][0].decode()\n cache_data = copy.deepcopy(local_data)\n for k, v in data.items():\n if v.dtype == 'O':\n continue\n local_data[k] = v.tolist()\n\n json_path = os.path.join(args.output_dir, full_name+'.json')\n with open(json_path, \"w\") as fp:\n fp.write(json.dumps(out, indent=4))\n\n out[full_name] = cache_data\n return out\n\n\ndef main(args):\n json_dir = args.output_dir\n if not os.path.exists(json_dir):\n os.makedirs(json_dir)\n\n chain_cluster_size_dict = None\n if (args.cluster_file is not None):\n chain_cluster_size_dict = {}\n with open(args.cluster_file, \"r\") as fp:\n clusters = [l.strip() for l in fp.readlines()]\n\n for cluster in clusters:\n chain_ids = cluster.split()\n cluster_len = len(chain_ids)\n for chain_id in chain_ids:\n chain_id = chain_id.upper()\n chain_cluster_size_dict[chain_id] = cluster_len\n\n accepted_exts = 
[\".cif\", \".pdb\"]\n files = list(os.listdir(args.pdb_dir))\n files = [f for f in files if os.path.splitext(f)[-1] in accepted_exts]\n\n parsed_data = [parse_file.remote(f, args, chain_cluster_size_dict) for f in files]\n parsed_data = ray.get(parsed_data)\n\n cache_data = {}\n for d in parsed_data:\n cache_data.update(d)\n\n cache_path = os.path.join(args.output_dir, '..', 'chain_data_cache.json')\n with open(cache_path, \"w\") as fp:\n fp.write(json.dumps(cache_data, indent=4))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--pdb_dir\", type=str, help=\"Directory containing mmCIF or PDB files\"\n )\n parser.add_argument(\n \"--fasta_dir\", type=str, default=None, help=\"Directory containing mmCIF or PDB files\"\n )\n parser.add_argument(\n \"--output_dir\", type=str, help=\"Path for parsed .json output\"\n )\n parser.add_argument(\n \"--cluster_file\", type=str, default=None,\n help=(\n \"Path to a cluster file (e.g. PDB40), one cluster \"\n \"({PROT1_ID}_{CHAIN_ID} {PROT2_ID}_{CHAIN_ID} ...) per line. \"\n \"Chains not in this cluster file will NOT be filtered by cluster \"\n \"size.\"\n )\n )\n\n args = parser.parse_args()\n num_cpus = psutil.cpu_count()\n ray.init(num_cpus=num_cpus)\n\n main(args)\n\n ray.shutdown()\n","repo_name":"kakaobrain/solvent","sub_path":"tools/preprocess_multimer_datasets.py","file_name":"preprocess_multimer_datasets.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"48"} +{"seq_id":"4060846761","text":"from pathlib import Path\nfrom unittest import TestCase\n\nimport ppsi\nfrom ppsi.common import shell\nfrom ppsi.pspbar import pspbar\nfrom ppsi.pspbar.network import IP_ADDR\n\n\nclass TestNtwk(TestCase):\n \"\"\"\n Test IpAddrSeg\n \"\"\"\n def test_ip_addr_seg(self):\n \"\"\"\n Test ip address segment in various contexts\n \"\"\"\n IP_ADDR.call_me()\n ip_out = shell.process_comm(\n 'sh',\n str(\n Path(ppsi.__file__).parent.joinpath(\n 'pspbar/shell_dep/netcheck.sh')), \"-r=0\", \"-n=5\")\n self.assertIsNotNone(ip_out)\n print(ip_out)\n self.assertEqual(int(ip_out.split('\\t')[2]), 5)\n ip_out = shell.process_comm(\n 'sh',\n str(\n Path(ppsi.__file__).parent.joinpath(\n 'pspbar/shell_dep/netcheck.sh', \"-r=1\")))\n self.assertIsNone(ip_out)\n\n\nclass TestBar(TestCase):\n \"\"\"\n Test that PSPBar always sends some output\n \"\"\"\n def untest_alive(self):\n pspbar(1, 1, num_iter=4)\n","repo_name":"pradyparanjpe/ppsi","sub_path":"tests/test_pspbar.py","file_name":"test_pspbar.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16371483817","text":"import json\nimport os\n\nfrom tqdm import tqdm\nfrom transformers import (DataCollatorForLanguageModeling,\n LineByLineTextDataset, Trainer, TrainingArguments,\n XLMRobertaForMaskedLM, XLMRobertaTokenizer)\n\n\ndef convert_weibo_text_into_line_by_line(weibo_dir, line_by_line_f, mini_size=float('inf')):\n text_list = []\n for fname in tqdm(os.listdir(weibo_dir), desc='reading weibo text'):\n with open(os.path.join(weibo_dir, fname), 'r') as fin:\n posts = json.load(fin)\n text_list.append(posts[0][\"text\"])\n # text_list.extend([post[\"user_description\"]\n # for post in posts]) # 136 MB\n if len(text_list) > mini_size:\n break\n text_list = [t for t in text_list if len(t) > 0]\n with open(line_by_line_f, 'w') as fout:\n fout.write('\\n'.join(text_list) + '\\n')\n\n\nif 
__name__ == '__main__':\n\n line_by_line_f = '/rwproject/kdd-db/20-rayw1/data/line_by_line_post.txt'\n\n model_in = 'xlm-roberta-base'\n config_tag = '-post'\n model_out = '/rwproject/kdd-db/20-rayw1/language_models/' + model_in + config_tag\n output_dir = '/rwproject/kdd-db/20-rayw1/language_models/output' + config_tag\n\n convert_weibo_text_into_line_by_line(\n weibo_dir='/rwproject/kdd-db/20-rayw1/rumdect/weibo_json', line_by_line_f=line_by_line_f)\n\n print('Loading models...')\n tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-base')\n model = XLMRobertaForMaskedLM.from_pretrained(model_in, return_dict=True)\n\n print('Loading dataset...')\n dataset = LineByLineTextDataset(\n tokenizer=tokenizer,\n file_path=line_by_line_f,\n block_size=128,\n )\n\n data_collator = DataCollatorForLanguageModeling(\n tokenizer=tokenizer, mlm=True, mlm_probability=0.15\n )\n\n training_args = TrainingArguments(\n output_dir=output_dir,\n overwrite_output_dir=True,\n num_train_epochs=3,\n per_device_train_batch_size=4, # 8 => CUDA out of memory on raymond's server\n save_steps=8000, # 8000 ~ save every 15 min\n save_total_limit=2,\n )\n\n trainer = Trainer(\n model=model,\n args=training_args,\n data_collator=data_collator,\n train_dataset=dataset,\n )\n\n print('Training...')\n trainer.train()\n trainer.save_model(model_out)\n ","repo_name":"ysunbp/Fake-News-Detection-with-HetGNN","sub_path":"text_embed/finetune_text_embedder.py","file_name":"finetune_text_embedder.py","file_ext":"py","file_size_in_byte":2371,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"10593105491","text":"import sys\nimport pandas as pd\nfrom didipack.trainer.trainer_abstract import BaseTrainer\nfrom didipack.utils_didi.ridge import run_efficient_ridge\nimport numpy as np\nfrom didipack import Params\n\nclass TrainerRidge(BaseTrainer):\n def __init__(self, par: Params):\n super().__init__(par)\n self.beta = None\n\n def _predict(self, x):\n return x.values @ self.beta.T\n\n def _train_model(self, x: pd.DataFrame, y: pd.DataFrame, hyper_params):\n beta = run_efficient_ridge(signals=x.values, labels=y.values,shrinkage_list=hyper_params['shrinkage'])\n self.beta= beta\n\n def _validation_procedure(self, x_train: pd.DataFrame, y_train: pd.DataFrame, x_val: pd.DataFrame,\n y_val: pd.DataFrame, hyper_parameters_set):\n self.beta = run_efficient_ridge(signals=x_train.values, labels=y_train.values,shrinkage_list=self.par.train.shrinkage_list)\n y_pred = self._predict(x_val)\n val_perf =np.mean(np.square(y_pred-y_val.values),axis=0)\n best_lambda = self.par.train.shrinkage_list[np.argmin(val_perf)]\n print('Select best lambda', best_lambda,flush=True)\n return {'shrinkage':[best_lambda]}\n\n\n\nif __name__ == \"__main__\":\n try:\n grid_id = int(sys.argv[1])\n model_id = int(sys.argv[2])\n print('Running with args', grid_id, flush=True)\n except:\n print('Debug mode on local machine', flush=True)\n grid_id = 0\n model_id = 5\n\n par = Params()\n par.train.testing_window = 5\n self = TrainerRidge(par)\n\n N = 1000\n P = 100\n fake_data = pd.DataFrame(np.random.normal(size=(N,P)))\n y = pd.DataFrame(fake_data.mean(1))\n dates = pd.Series(np.arange(N))\n ids = dates.copy()\n\n y_test, _ = 
self.train_at_time_t(x=fake_data,y=y,ids=ids,times=dates,t_index_for_split=100)\n\n","repo_name":"AntoineDidisheim/didipack","sub_path":"didipack/trainer/trainer_ridge.py","file_name":"trainer_ridge.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32824825629","text":"import pygame, sys\nfrom pygame.locals import *\n\npygame.init()\n\nscreen = pygame.display.set_mode((400, 300))\npygame.display.set_caption('Example with sound')\n\npygame.mixer.music.load(\"Calema - Tempo.mp3\")\npygame.mixer.music.play(-1, 0)\n\nwhile True:\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n\n pygame.display.update()\n\n\n\n\n","repo_name":"habraino/meus-scripts","sub_path":"_prog/_python/_pygame/soundTest.py","file_name":"soundTest.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"110221219","text":"import rmad.expressions as expressions\nimport numpy as np\nfrom functools import singledispatch\n\n\ndef forwardmodeAD(expr, conditions):\n \"\"\"Visit an expression in post-order applying a function.\"\"\"\n symbols = dict()\n for symbol in conditions.keys():\n if isinstance(expr, np.ndarray):\n stack = []\n for expression in expr:\n stack.append(expression)\n else:\n stack = [expr]\n visited = {}\n while stack:\n element = stack.pop()\n element.adjoint = 0\n unvisited_children = []\n for operand in element.operands:\n if operand not in visited:\n unvisited_children.append(operand)\n if unvisited_children:\n stack.append(element)\n for x in unvisited_children:\n stack.append(x)\n else:\n visited[element] = forwardevaluate(element, symbol,\n *(operand for operand in\n element.operands),\n symbol_map=conditions)\n element.storedvalue = visited[element]\n if isinstance(expr, np.ndarray):\n adjointlist = []\n for expression in expr:\n adjointlist.append(expression.adjoint)\n else:\n adjointlist = expr.adjoint\n symbols[symbol] = adjointlist\n return symbols\n\n\n@singledispatch\ndef forwardevaluate(expr, seed, *o, **kwargs):\n \"\"\"Evaluate an expression node.\n\n Parameters\n ----------\n expr: Expression\n The expression node to be evaluated.\n *o: numbers.Number\n The results of evaluating the operands of expr.\n **kwargs:\n Any keyword arguments required to evaluate specific types of\n expression.\n symbol_map: dict\n A dictionary mapping Symbol names to numerical values, for\n example:\n\n {'x': 1}\n \"\"\"\n raise NotImplementedError(\n f\"Cannot evaluate a {type(expr).__name__}\")\n\n\n@forwardevaluate.register(expressions.Number)\ndef _(expr, seed, *o, **kwargs):\n value = expr.value\n expr.storedvalue = value\n expr.adjoint += 0\n return value\n\n\n@forwardevaluate.register(int)\ndef _(expr, seed, *o, **kwargs):\n value = expr\n expr.storedvalue = value\n expr.adjoint += 0\n return value\n\n\n@forwardevaluate.register(expressions.Symbol)\ndef _(expr, seed, *o, symbol_map, **kwargs):\n value = symbol_map[expr]\n expr.storedvalue = value\n expr.adjoint += 0\n if seed == expr:\n expr.adjoint += 1\n return value\n\n\n@forwardevaluate.register(expressions.Add)\ndef _(expr, seed, *o, **kwargs):\n value = o[0].storedvalue + o[1].storedvalue\n expr.storedvalue = value\n expr.adjoint += o[0].adjoint + o[1].adjoint\n return value\n\n\n@forwardevaluate.register(expressions.Sub)\ndef _(expr, seed, *o, **kwargs):\n value = o[0].storedvalue - o[1].storedvalue\n 
expr.storedvalue = value\n    expr.adjoint += o[0].adjoint - o[1].adjoint\n    return value\n\n\n@forwardevaluate.register(expressions.Mul)\ndef _(expr, seed, *o, **kwargs):\n    value = o[0].storedvalue * o[1].storedvalue\n    expr.storedvalue = value\n    expr.adjoint += o[0].storedvalue * \\\n        o[1].adjoint + o[1].storedvalue * o[0].adjoint\n    return value\n\n\n@forwardevaluate.register(expressions.Div)\ndef _(expr, seed, *o, **kwargs):\n    value = o[0].storedvalue / o[1].storedvalue\n    expr.storedvalue = value\n    expr.adjoint += (o[0].adjoint*o[1].storedvalue\n                     - o[1].adjoint*o[0].storedvalue) / (o[1].storedvalue ** 2)\n    return value\n\n\n@forwardevaluate.register(expressions.Pow)\ndef _(expr, seed, *o, **kwargs):\n    value = o[0].storedvalue ** o[1].storedvalue\n    expr.storedvalue = value\n    expr.adjoint += o[0].adjoint * o[1].storedvalue * \\\n        o[0].storedvalue ** (o[1].storedvalue - 1)\n    return value\n\n\n@forwardevaluate.register(expressions.Sin)\ndef _(expr, seed, *o, **kwargs):\n    value = np.sin(o[0].storedvalue)\n    expr.storedvalue = value\n    expr.adjoint += np.cos(o[0].storedvalue) * o[0].adjoint\n    return value\n\n\n@forwardevaluate.register(expressions.Cos)\ndef _(expr, symbol, *o, **kwargs):\n    value = np.cos(o[0].storedvalue)\n    expr.storedvalue = value\n    expr.adjoint += -np.sin(o[0].storedvalue) * o[0].adjoint\n    return value\n\n\n@forwardevaluate.register(expressions.Exp)\ndef _(expr, symbol, *o, **kwargs):\n    value = np.exp(o[0].storedvalue)\n    expr.storedvalue = value\n    expr.adjoint += np.exp(o[0].storedvalue) * o[0].adjoint\n    return value\n\n\n@forwardevaluate.register(expressions.Log)\ndef _(expr, symbol, *o, **kwargs):\n    value = np.log(o[0].storedvalue)\n    expr.storedvalue = value\n    expr.adjoint += (1/o[0].storedvalue) * o[0].adjoint\n    return value\n","repo_name":"callumfirth/M2R-RMAD","sub_path":"rmad/forwardmode.py","file_name":"forwardmode.py","file_ext":"py","file_size_in_byte":4788,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"42039221631","text":"from gmusicapi import Mobileclient, Musicmanager\n\nmm = Musicmanager()\napi = Mobileclient()\ncheck = api.oauth_login(Mobileclient.FROM_MAC_ADDRESS)\nif check == False:\n    api.perform_oauth()\n    api.oauth_login(Mobileclient.FROM_MAC_ADDRESS)\ncheck = mm.login()\nif check == False:\n    mm.perform_oauth()\n    mm.login()\nplid = \"\"\ndef createplist(name):\n    global plid\n    plid = api.create_playlist(name)\n    \ndef upload(path):\n    mm.upload(path)\n    addplaylist(path)\n\nsids = []\n\n\ndef addplaylist(title):\n    lst = mm.get_uploaded_songs()\n    sid = \"\"\n    for i in lst:\n        if(i[\"title\"]==title):\n            sid = i[\"id\"]\n            break\n    sids.append(sid)\n\ndef appendtoplaylist():\n    global plid\n    api.add_songs_to_playlist(plid, sids)","repo_name":"knightron0/tunefind","sub_path":"upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"40461910622","text":"def egg_drop_recursive(eggs, floors):\r\n    # Base cases\r\n    if eggs == 1:\r\n        return floors\r\n    if floors == 0 or floors == 1:\r\n        return floors\r\n\r\n    min_drops = float('inf')\r\n\r\n    # Consider dropping the egg from each floor and find the worst case\r\n    for i in range(1, floors + 1):\r\n        # If egg breaks, search in lower floors with one less egg\r\n        breaks = egg_drop_recursive(eggs - 1, i - 1)\r\n        # If egg doesn't break, search in upper floors with the same number of eggs\r\n        doesn_break = egg_drop_recursive(eggs, floors - i)\r\n\r\n        # Find the maximum in the worst case\r\n        worst_case = 
max(breaks, doesn_break)\r\n\r\n        # Update the minimum drops\r\n        min_drops = min(min_drops, worst_case)\r\n\r\n    # Add 1 for the current drop\r\n    return 1 + min_drops\r\n\r\n# Example usage:\r\neggs = 2\r\nfloors = 10\r\n\r\nresult = egg_drop_recursive(eggs, floors)\r\nprint(f\"Minimum drops needed: {result}\")\r\n","repo_name":"369harshit/Day26-DP","sub_path":"egg dropping.py","file_name":"egg dropping.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34652078067","text":"from http import HTTPStatus\nimport re\nfrom flask import Blueprint, Response, request, jsonify\n\nfrom br_funds.quote.quotes import get_latest_quote, get_quotes\nfrom br_funds.utils import convert_to_xml, validate_url_params\n# from ..utils import add_funds, get_all_funds, get_fund\nfrom ..funds.funds import add_funds, get_all_funds, get_fund\n\nfunds_blueprint = Blueprint('funds_blueprint', __name__)\n\n@funds_blueprint.route('/funds/', methods=['GET', 'POST'])\ndef route_funds() -> Response:\n    if request.method == 'GET':\n        resp = get_all_funds()\n        return resp\n\n    if request.method == 'POST':\n        request_data = request.get_json()\n        resp = add_funds(request_data['cnpjs'])\n        return resp\n\n    return None\n\n@funds_blueprint.route('/funds/<cnpj>', methods=['GET'])\ndef route_funds_get(cnpj: str):\n\n    resp = get_fund(cnpj)\n    return resp\n\n@funds_blueprint.route('/funds/<cnpj>/quote/', methods=['GET'])\ndef route_funds_get_quote(cnpj: str) -> Response:\n    # cnpj = '08.968.733/0001-26'\n    # Artesanal 09.625.909/0001-00 09625909000100\n    if not validate_url_params(request.args):\n        resp = jsonify({\n            'msg': 'Invalid values for the URL parameters',\n            'error': True,\n            'data': None\n        })\n        resp.status_code = HTTPStatus.BAD_REQUEST\n        return resp\n\n    print(request.args.get('format', default='JSON', type=str))\n    param_format = request.args.get('format', default='JSON', type=str).upper()\n    print(f'param ---> {param_format}')\n\n    resp = get_latest_quote(cnpj)\n\n    if param_format == 'XML':\n        return convert_to_xml(resp)\n    \n    return resp","repo_name":"fabiolv/fundsbr","sub_path":"br_funds/blueprints/funds_blueprint.py","file_name":"funds_blueprint.py","file_ext":"py","file_size_in_byte":1635,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"71797979666","text":"import time\r\nimport json\r\nimport socket\r\nimport time\r\nimport datetime\r\nfrom gpiozero import LED\r\nfrom azure.servicebus import ServiceBusService\r\n\r\n# Connection strings for Event Hub\r\nsecrets = json.load(open('secrets.json'))\r\nkey_name = secrets['key_name']\r\nkey_value = secrets['key_value']\r\nservice_namespace = secrets['service_namespace']\r\neventhub_name = secrets['eventhub_name']\r\n\r\n# Establish connection to Event Hub\r\nhost = socket.gethostname()\r\nsbs = ServiceBusService(service_namespace,\r\nshared_access_key_name=key_name,\r\nshared_access_key_value=key_value)\r\nsbs.create_event_hub(eventhub_name)\r\n\r\n# Parameters\r\ninterval = 10 #seconds\r\nsensor1 = \"28-041700305eff\"\r\nsensor2 = \"28-0517001488ff\"\r\nled_red = LED(23)\r\nled_green = LED(24)\r\n\r\n# Current timestamp\r\nstart_time = datetime.datetime.now()\r\n\r\n# Error Tracker\r\nerror = False \r\n\r\n# Status timestamps\r\ntimer_on = None\r\ntimer_ready = None\r\n\r\n# Fetch values from sensors\r\ndef read_temp(id):\r\n    global error\r\n\r\n    try:\r\n        with open(\"/sys/bus/w1/devices/\" + id + \"/w1_slave\") as f:\r\n            temp = f.read()\r\n        temp 
= temp.split(\"\\n\")[1].split(\" \")[9]\r\n temp = float(temp[2:]) / 1000\r\n except OSError:\r\n temp = 0.0\r\n error = True\r\n print('>Read Error<')\r\n return temp\r\n\r\ndef set_time(start_time, end_time):\r\n return round((end_time - start_time).total_seconds() / 60.0,2)\r\n\r\nwhile True:\r\n # Read sensor values\r\n temp1 = read_temp(sensor1)\r\n temp2 = read_temp(sensor2)\r\n\r\n # Create timestamp\r\n now = datetime.datetime.now()\r\n \r\n # Flag for when machine is on\r\n if temp1 > 30:\r\n on = True\r\n led_red.on()\r\n if timer_on is None:\r\n timer_on_start = now\r\n timer_on = set_time(timer_on_start, now)\r\n else:\r\n on = False\r\n timer_on = None\r\n led_red.off()\r\n\r\n # Flag for espresso temperature reached\r\n if temp1 > 90:\r\n ready = True\r\n led_green.on()\r\n if timer_ready is None:\r\n timer_ready_start = now\r\n timer_ready = set_time(timer_ready_start, now)\r\n else:\r\n ready = False\r\n timer_ready = None\r\n led_green.off()\r\n\r\n # Startup Signal\r\n if (now - start_time) < datetime.timedelta(0,20):\r\n led_red.on()\r\n led_green.on()\r\n\r\n # Package data\r\n data = dict(\r\n Hostname = host, \r\n Timestamp = str(now), \r\n Temperature1 = temp1, \r\n Temperature2 = temp2, \r\n On = on, \r\n DurationOn = str(timer_on),\r\n Ready = ready,\r\n DurationReady = str(timer_ready)\r\n )\r\n msg = json.dumps(data)\r\n print(msg)\r\n # Send to event hub\r\n if not error:\r\n try:\r\n sbs.send_event(eventhub_name, msg)\r\n except Exception as e:\r\n print(\"ERROR sending data to event hub. Check if the event hub is up and running: \", str(e))\r\n # Reset error for next round\r\n error = False\r\n\r\n time.sleep(interval)\r\n","repo_name":"maknotavailable/CoffeeHub","sub_path":"send_eh.py","file_name":"send_eh.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5056192413","text":"#!/usr/bin/python \n# -*- coding: utf-8 -*-\n#\n# Gradient Boosting of Decision Trees\n# HW-1, sphere.mail.ru\n#\n# Author: Rakitin Vitaliy\n# vitaliyrakitin@ya.ru\n#\n\nimport numpy as np\nfrom .LinearTree import LinearTree\nfrom tqdm import tqdm\nfrom sklearn.tree import DecisionTreeRegressor\n\nclass GradientBoosting(object):\n ''' \n Gradient Boosting with L = (y - h(x))**2 / 2\n dL/dh = y - h(x)\n '''\n def __init__(self, n_estimators=10, max_depth=10, model = None):\n '''\n Parameters:\n * n_estimators (int) - estimators number\n * max_depth (int) of a tree\n '''\n \n self.estimators_list = []\n self.n_estimators = n_estimators\n self.max_depth = max_depth\n\n if model is not None:\n self.model = model\n else:\n self.model = LinearTree\n \n self.estimators_list = []\n self.b = []\n self.loss = []\n self.test_loss = []\n\n @staticmethod\n def MSE(target, predicted = None, is_test = False):\n ''' \n MSE criterion \n \n Parameters:\n * target (np.array)\n * predicted (np.array) - predicted target (Mean if None)\n Default: None\n * is_mean - divide on the target len or not\n Default: False\n\n '''\n if predicted is None:\n predicted = np.mean(target)\n \n if is_test:\n return ((predicted - target)**2).sum() / predicted.shape[0]\n MSE = ((predicted - target)**2).sum() / 2\n return MSE\n\n \n def _count_b(self, target, current_predict, new_predict, step = 1e-1, max_b = 5000):\n '''\n Count optimal parameter b\n \n Parameters:\n * target \n * prediction \n * new_predicton\n * step (int) - length of the step to count b\n * max_b - max value of b\n \n Returns:\n * b\n '''\n b = step * 
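# The read_temp() helper in send_eh.py above extracts the temperature by
# fixed token position (split(" ")[9]), which is brittle against driver
# formatting changes. A sketch of a more defensive parse keyed on the 't='
# marker that the DS18B20 w1_slave file ends with (same fallback value as
# the snippet; sensor_id is whatever 1-wire id is wired up):
def read_temp_robust(sensor_id):
    path = "/sys/bus/w1/devices/" + sensor_id + "/w1_slave"
    try:
        with open(path) as f:
            lines = f.read().splitlines()
    except OSError:
        return 0.0
    # The second line ends with 't=<millidegrees C>' once the CRC line says YES.
    if len(lines) < 2 or "t=" not in lines[1]:
        return 0.0
    return float(lines[1].split("t=")[1]) / 1000.0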
2\n loss = self.MSE(target, current_predict + step * new_predict)\n anti_loss = self.MSE(target, current_predict - step * new_predict)\n \n # determine should the parameter be negative or positive\n if anti_loss < loss:\n new_loss = self.MSE(target, current_predict - (b + step) * new_predict)\n \n while new_loss < loss and b < max_b:\n b += step\n loss = new_loss\n new_loss = self.MSE(target, current_predict - (b + step) * new_predict)\n \n else:\n new_loss = self.MSE(target, current_predict + (b + step) * new_predict)\n \n while new_loss < loss and b < max_b:\n b += step\n loss = new_loss\n new_loss = self.MSE(target, current_predict + (b + step) * new_predict)\n \n return b\n \n def fit(self, data, target, test_data=None, test_target=None, shrinkage = 0.1):\n ''' Fitting model '''\n self.estimators_list = []\n self.b = []\n self.loss = []\n self.test_loss = []\n\n # step 1. Initialization\n\n first_estimator = self.model(max_depth = self.max_depth).fit(data, target) \n #first_estimator = DecisionTreeRegressor(max_depth = self.max_depth).fit(data, target)\n \n self.estimators_list.append(first_estimator)\n self.b.append(1)\n \n prediction = first_estimator.predict(data)\n self.loss.append(self.MSE(target, prediction, is_test = True))\n\n if test_data is not None:\n self.test_loss.append(self.MSE(test_target, self.predict(test_data), is_test = True))\n \n for i in tqdm(range(1, self.n_estimators)):\n \n # step 2.a Count antigrad\n antigrad = target - prediction\n \n # step 2.b count new base model\n new_estimator = self.model(max_depth=self.max_depth).fit(data, antigrad)\n\n #new_estimator = DecisionTreeRegressor(max_depth = self.max_depth).fit(data, antigrad)\n \n # step 2.c count parameter b for the model\n new_prediction = new_estimator.predict(data) \n b = self._count_b(target, prediction, new_prediction)\n \n # step 2.d save estimator\n self.estimators_list.append(new_estimator)\n self.b.append(b * shrinkage)\n\n prediction += shrinkage * b * new_prediction\n self.loss.append(self.MSE(target, prediction, is_test = True))\n if test_data is not None:\n self.test_loss.append(self.MSE(test_target, self.predict(test_data), is_test = True))\n\n \n def predict(self, data): \n ''' Prediction '''\n y = None \n for ind, estimator in enumerate(self.estimators_list):\n if y is not None:\n y += estimator.predict(data) * self.b[ind] \n else:\n y = estimator.predict(data) \n return y","repo_name":"VitaliyRakitin/dm3","sub_path":"Boost/boosting.py","file_name":"boosting.py","file_ext":"py","file_size_in_byte":4995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17408874095","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom hwt.hdl.constants import Time\nfrom hwt.hdl.types.bits import Bits\nfrom hwt.hdl.types.hdlType import HdlType\nfrom hwt.hdl.types.struct import HStruct\nfrom hwt.hdl.types.structUtils import field_path_get_type\nfrom hwt.pyUtils.arrayQuery import flatten\nfrom hwt.synthesizer.typePath import TypePath\nfrom hwtLib.amba.axiLite_comp.endpoint import AxiLiteEndpoint\nfrom hwtLib.amba.axiLite_comp.endpoint_test import AxiLiteEndpointTC, \\\n AxiLiteEndpointDenseStartTC, AxiLiteEndpointDenseTC\nfrom hwtLib.amba.constants import RESP_OKAY, RESP_SLVERR\nfrom hwtLib.types.ctypes import uint32_t\nfrom pyMathBitPrecise.bit_utils import mask\n\nstructTwoArr = HStruct(\n (uint32_t[4], \"field0\"),\n (uint32_t[4], \"field1\")\n )\nstructTwoArr_str = \"\"\"\\\nstruct {\n [4] field0 // start:0x0(bit) 0x0(byte)\n [4] 
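# For the squared loss used by GradientBoosting above, the incremental
# search in _count_b() has a closed form: minimising
#   L(b) = ||target - (current + b * new)||^2 / 2
# over b gives b* = <target - current, new> / <new, new>, sign included.
# A sketch (NumPy arrays assumed, as elsewhere in the class):
import numpy as np

def optimal_step(target, current_predict, new_predict):
    residual = target - current_predict
    denom = float(np.dot(new_predict, new_predict))
    # A degenerate base learner (all-zero output) admits no useful step.
    return float(np.dot(residual, new_predict)) / denom if denom else 0.0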
field1 // start:0x80(bit) 0x10(byte)\n}\"\"\"\n\nstructTwoArr2 = HStruct(\n (uint32_t[3], \"field0\"),\n (uint32_t[4], \"field1\")\n )\nstructTwoArr2_str = \"\"\"\\\nstruct {\n [3] field0 // start:0x0(bit) 0x0(byte)\n [4] field1 // start:0x60(bit) 0xc(byte)\n}\"\"\"\n\nstructStructsInArray = HStruct(\n (HStruct(\n (uint32_t, \"field0\"),\n (uint32_t, \"field1\")\n )[4],\n \"arr\"),\n )\nstructStructsInArray_str = \"\"\"\\\nstruct {\n struct {\n field0 // start:0x0(bit) 0x0(byte)\n field1 // start:0x20(bit) 0x4(byte)\n }[4] arr // start:0x0(bit) 0x0(byte)\n}\"\"\"\n\n\nclass AxiLiteEndpointArrayTC(AxiLiteEndpointTC):\n STRUCT_TEMPLATE = structTwoArr\n FIELD_ADDR = [0x0, 0x10]\n\n def test_nop(self):\n u = self.mySetUp(32)\n MAGIC = 100\n\n for i in range(8):\n u.decoded.field0._ag.mem[i] = MAGIC + 1\n u.decoded.field1._ag.mem[i] = 2 * MAGIC + 1\n\n self.randomizeAll()\n self.runSim(100 * Time.ns)\n\n self.assertEmpty(u.bus._ag.r.data)\n for i in range(8):\n self.assertValEqual(u.decoded.field0._ag.mem[i], MAGIC + 1)\n self.assertValEqual(u.decoded.field1._ag.mem[i], 2 * MAGIC + 1)\n\n def test_read(self):\n u = self.mySetUp(32)\n regs = self.regs\n MAGIC = 100\n\n for i in range(4):\n u.decoded.field0._ag.mem[i] = MAGIC + i + 1\n u.decoded.field1._ag.mem[i] = 2 * MAGIC + i + 1\n regs.field0[i].read()\n regs.field1[i].read()\n\n self.randomizeAll()\n self.runSim(2 * 8 * 100 * Time.ns)\n\n self.assertValSequenceEqual(u.bus._ag.r.data, [\n (MAGIC + 1, RESP_OKAY),\n (2 * MAGIC + 1, RESP_OKAY),\n (MAGIC + 2, RESP_OKAY),\n (2 * MAGIC + 2, RESP_OKAY),\n (MAGIC + 3, RESP_OKAY),\n (2 * MAGIC + 3, RESP_OKAY),\n (MAGIC + 4, RESP_OKAY),\n (2 * MAGIC + 4, RESP_OKAY),\n ])\n\n def test_write(self):\n u = self.mySetUp(32)\n regs = self.regs\n MAGIC = 100\n\n for i in range(4):\n u.decoded.field0._ag.mem[i] = None\n u.decoded.field1._ag.mem[i] = None\n regs.field0[i].write(MAGIC + i + 1)\n regs.field1[i].write(2 * MAGIC + i + 1)\n\n self.randomizeAll()\n self.runSim(2 * 8 * 100 * Time.ns)\n\n self.assertEmpty(u.bus._ag.r.data)\n for i in range(4):\n self.assertValEqual(u.decoded.field0._ag.mem[i],\n MAGIC + i + 1, f\"index={i:d}\")\n self.assertValEqual(u.decoded.field1._ag.mem[i],\n 2 * MAGIC + i + 1, f\"index={i:d}\")\n\n def test_registerMap(self):\n self.mySetUp(32)\n s = self.addrProbe.discovered.__repr__(withAddr=0, expandStructs=True)\n self.assertEqual(s, structTwoArr_str)\n\n\nclass AxiLiteEndpointArray2TC(AxiLiteEndpointTC):\n STRUCT_TEMPLATE = structTwoArr2\n FIELD_ADDR = [0x0, 3 * 0x04]\n\n def test_nop(self):\n u = self.mySetUp(32)\n MAGIC = 100\n\n for i in range(4):\n if i < 3:\n u.decoded.field0._ag.mem[i] = MAGIC + 1\n u.decoded.field1._ag.mem[i] = 2 * MAGIC + 1\n\n self.randomizeAll()\n self.runSim(100 * Time.ns)\n\n self.assertEmpty(u.bus._ag.r.data)\n for i in range(4):\n if i < 3:\n self.assertValEqual(u.decoded.field0._ag.mem[i], MAGIC + 1)\n self.assertValEqual(u.decoded.field1._ag.mem[i], 2 * MAGIC + 1)\n\n def test_read(self):\n u = self.mySetUp(32)\n regs = self.regs\n MAGIC = 100\n\n for i in range(4):\n if i < 3:\n u.decoded.field0._ag.mem[i] = MAGIC + i + 1\n regs.field0[i].read()\n\n u.decoded.field1._ag.mem[i] = 2 * MAGIC + i + 1\n regs.field1[i].read()\n\n self.randomizeAll()\n self.runSim(2 * 8 * 100 * Time.ns)\n\n self.assertValSequenceEqual(u.bus._ag.r.data, [\n (MAGIC + 1, RESP_OKAY),\n (2 * MAGIC + 1, RESP_OKAY),\n (MAGIC + 2, RESP_OKAY),\n (2 * MAGIC + 2, RESP_OKAY),\n (MAGIC + 3, RESP_OKAY),\n (2 * MAGIC + 3, RESP_OKAY),\n (2 * MAGIC + 4, RESP_OKAY),\n ])\n\n def 
test_write(self):\n u = self.mySetUp(32)\n regs = self.regs\n MAGIC = 100\n\n for i in range(4):\n if i < 3:\n u.decoded.field0._ag.mem[i] = None\n regs.field0[i].write(MAGIC + i + 1)\n\n u.decoded.field1._ag.mem[i] = None\n regs.field1[i].write(2 * MAGIC + i + 1)\n\n self.randomizeAll()\n self.runSim(2 * 8 * 100 * Time.ns)\n\n self.assertEmpty(u.bus._ag.r.data)\n for i in range(4):\n if i < 3:\n self.assertValEqual(u.decoded.field0._ag.mem[i],\n MAGIC + i + 1, f\"index={i:d}\")\n self.assertValEqual(u.decoded.field1._ag.mem[i],\n 2 * MAGIC + i + 1, f\"index={i:d}\")\n\n def test_registerMap(self):\n self.mySetUp(32)\n s = self.addrProbe.discovered.__repr__(withAddr=0, expandStructs=True)\n self.assertEqual(s, structTwoArr2_str)\n\n\nclass AxiLiteEndpointStructsInArrayTC(AxiLiteEndpointTC):\n STRUCT_TEMPLATE = structStructsInArray\n\n def mySetUp(self, data_width=32):\n\n def shouldEnterFn(root: HdlType, field_path: TypePath):\n return (True, isinstance(field_path_get_type(root, field_path), Bits))\n\n u = AxiLiteEndpoint(self.STRUCT_TEMPLATE,\n shouldEnterFn=shouldEnterFn)\n self.u = u\n\n self.DATA_WIDTH = data_width\n u.DATA_WIDTH = self.DATA_WIDTH\n\n self.compileSimAndStart(self.u, onAfterToRtl=self.mkRegisterMap)\n return u\n\n def test_nop(self):\n u = self.mySetUp(32)\n\n self.randomizeAll()\n self.runSim(100 * Time.ns)\n\n self.assertEmpty(u.bus._ag.r.data)\n for item in u.decoded.arr:\n self.assertEmpty(item.field0._ag.dout)\n self.assertEmpty(item.field1._ag.dout)\n\n def test_registerMap(self):\n self.mySetUp(32)\n s = self.addrProbe.discovered.__repr__(withAddr=0, expandStructs=True)\n self.assertEqual(s, structStructsInArray_str)\n\n def test_read(self):\n u = self.mySetUp(32)\n MAGIC = 100\n MAGIC2 = 300\n\n a = u.bus.ar._ag.create_addr_req\n u.bus.ar._ag.data.extend([a(i * 0x4) for i in range(4 * 2 + 1)])\n\n for i, a in enumerate(u.decoded.arr):\n a.field0._ag.din.extend([MAGIC + i])\n a.field1._ag.din.extend([MAGIC2 + i])\n\n self.randomizeAll()\n self.runSim(500 * Time.ns)\n expected = list(flatten([[(MAGIC + i, RESP_OKAY),\n (MAGIC2 + i, RESP_OKAY)]\n for i in range(4)], level=1)\n ) + [(None, RESP_SLVERR)]\n self.assertValSequenceEqual(u.bus.r._ag.data, expected)\n\n def test_write(self):\n u = self.mySetUp(32)\n MAGIC = 100\n MAGIC2 = 300\n m = mask(32 // 8)\n N = 4\n\n a = u.bus.ar._ag.create_addr_req\n u.bus.aw._ag.data.extend([a(i * 0x4) for i in range(N * 2 + 1)])\n\n expected = [\n [(MAGIC + i + 1, m) for i in range(N)],\n [(MAGIC2 + i + 1, m) for i in range(N)]\n ]\n\n u.bus.w._ag.data.extend(flatten(zip(expected[0], expected[1]),\n level=1))\n u.bus.w._ag.data.append((123, m))\n\n self.randomizeAll()\n self.runSim(800 * Time.ns)\n\n for i, a in enumerate(u.decoded.arr):\n # [index of field][index in arr][data index]\n self.assertValSequenceEqual(a.field0._ag.dout, [expected[0][i][0]])\n self.assertValSequenceEqual(a.field1._ag.dout, [expected[1][i][0]])\n\n self.assertValSequenceEqual(u.bus.b._ag.data,\n [RESP_OKAY for _ in range(2 * N)]\n +[RESP_SLVERR])\n\n\nAxiLiteEndpointArrTCs = [\n AxiLiteEndpointArrayTC,\n AxiLiteEndpointArray2TC,\n AxiLiteEndpointStructsInArrayTC,\n]\n\nif __name__ == \"__main__\":\n import unittest\n testLoader = unittest.TestLoader()\n _ALL_TCs = [\n AxiLiteEndpointTC,\n AxiLiteEndpointDenseStartTC,\n AxiLiteEndpointDenseTC,\n *AxiLiteEndpointArrTCs,\n ]\n testLoader = unittest.TestLoader()\n # suite = unittest.TestSuite([AxiLiteEndpointTC(\"test_read\")])\n loadedTcs = [testLoader.loadTestsFromTestCase(tc) for tc in _ALL_TCs]\n suite 
= unittest.TestSuite(loadedTcs)\n runner = unittest.TextTestRunner(verbosity=3)\n runner.run(suite)\n\n # u = AxiLiteEndpoint(structStructsInArray,\n # shouldEnterFn=lambda tmpl: True)\n # u.DATA_WIDTH = 32\n # print(to_rtl_str(u))\n","repo_name":"Nic30/hwtLib","sub_path":"hwtLib/amba/axiLite_comp/endpoint_arr_test.py","file_name":"endpoint_arr_test.py","file_ext":"py","file_size_in_byte":10138,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"48"} +{"seq_id":"26153744052","text":"import os, sys\nimport json\nimport pickle\nimport praw\nfrom praw.models import Comment, User\nfrom datetime import datetime \nfrom dateutil.relativedelta import relativedelta \nfrom retrying import retry\n\n\n# Connect to praw\ndef connect():\n # Load config\n with open('config.json', 'r') as f:\n config = json.load(f)\n \n r = praw.Reddit(\n client_id = config[\"client_id\"],\n client_secret = config[\"client_secret\"],\n user_agent='Best /r/WritingPrompts authors by /u/raymestalez'\n )\n return r\n\n\n# Convert timestamp to age\ndef age(timestamp):\n dt1 = datetime.fromtimestamp(timestamp)\n dt2 = datetime.now()\n rd = relativedelta(dt2, dt1)\n age = \"%d days, %d hours\" % (rd.days, rd.hours)\n return age\n\n# Execute a function, retry if fails, and save results into a file\n@retry(stop_max_attempt_number=10)\ndef cache(function, argument_dict, filename, refetch=False):\n if os.path.isfile(filename) and not refetch:\n # If there's a file, and I'm not telling it to refetch, I just read from file\n print(\"Reading from file \" + filename)\n with open (filename, 'rb') as fp:\n results = pickle.load(fp)\n return results\n else:\n # On the first run, or if I want to refetch - execute the passed function\n # And save results into a file\n print(\"Refetched, writing to file \" + filename) \n results = function(**argument_dict)\n with open(filename, 'wb') as fp:\n pickle.dump(results, fp)\n return results\n\n\n \n# Grab top posts\ndef fetch_top_posts(limit=10, time_filter='week'):\n top_posts=list(r.subreddit('writingprompts').top(time_filter=time_filter, limit=limit))\n return top_posts\n\n\n# Get comments from the top posts, and sort them\ndef fetch_top_comments(top_posts, limit=2500):\n all_comments = []\n number_of_posts = len(top_posts)\n for index, post in enumerate(top_posts):\n # Looping through posts, taking their comments, adding them all into one list\n real_comments = [c for c in post.comments if isinstance(c, Comment)]\n all_comments += real_comments\n print(\"Added comments from post: \" + str(index) + \"/\" + str(number_of_posts))\n\n # Sort comments by score\n sorted_comments = sorted(all_comments, key=lambda x: x.score, reverse=True)\n print(\"Comments sorted!\")\n\n return sorted_comments\n\n# Put authors of all comments into one list\ndef extract_authors(comments):\n all_authors = []\n number_of_comments = len(comments)\n for index, comment in enumerate(comments):\n author = comment.author\n # Add author to the list if he's not there yet\n if author and not author in all_authors:\n print(\"Added to authors: \" + author.name)\n print(str(index) + \"/\" + str(number_of_comments))\n all_authors.append(author)\n\n print(\"Authors extracted!\")\n return all_authors\n\n\ndef is_in_list(word, filename):\n try:\n with open(filename, 'r') as f:\n if (str(word) in [x.strip() for x in f.readlines()]):\n print(word + \" already in the list!\")\n return 1\n else:\n print(word + \" not in the list!\")\n return 0\n except IOError as e:\n # catch non-existing file\n if 
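# The cache() helper in topauthors_bk.py above is a hand-rolled pickle
# memoiser. For comparison, the same behaviour as a reusable decorator
# (sketch; the cache_path argument is illustrative):
import functools
import os
import pickle

def pickled(cache_path, refetch=False):
    def wrap(fn):
        @functools.wraps(fn)
        def inner(*args, **kwargs):
            if os.path.isfile(cache_path) and not refetch:
                with open(cache_path, 'rb') as fp:
                    return pickle.load(fp)
            result = fn(*args, **kwargs)
            with open(cache_path, 'wb') as fp:
                pickle.dump(result, fp)
            return result
        return inner
    return wrap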
e.errno == 2:\n return 0\n\ndef add_to_list(word, filename):\n if not is_in_list(word, filename):\n with open(filename, 'a') as f:\n f.write(str(word))\n f.write('\\n') \n\n# @retry(stop_max_attempt_number=10)\ndef calculate_karma(author, time_filter='week', limit=1000):\n if (time_filter=='week'):\n filename = 'processed_authors_week.pkl'\n else:\n filename = 'processed_authors_all.pkl'\n if (time_filter=='week'):\n namesfile = 'processed_authors_names_week.db'\n else:\n namesfile = 'processed_authors_names_all.db' \n\n # Create cache file if it doesn't exist\n if not os.path.isfile(filename):\n print(\"Creating processed_authors file\")\n processed_authors = []\n with open(filename, 'wb') as fp:\n pickle.dump(processed_authors, fp)\n\n # Open already processed authors\n with open (filename, 'rb') as fp:\n processed_authors = pickle.load(fp)\n\n # If author is not among the processed authors - calculate his karma/stories\n if not is_in_list(author.name, namesfile):\n author.wpscore = 0\n author.beststories = []\n # Combine stories score, collect the best stories.\n comments = author.comments.top(time_filter=time_filter, limit=limit)\n for comment in comments:\n if comment.subreddit.display_name == \"WritingPrompts\" and comment.is_root:\n author.wpscore += comment.score\n author.beststories.append(comment)\n\n # Append author to the list and save the file\n processed_authors.append(author)\n with open(filename, 'wb') as fp:\n pickle.dump(processed_authors, fp)\n print(author.name + \" processed, returning.\")\n add_to_list(author.name, namesfile)\n return author\n else:\n print(author.name + \" has been processed before, reading from file and returning.\")\n # If author has already been processed - find him and return him\n for processed_author in processed_authors:\n if author.name == processed_author.name:\n return processed_author\n\n\ndef process_authors(authors, time_filter='week'):\n authors = authors[:800]\n numberofauthors = len(authors)\n for index, author in enumerate(authors):\n try:\n processed_author = calculate_karma(author, time_filter)\n print(\"/r/WPs karma calculated: \" + processed_author.name + \" - \" + str(processed_author.wpscore))\n print(str(index) + \"/\" + str(numberofauthors))\n except:\n pass\n\n print(\"All karma calculated.\")\n\ndef sort_authors(authors, time_filter='week', reprocess=False):\n # Calculate combined karma from all stories and sort by it\n # And attach a list of user's best stories\n # Here, limit is the number of user's comments to calculate karma from\n\n if reprocess:\n # Loop through all authors, calculate stories/karma, write into a file\n process_authors(authors, time_filter)\n\n authorsdata = []\n # Open already processed authors\n if (time_filter=='week'):\n filename = 'processed_authors_week.pkl'\n else:\n filename = 'processed_authors_all.pkl'\n with open (filename, 'rb') as fp:\n print(\"Loading processed authors\")\n authorsdata = pickle.load(fp)\n\n print(\"Total authors: \" + str(len(authorsdata)))\n print(\"Last author's karma \" + str(authorsdata[-1].wpkarma))\n # Sort authors by their /r/WritingPrompts karma\n sorted_authors = sorted(authorsdata, key=lambda x: x.wpscore, reverse=True)\n sorted_authors = sorted_authors[:100]\n print(\"Authors sorted!\")\n \n return sorted_authors\n\ndef authors_to_json(sorted_authors, filename):\n authors_list = []\n\n for index, author in enumerate(sorted_authors):\n # print(\"Author \" + author.name)\n author_dict = {}\n author_dict['username'] = author.name\n author_dict['karma'] = 
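# Editor's note: sort_authors() above prints authorsdata[-1].wpkarma, but
# calculate_karma() only ever assigns the .wpscore attribute, so that line
# raises AttributeError at runtime. It presumably should read:
#     print("Last author's karma " + str(authorsdata[-1].wpscore))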
author.wpscore\n\n author_dict['beststories'] = []\n for index, story in enumerate(author.beststories[:5]):\n # print(\"Story score \" + str(story.score))\n story_dict = {}\n story_dict['url'] = story.link_url + story.id\n story_dict['prompt'] = story.link_title.replace('[WP]', '')\n # Append story to author's stories\n author_dict['beststories'].append(story_dict)\n # Append author to author's list\n authors_list.append(author_dict)\n # Generate json out of author's list\n authors_json = json.dumps(authors_list)\n print(\"JSON generated for \" + str(len(authors_list)))\n with open(filename, \"w\") as text_file:\n text_file.write(authors_json)\n\n\ndef top_authors_week():\n limit = 1000\n\n top_posts = cache(\n fetch_top_posts, {'limit': limit, 'time_filter':'week'},\n 'top_posts_week.pkl', refetch=False\n )\n\n # Combine and sort their comments\n sorted_comments = cache(\n fetch_top_comments, {'top_posts':top_posts[:limit]},\n 'top_comments_week.pkl', refetch=False\n )\n # Get comment's authors\n all_authors = cache(\n extract_authors, {'comments':sorted_comments},\n 'all_authors_week.pkl', refetch=False\n )\n # Sort them in order of combined story karma\n sorted_authors = cache(\n sort_authors, {'authors':all_authors[:2000],\n 'time_filter':'week',\n 'reprocess':False},\n 'sorted_authors_week.pkl', refetch=True\n )\n\n authors_to_json(sorted_authors, 'top_authors_week.json')\n\ndef top_authors_all():\n limit = 1000\n\n top_posts = cache(\n fetch_top_posts, {'limit': limit, 'time_filter':'all'},\n 'top_posts_all.pkl', refetch=False\n )\n\n # Combine and sort their comments\n sorted_comments = cache(\n fetch_top_comments, {'top_posts':top_posts[:limit]},\n 'top_comments_all.pkl', refetch=False\n )\n print(\"Top comments \" + str(len(sorted_comments)))\n # Get comment's authors (considering only 5k top stories ever)\n all_authors = cache(\n extract_authors, {'comments':sorted_comments[:5000]},\n 'all_authors_all.pkl', refetch=False\n )\n print(\"Top authors \" + str(len(all_authors)))\n # Sort them in order of combined story karma (considering only first 1k authors)\n sorted_authors = cache(\n sort_authors, {'authors':all_authors[:5000],\n 'time_filter':'all',\n 'reprocess':True},\n 'sorted_authors_all.pkl', refetch=True\n )\n\n authors_to_json(sorted_authors, 'top_authors_all.json')\n\n\n# Doing stuff\nr = connect()\nsubreddit = r.subreddit('writingprompts')\ntop_authors_week()\n# top_authors_all()\n\n# sorted_authors = calculate_karma(authors)\n# write_authors_to_file(sorted_authors) \n# user = r.redditor('raymestalez')\n# prompts = list(subreddit.top(time_filter='week', limit=10))\n# prompt = prompts[0]\n# print(age(prompt.created_utc))\n\n\n\n","repo_name":"lumenwrites/nulis","sub_path":"server/misc/topauthors_bk.py","file_name":"topauthors_bk.py","file_ext":"py","file_size_in_byte":10287,"program_lang":"python","lang":"en","doc_type":"code","stars":740,"dataset":"github-code","pt":"48"} +{"seq_id":"31772944234","text":"\r\n\r\ndomain=list(input(\"Enter the schema : \"))\r\nfd=input(\"Enter the Functional Dependences : \")\r\ns=[]\r\n\r\n\r\ndef step0(domain,fd):\r\n s1=fd.split(\",\")\r\n s2=[]\r\n a=[]\r\n #s2=s1.split(\"->\")\r\n for i in s1:\r\n s2.append(i.split(\"->\"))\r\n for i in range(len(s2)):\r\n if len(s2[i][1])!=1:\r\n \r\n a=list(s2[i][1])\r\n s2[i][1]=a\r\n dom=list(domain)\r\n print(s2)\r\n print(dom)\r\n return s2 ,dom \r\narr0,dom=step0(domain,fd)\r\n\r\ndef step1(arr0):\r\n s2=arr0\r\n c=[]\r\n for i in range(len(s2)):\r\n #print(i)\r\n if len(s2[i][1])!=1:\r\n 
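# The closure routines in this canonical-cover script are easier to follow
# in textbook form: keep adding the right-hand side of every functional
# dependency whose left-hand side already lies inside the closure, until a
# fixed point is reached. Sketch (FDs as (lhs, rhs) pairs, attributes as
# characters):
def attribute_closure(attrs, fds):
    closure = set(attrs)
    changed = True
    while changed:
        changed = False
        for lhs, rhs in fds:
            if set(lhs) <= closure and not set(rhs) <= closure:
                closure |= set(rhs)
                changed = True
    return closure

# e.g. attribute_closure('a', [('a', 'b'), ('b', 'c')]) -> {'a', 'b', 'c'}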
x=i\r\n for a in range(len(s2[i][1])):\r\n #print(len(s2[i][1]))\r\n b=([s2[i][0],s2[i][1][a]])\r\n c.append(b)\r\n del s2[i]\r\n else :\r\n pass \r\n for i in range(len(c)):\r\n s2.append(c[i])\r\n \r\n\r\n print(s2) \r\n return s2 \r\nprint(\"step1\") \r\narr1=step1(arr0)\r\n\r\ndef closure(closure,arr1,x):\r\n s2=arr1\r\n x=x\r\n b,c,d,s3=[],[],[],[]\r\n s3+=closure\r\n \r\n\r\n for i in range(len(s3)):\r\n \r\n b+=s3[i]\r\n #a.append(b) \r\n \r\n flag=1 \r\n #print(\"s3\",s3,\"i\",x)\r\n \r\n while flag==1:\r\n # if len(s3)>1:\r\n # s3=s3[1]\r\n # print(\"s3\",s3)\r\n\r\n for i in range(len(s2)):\r\n # if len(s3[x])>1:\r\n # s3=s3[1]\r\n \r\n if s3[1] == s2[i][0]:\r\n\r\n c+=s2[i]\r\n s3=s2[i]\r\n #print(c)\r\n flag=1\r\n\r\n flag=0 \r\n \r\n #print(c)\r\n d=c+b\r\n #,a=b,c\r\n #a.append(d)\r\n res = []\r\n for i in d:\r\n if i not in res:\r\n res.append(i)\r\n res.sort()\r\n return b,res\r\n\r\ndef closure_without(arr1):\r\n s3=arr1\r\n s2=arr1\r\n s3=s3[0][0]\r\n a=s2.pop(0)\r\n c=[]\r\n c+=s3\r\n sum=0\r\n j=0\r\n x=[]\r\n for i in range(len(s2)):\r\n sum+=1\r\n if s3 == s2[i][0]:\r\n c+=s2[i]\r\n #print(s2[i][1])\r\n #s3=s2[i][1]\r\n x.append(i) \r\n \r\n for i in range(len(s2)):\r\n if s3 == s2[i][0]:\r\n c+=s2[i]\r\n #print(s2[i][1])\r\n s3=s2[i][1]\r\n \r\n\r\n #print(sum,i,j)\r\n s2.append(a) \r\n res = []\r\n for i in c:\r\n if i not in res:\r\n res.append(i)\r\n res.sort()\r\n return res \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef step2(arr1):\r\n s2=arr1\r\n s3=arr1\r\n w,wo=[],[]\r\n print(\"with\")\r\n for i in range(len(s2)):\r\n c,d=closure(s3[i],s2,i)\r\n w.append(d)\r\n print(c,d)\r\n\r\n print(\"without\")\r\n for i in range(len(s2)):\r\n #print(\"s3\",s3)\r\n a=closure_without(s2)\r\n wo.append(a)\r\n print(a)\r\n for i in range(len(w)):\r\n if w[i]==wo[i]:\r\n pass\r\n else:\r\n s.append(s2[i])\r\n \r\n # print(\"w\",len(w)) \r\n # print(\"wo\",len(wo)) \r\n return w,wo\r\n \r\n\r\n\r\n\r\nprint(\"step2\")\r\narr21,arr22=step2(arr1)\r\n\r\ndef step3(arr1):\r\n s2=arr1\r\n s4,w31,w32=[],[],[]\r\n for i in range(len(s2)):\r\n if len(s2[i][0])>1:\r\n c,d=closure(s2[i],s2,i)\r\n w31.append(d)\r\n s4=list(s2[i][0])\r\n print(s4,\"s4\")\r\n for i in range(len(s4)):\r\n s5=[]\r\n s5.append(\"\")\r\n s5+=s4[i]\r\n a=closure(s5,s2,i)\r\n w32.append(a)\r\n #print(a) \r\n print(\"w31\",w31)\r\n print(\"w32\",w32) \r\n for i in range(len(s4)):\r\n if w31[0]==w32[i][1]:\r\n pass\r\n \r\n\r\n \r\n return a \r\nprint(\"step3\")\r\narr3=step3(arr1)\r\n\r\n\r\nprint(\"\\n cannonical cover\",s)\r\n\r\ncandy=[]\r\ndef candidate_key(arr1,arr21,dom):\r\n s2=arr1\r\n s3=arr21\r\n dom=dom\r\n #print(\"s2\",s2,\"s3\",s3,\"d0m\",dom)\r\n for i in range(len(s2)):\r\n if s3[i]==dom:\r\n candy.append(arr1[i])\r\n res = []\r\n for i in candy:\r\n if i not in res:\r\n res.append(i)\r\n res.sort()\r\n \r\n print(res) \r\n return res\r\n\r\nprint(\"candidate_key\")\r\ncan=candidate_key(arr1,arr21,dom) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#a->b,b->c,c->da->b,b->c,c->d\r\n#a->b,b->c,c->d,ab->c,a->abcd\r\n","repo_name":"Khuusshhi/Canonical-cover","sub_path":"canonical cover_DBMS.py","file_name":"canonical cover_DBMS.py","file_ext":"py","file_size_in_byte":4298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7439817493","text":"\"\"\"画等腰三角形\n\n关于程序的几点说明: \n1. 
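# A candidate key is any minimal attribute set whose closure covers the
# whole schema. For the small schemas this script targets, brute force over
# subsets is adequate (sketch, reusing attribute_closure from the sketch
# above):
from itertools import combinations

def brute_force_candidate_keys(schema, fds):
    keys = []
    for r in range(1, len(schema) + 1):
        for combo in combinations(schema, r):
            is_superkey = attribute_closure(combo, fds) >= set(schema)
            is_minimal = not any(set(k) <= set(combo) for k in keys)
            if is_superkey and is_minimal:
                keys.append(combo)
    return keys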
用户输入等腰三角形的顶角和腰长,程序画出等腰三角形,并给出底角和底边长。\n\"\"\"\n\nimport turtle\n\n\nMAX_LEG_LENGTH = 500\nVERTEX_Y = 200\n\n# 输入顶角,计算底角。\nvertex_angle = float(input('请输入等腰三角形的顶角: '))\nwhile True:\n if 0 < vertex_angle < 180:\n break\n print('三角形的内角必须大于 0 度且小于 180 度。')\n vertex_angle = float(input('请重新输入: '))\nbase_angle = (180 - vertex_angle) / 2\nif int(vertex_angle) == vertex_angle:\n vertex_angle = int(vertex_angle)\nif int(base_angle) == base_angle:\n base_angle = int(base_angle)\n\n# 输入腰长。\nleg = float(input(f'请输入腰长 (0 - {MAX_LEG_LENGTH}): '))\nwhile True:\n if 0 < leg <= MAX_LEG_LENGTH:\n break\n print(f'腰长必须在 (0, {MAX_LEG_LENGTH}] 范围内。')\n leg = float(input('请重新输入: '))\nif int(leg) == leg:\n leg = int(leg)\n\n# 画等腰三角形。\nscreen = turtle.Screen()\npen = turtle.Turtle()\npen.up()\npen.goto(0, VERTEX_Y)\npen.down()\npen.right(90 - vertex_angle / 2)\npen.forward(leg)\npoint = pen.position()\npen.backward(leg)\npen.right(vertex_angle)\npen.forward(leg)\npen.left(180 - base_angle)\n# 获取底边两端点之间的距离,也就是底边长。\nbase = pen.distance(point)\npen.forward(base)\n\nprint(f'\\n顶角: {vertex_angle} 底角: {base_angle} 腰长: {leg} 底长: {round(base)}')\n\nscreen.exitonclick()\n","repo_name":"feli10/math-coding","sub_path":"_cn/g425_triangle/isosceles_triangle.py","file_name":"isosceles_triangle.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"17091914157","text":"import json\nimport time\nfrom dateutil.parser import parse\n\nd = json.load(open('articles.json', 'r'))\n\nseen_titles = set()\nto_remove = [\n 'recipe', 'nfl', 'ufc', 'car', 'vehicle',\n 'musical', 'cubs', 'football', 'quarterback',\n 'fashion', 'baseball', 'restaurants', 'restaurant',\n 'basketball', 'warriors', 'game', 'nba', 'sport',\n 'volkswagen', 'jaguar', 'food', 'tv', 'music',\n 'photographers', 'halloween', 'art', 'bears', 'permalink',\n 'soccer', 'films', 'thanksgiving', 'forecast', 'hbo', 'quiz',\n 'polo', 'tennis', 'series', 'inferno', 'disney', 'beachy',\n 'outfit', 'memoir', 'supermoon'\n]\n\ndef process_article(a):\n created_at = parse(a['created_at'])\n created_at = time.mktime(created_at.timetuple())\n return {\n 'created_at': created_at,\n 'url': a['url'],\n 'title': a['title'],\n 'image': a['image'],\n 'text': a['text'],\n 'summary': a['summary'],\n 'keywords': a['keywords']\n }\n\ndef keep(a):\n \"\"\"removes duplicates and articles containing skipped keywords\"\"\"\n seen = a['title'] in seen_titles\n if not seen:\n seen_titles.add(a['title'])\n return not (set(to_remove) & set(a['keywords'])) and not seen\n\narticles = [process_article(a) for a in d if keep(a)]\n\n# from collections import defaultdict\n# words = defaultdict(int)\n# for a in articles:\n# for kw in a['keywords']:\n# words[kw] += 1\n\n# sortkws = sorted(words.items(), key=lambda i: i[1])\n# for k, n in sortkws:\n# print(k, n)\n# print('---')\n\nprint('removed {}'.format(len(d) - len(articles)))\nprint('remains {}'.format(len(articles)))\nwith open('articles_processed.json', 'w') as f:\n json.dump(articles, f)","repo_name":"frnsys/ml-design-workshop","sub_path":"data/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"38519945745","text":"from PyQt5.QtCore import QObject, pyqtSignal\n\nfrom ui.page_elements.condition_box import ConditionBox\n\n\nclass ConditionGroup(QObject):\n boxes_changed = pyqtSignal()\n\n def __init__(self, fields):\n 
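# The turtle script above measures the base with pen.distance(); the same
# length follows directly from the half-angle relation for an isosceles
# triangle, base = 2 * leg * sin(vertex_angle / 2). Sketch:
import math

def isosceles_base(leg, vertex_angle_deg):
    return 2 * leg * math.sin(math.radians(vertex_angle_deg) / 2)

# e.g. isosceles_base(200, 60) -> 200.0 up to float rounding, since a
# 60-degree vertex makes the triangle equilateral.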
QObject.__init__(self)\n self.fields = set(fields)\n self.used_fields = set()\n self._boxes = []\n self.box_field = {}\n\n @property\n def available_fields(self):\n return self.fields - self.used_fields\n\n def add_box(self, box: ConditionBox):\n box.set_fields([i for i in self.available_fields])\n field = box.current_field\n self.used_fields.add(field)\n self.box_field[box] = field\n for i in self._boxes:\n i.del_fields(field)\n self._boxes.append(box)\n box.field_changed.connect(self.field_change)\n self.boxes_changed.emit()\n\n def del_box(self, box: ConditionBox):\n self._boxes.remove(box)\n field = box.current_field\n self.used_fields.remove(field)\n for i in self._boxes:\n i.add_fields(field)\n self.box_field.pop(box)\n box.deleteLater()\n self.boxes_changed.emit()\n\n def field_change(self):\n sender = self.sender()\n old_field = self.box_field[sender]\n new_field = sender.current_field\n self.used_fields.remove(old_field)\n self.used_fields.add(new_field)\n self.box_field[sender] = new_field\n for i in self._boxes:\n if sender == i:\n continue\n i.add_fields(old_field)\n i.del_fields(new_field)\n","repo_name":"ArcherLuo233/election-s-prediction","sub_path":"ui/page_elements/condition_group/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29625404330","text":"import typer\n\nfrom . import __version__\nfrom .install import _install, _uninstall, is_installed\nfrom .stripout import list_dotenv_file_paths, strip_file, strip_stdin\n\ncli = typer.Typer(help=\"Strip secrets from all .env files in the current repo\")\n\n\n@cli.callback(invoke_without_command=True)\ndef main(\n ctx: typer.Context,\n dry_run: bool = typer.Option(\n False,\n help=\"Show the effect of the command without running it\",\n ),\n stdin: bool = typer.Option(\n False,\n help=\"Read lines from stdin and write stripped lines to stdout\",\n ),\n version: bool = typer.Option(\n False,\n \"-v\",\n help=\"Print the package version\",\n ),\n):\n if ctx.invoked_subcommand is None:\n if version:\n typer.echo(__version__)\n raise typer.Exit(0)\n try:\n if stdin:\n strip_stdin()\n else:\n paths = list_dotenv_file_paths()\n if dry_run:\n typer.echo(\"Dry run - Would have stripped secrets from:\")\n for path in paths:\n typer.echo(path)\n elif typer.confirm(\n \"Are you sure you want to strip secrets from this repo?\"\n ):\n typer.echo(\"Stripping secrets from:\")\n for path in paths:\n typer.echo(path)\n strip_file(path)\n else:\n raise typer.Abort()\n\n except OSError as e:\n typer.echo(e)\n raise typer.Exit(1)\n\n\n@cli.command(help=\"Check whether the filter has been installed\")\ndef status(\n _global: bool = typer.Option(\n False,\n \"--global\",\n help=(\n \"If set, the command will check for the filter in the \"\n \"global git config instead of the current repo\"\n ),\n ),\n):\n scope = \"global\" if _global else \"local\"\n if is_installed(scope):\n typer.echo(f\"Filter is installed {scope}ly\")\n else:\n typer.echo(f\"Filter is not installed {scope}ly\")\n if typer.confirm(\"Would you like to install it?\"):\n _install(scope)\n typer.echo(\"Done!\")\n else:\n raise typer.Abort()\n\n\n@cli.command(help=\"Install dotenv-stripout as a git filter\")\ndef install(\n _global: bool = typer.Option(\n False,\n \"--global\",\n help=(\n \"If set, the filter will be added to your global git \"\n \"config instead of the current repo\"\n ),\n ),\n):\n scope = \"global\" if _global else 
\"local\"\n if is_installed(scope):\n typer.echo(f\"Filter is already {scope}ly installed!\")\n raise typer.Exit(1)\n else:\n _install(scope)\n typer.echo(\"Done!\")\n\n\n@cli.command(help=\"Uninstall the filter\")\ndef uninstall(\n _global: bool = typer.Option(\n False,\n \"--global\",\n help=(\n \"If set, the filter will be removed from your global git \"\n \"config instead of the current repo\"\n ),\n ),\n):\n scope = \"global\" if _global else \"local\"\n if is_installed(scope):\n _uninstall(scope)\n typer.echo(\"Done!\")\n else:\n typer.echo(f\"Filter is not yet {scope}ly installed!\")\n raise typer.Exit(1)\n\n\nif __name__ == \"__main__\":\n cli()\n","repo_name":"harrisonpim/dotenv-stripout","sub_path":"dotenv_stripout/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3338,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"48"} +{"seq_id":"21707542581","text":"import allure\nimport pytest\n\nfrom extensions.verifications import Verifications\nfrom worksflows.electron_flows import ElectronFlows\n\n\n@pytest.mark.usefixtures('init_electron_driver')\nclass TestElectron:\n @allure.title('Test 01: Add And Verify New Task')\n @allure.description('This test add a new task and verifies it in the list of tasks')\n def test_add_and_verify_new_task(self):\n ElectronFlows.add_new_task_flow('Learn JS')\n Verifications.verify_equals(ElectronFlows.get_number_of_tasks_flow(), 1)\n\n\n @allure.title('Test 02: Add And Verify New Tasks')\n @allure.description('This test adds a new tasks and verifies them in the list of tasks')\n def test_add_and_verify_new_tasks(self):\n ElectronFlows.add_new_task_flow('Learn Python')\n ElectronFlows.add_new_task_flow('Learn Java')\n ElectronFlows.add_new_task_flow('Learn C#')\n Verifications.verify_equals(ElectronFlows.get_number_of_tasks_flow(), 3)\n\n @allure.title('Test 03: Add And Verify New Tasks From a List')\n @allure.description('This test adds a new tasks and verifies them from a pre defined LIST')\n def test_add_and_verify_new_tasks_from_a_list(self):\n ElectronFlows.add_new_task_from_a_list_flow()\n Verifications.verify_equals(ElectronFlows.get_number_of_tasks_flow(), 10)\n\n def teardown_method(self):\n ElectronFlows.empty_list_flow()\n\n\n","repo_name":"heziporf/Final-Project","sub_path":"test_cases/test_electron.py","file_name":"test_electron.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"34514369044","text":"import matplotlib.pyplot as plt\nimport numpy as np\nx = np.linspace(0, 10, 1000)\ny=0\nfor k in range(1, 10, 1):\n y=y+4*np.sin((2*k-1)*x)/((2*k-1)*np.pi)\nplt.plot(x,y,'k',color='r',label=\"w=1\",linewidth=3)\nplt.axis([0,10,-1.5,1.5])\nplt.legend()\nplt.show()\n","repo_name":"ZLEI-ZL/pythonCode","sub_path":"任务八/9.1.py","file_name":"9.1.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73935430864","text":"\"\"\"A program to find the sum of numbers in a given range.\"\"\"\n\nnum1, num2 = 3, 6 # here the range is given i.e. 
3 to 6\nsum = 0\n\n#method 1\nfor i in range(num1, num2+1):\n sum += i\nprint(sum)\n\n#method 2\nsum = int((num2*(num2+1)/2) - (num1*(num1+1)/2) + num1)\nprint(sum)\n\n#method 3\ndef recursum(sum, num1, num2):\n if num1 > num2:\n return sum\n return num1 + recursum(sum, num1 + 1, num2)\n\nprint(recursum(0, num1, num2)) # sum = 0\n","repo_name":"gaurimetkar/Python_Codes","sub_path":"Basic 100 Codes/C004.py","file_name":"C004.py","file_ext":"py","file_size_in_byte":449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73872992144","text":"import sys\n\n\nshapes = [\n {\n 'width': 4,\n 'height': 1,\n 'data': [\"####\"]\n },\n {\n 'width': 3,\n 'height': 3,\n 'data': [\".#.\", \"###\", \".#.\"]\n },\n {\n 'width': 3,\n 'height': 3,\n 'data': [\"..#\", \"..#\", \"###\"]\n },\n {\n 'width': 1,\n 'height': 4,\n 'data': [\"#\", \"#\", \"#\", \"#\"]\n },\n {\n 'width': 2,\n 'height': 2,\n 'data': [\"##\", \"##\"]\n },\n]\n\n\ndef create_line():\n return [False for x in range(7)]\n\n\ndef is_empty(line):\n for cell in line:\n if cell:\n return False\n return True\n\n\ndef spawn_shape(level, shape_index):\n shape = shapes[shape_index]\n\n lowest_empty = -1\n for i in range(len(level)-1, -1, -1):\n if is_empty(level[i]):\n lowest_empty = i\n else:\n break\n \n return 2, lowest_empty+3+shape['height']-1\n\n\n\ndef print_level(level):\n for i in range(len(level)-1, -1, -1):\n row = level[i]\n print('|', end ='')\n for col in row:\n print('@' if col else '.', end='')\n print(f'| {i}')\n print(\"+-------+\")\n\n\ndef intersects(level, shape, x, y):\n if x < 0 or y - shape['height'] + 1 < 0 or x + shape['width'] > 7:\n return True\n\n for sx in range(shape['width']):\n for sy in range(shape['height']):\n if level[y-sy][x+sx] and shape['data'][sy][sx] == '#':\n return True\n\n return False\n\n\ndef apply_shape(level, shape, x, y):\n for sx in range(shape['width']):\n for sy in range(shape['height']):\n level[y-sy][x+sx] |= shape['data'][sy][sx] == '#'\n\n\ndef get_height(level):\n for i in range(len(level)-1, -1, -1):\n if not is_empty(level[i]):\n return i+1\n\ndef run1(lines):\n gusts = lines[0].strip()\n level = [create_line(), create_line()]\n gust_index = 0\n shape_index = 0\n\n heights = [0]\n increases = [0]\n\n for i in range(100000):\n x, y = spawn_shape(level, shape_index)\n \n while y >= len(level):\n level.append(create_line())\n \n while True:\n g = gusts[gust_index]\n if g == '<' and not intersects(level, shapes[shape_index], x-1, y):\n x -= 1\n elif g == '>' and not intersects(level, shapes[shape_index], x+1, y):\n x += 1\n gust_index = (gust_index+1)%len(gusts)\n \n if intersects(level, shapes[shape_index], x, y-1):\n apply_shape(level, shapes[shape_index], x, y)\n shape_index = (shape_index+1)%len(shapes)\n break\n else:\n y -= 1\n \n heights.append(get_height(level))\n increases.append(heights[-1] - heights[-2])\n\n return increases\n\n\nif __name__ == \"__main__\":\n with open(sys.argv[1]) as f:\n lines = f.readlines()\n print(run1(lines))","repo_name":"citiral/aoc","sub_path":"2022/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42803864894","text":"def greeting(text):\n print('Text user type:' + str(text))\n\n\nperson = {\n 'name': 'Phạm Văn Nhất',\n 'age': 30,\n 'country': 'Hà Nội',\n 'job': 'IT',\n\n}\n\n\ndef showinfo(person):\n \"\"\"[summary]\n\n Args:\n person ([type]): [description]\n 
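# day17.py above records per-rock height increases for 100 000 rocks; the
# usual follow-up (for very large rock counts) is to find a repeating
# period in that sequence and extrapolate the height. A naive sketch -- it
# only compares the first two windows after a warm-up prefix, so it is a
# heuristic, not a proof of periodicity:
def find_period(increases, start=1000, max_period=5000):
    tail = increases[start:]
    for period in range(1, min(max_period, len(tail) // 2)):
        if tail[:period] == tail[period:2 * period]:
            return period
    return None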
\"\"\"\n print('Xin chao ' + person['name'] + '. ')\n\n\ndef moreInfo(age, job): return print(\n 'Tuoi: ' + str(age) + '. Nghe nghiep: ' + job)\n\n\ndef giaTriLonNhat(a):\n max = a[0]\n for i in range(len(a)):\n if max <= a[i]:\n max = a[i]\n return max\n\n\ndef giaTriNhoNhat(a):\n min = a[0]\n for i in range(len(a)):\n if min > a[i]:\n min = a[i]\n return min\n\n\ndef tinhGiaiThuaDeQuy(a):\n if a == 0:\n return 1\n return a * tinhGiaiThuaDeQuy(a - 1)\n\n\ndef tinhgiaithua(n):\n giai_thua = 1\n if (n == 0 or n == 1):\n return giai_thua\n else:\n for i in range(2, n + 1):\n giai_thua = giai_thua * i\n return giai_thua\n","repo_name":"phamnhatcn06/python","sub_path":"ham-module/mymodule.py","file_name":"mymodule.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25286991083","text":"import xml.etree.ElementTree as eT\r\n\r\nfrom ants_algorithm.model.AntEnvironment import AntEnvironment\r\nfrom ants_algorithm.model.Demands import Demands, Demand\r\nfrom ants_algorithm.model.Links import Links, Link, NeighbourLink\r\n\r\n\r\nclass InputFileParser:\r\n\r\n @staticmethod\r\n def parse(file_path, max_link_capacity):\r\n root = eT.parse(file_path).getroot()\r\n\r\n new_links = InputFileParser.__parse_links(root, max_link_capacity)\r\n new_demands = InputFileParser.__parse_demands(root)\r\n\r\n environment = AntEnvironment(new_links, new_demands)\r\n\r\n return environment\r\n\r\n @staticmethod\r\n def __parse_links(root, max_link_capacity):\r\n new_links = Links()\r\n for link in root.iter('link'):\r\n link_id = link.attrib['id']\r\n link_first_node = link.find('source').text\r\n link_second_node = link.find('target').text\r\n additional_module = link.find('additionalModules')[0]\r\n # cost = int(float(additional_module.find('cost').text))\r\n cost = 1\r\n # capacity = int(float(additional_module.find('capacity').text))\r\n capacity = max_link_capacity\r\n new_link = Link(link_id, link_first_node, link_second_node, cost, capacity)\r\n neighbour_for_first = NeighbourLink(link_id, link_second_node, cost)\r\n neighbour_for_second = NeighbourLink(link_id, link_first_node, cost)\r\n new_links.links_map_id[link_id] = new_link\r\n if link_first_node not in new_links.neighbours_links_map:\r\n new_links.neighbours_links_map[link_first_node] = [neighbour_for_first]\r\n else:\r\n new_links.neighbours_links_map[link_first_node].append(neighbour_for_first)\r\n\r\n if link_second_node not in new_links.neighbours_links_map:\r\n new_links.neighbours_links_map[link_second_node] = [neighbour_for_second]\r\n else:\r\n new_links.neighbours_links_map[link_second_node].append(neighbour_for_second)\r\n return new_links\r\n\r\n @staticmethod\r\n def __parse_demands(root):\r\n new_demands = Demands()\r\n for link in root.iter('demand'):\r\n demand_id = link.attrib['id']\r\n demand_source = link.find('source').text\r\n demand_target = link.find('target').text\r\n demand_value = int(float(link.find('demandValue').text))\r\n new_demand = Demand(demand_id, demand_source, demand_target, demand_value)\r\n new_demands.demands_map_id[demand_id] = new_demand\r\n\r\n return new_demands\r\n","repo_name":"assist-iot/auto_configurable_network","sub_path":"software/onos-opa-example-with-delay/ants_algorithm/utils/InputFileParser.py","file_name":"InputFileParser.py","file_ext":"py","file_size_in_byte":2590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"29771508993","text":"from rest_framework import serializers\nfrom ..models import Comment\nfrom base.api.serializers import UserPublicSerializer\n\nclass ChildCommentSeriailzer(serializers.ModelSerializer):\n user = UserPublicSerializer()\n # is_user = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = Comment\n fields = ('id', 'user', 'content', 'date', 'datetime')\n\n # def get_is_user(self, obj):\n # request = self.context.get(\"request\")\n # is_user = False\n # if request.user == obj.user:\n # is_user = True\n # return is_user\n\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n user = UserPublicSerializer(read_only=True)\n children = serializers.SerializerMethodField(read_only=True)\n # is_user = serializers.SerializerMethodField(read_only=True)\n\n class Meta:\n model = Comment\n fields = ('id', 'user', 'content', 'date', 'datetime', 'children')\n\n def get_children(self, obj):\n qs = Comment.objects.filter(parent__pk=obj.pk).order_by(\"datetime\")\n serializer = ChildCommentSeriailzer(\n qs, many=True)\n return serializer.data\n\n # def get_is_user(self, obj):\n # request = self.context.get(\"request\")\n # is_user = False\n # if request.user == obj.user:\n # is_user = True\n # return is_user","repo_name":"Nepul321/FitnessJourney","sub_path":"comments/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"2707394708","text":"'''\nTesting WxPython GUI development to encapsulate future projects \n'''\n#!/usr/bin/env python\nimport wx\n\n\n\nclass MainWindow(wx.Frame):\n\n \"\"\" We simply derive a new class of Frame. \"\"\"\n def __init__(self, parent, title):\n wx.Frame.__init__(self, parent, title=title, size=(200,100))\n self.control = wx.TextCtrl(self, style=wx.TE_MULTILINE)\n self.CreateStatusBar()\n\n filemenu= wx.Menu()\n filemenu.Append(wx.ID_ABOUT, \"&About\", \" Info about this you\")\n filemenu.AppendSeparator()\n filemenu.Append(wx.ID_EXIT, \"&Exit\", \"Exit\")\n menuBar = wx.MenuBar()\n menuBar.Append(filemenu, \"&File\") # addig filemenu to menubar\n self.SetMenuBar(menuBar)\n self.Show(True)\n\n\n\n\n\ndef main():\n app = wx.App(False)\n frame = MainWindow(None, \"editor\")\n app.MainLoop()\n\n\nif __name__ == \"__main__\":\n main()\n\n\n","repo_name":"CloudsWeight/python","sub_path":"numbers.py","file_name":"numbers.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4684235435","text":"#Funes Santiago\n#41489259\n\n#Diseno de datos\n#Empleado (nombre, antiguedad, salario bruto, descuentos, bonificaciones)\n#Empleado : (String, Number, Number, Number, Number)\n#donde :\n#\t-nombre : Nombre del empleado\n#\t-antiguedad : Antiguedad del empleado inicializado en 0 salvo que se indique lo contrario\n#\t-salario bruto : Salario del empleado inicializado en $55.000 salvo que se indique lo contrario\n#\t-descuentos : descuentos del empleado inicializado en 0. El numero indica porcentaje\n#\t-bonificaciones : bonificaciones del empleado inicializado en 0 . 
EL numero indica porcentaje\n\ndef DatoAntiguedad(n):\n\t''' DatoAntiguedad : Int -> Bollean\n\t\tRecibe la antiguedad y determina si es valida\n\t\tDatoSalario(4) = True\n\t\tDatoSalario(-1) = False\n\t'''\n\tif(n >= 0):\n\t\tresp = True\n\telse:\n\t\tresp = False\n\treturn resp\n\t\ndef DatoSalario(n):\n\t''' DatoSalario : Int -> Bollean\n\t\tRecibe el salario y determina si es valido\n\t\tDatoSalario(56000) = True\n\t\tDatoSalario(35000) = False\n\t'''\n\tif(n < 55000):\n\t\tresp = False\n\telse:\n\t\tresp = True\n\treturn resp\n\ndef empleado_valido(t):\n\t''' empleado_valido : Empleado -> Empleado\n\t\tRecibe datos del empleado, verifica si es valido y construye un Empleado.\n\t''' \n\ttamano = len(t)\n\tif(tamano == 1):\n\t\trespuesta = (t[0], 0, 55000, 0, 0)\n\telif(tamano == 2 and DatoAntiguedad(t[1])):\n\t\trespuesta = (t[0], t[1], 55000, 0, 0)\n\telif(tamano == 3 and DatoAntiguedad(t[1]) and DatoSalario(t[2])):\n\t\trespuesta = (t[0], t[1], t[2], 0, 0)\n\telif(tamano == 5):\n\t\trespuesta = (t[0], t[1], t[2], t[3], t[4])\n\telse:\n\t\trespuesta = \"Error\"\n\treturn respuesta\n\t\n\t\ndef mostrar_empleado(Empleado):\n\t''' mostrar_empleado : Empleado -> String\n\t\tRecibe una tupla Empleado y la muestra en pantalla\n\t'''\n\tprint(\"Nombre: \",Empleado[0], \"\\n\")\n\tprint(\"Antiguedad: \",Empleado[1], \"\\n\")\n\tprint(\"Salario Bruto: \",Empleado[2], \"\\n\")\n\tprint(\"Descuentos: \",Empleado[3], \"\\n\")\n\tprint(\"bonificaciones: \",Empleado[4])\n\t\n#print(mostrar_empleado((\"Hola\", 3, 70000, 0, 0)))\n\ndef Descuento(Salario):\n\t''' Descuento : Integer -> Integer\n\t\tRecibe un Empleado y calcula el descuento\n\t'''\n\treturn Salario - 0.08 * Salario\n\t\n\ndef calcula_sueldo(Empleado):\n\t''' calcula_sueldo : Empleado -> Empleado\n\t\tRecibe un Empleado y aplica los descuentos/bonificaciones correspondientes\n\t\tcalcula_sueldo((\"Julian\",12, 65000,0 ,0)) = 65780\n\t\tcalcula_sueldo((\"Maria\",4, 55000,0 ,0)) = 53130\n\t'''\n\tdescuento = Descuento(Empleado[2])\n\tif(Empleado[1] >= 11):\n\t\trespuesta = 0.1 * descuento + descuento\n\t\trespuesta2 = (8, 10)\n\telif(Empleado[1] > 4 and Empleado[1] <= 10):\n\t\trespuesta = 0.07 * descuento + descuento\n\t\trespuesta2 = (8, 7)\n\telif(Empleado[1] > 3 and Empleado[1] <= 5):\n\t\trespuesta = 0.05 * descuento + descuento\n\t\trespuesta2 = (8, 5)\n\telif(Empleado[1] >= 1 and Empleado[1] <= 3):\n\t\trespuesta = 0.03 * descuento + descuento\n\t\trespuesta2 = (8, 3)\n\telse:\n\t\trespuesta = descuento\n\t\trespuesta2 = (8, 0)\n\t\t\n\tnew_empleado = empleado_valido((Empleado[0], Empleado[1], Empleado[2], respuesta2[0], respuesta2[1]))\n\tmostrar_empleado(new_empleado)\n\treturn(\"Sueldo neto :\", respuesta)\n\t\ndef test_calcula_sueldo():\n\tassert(calcula_sueldo((\"Julian\",12, 65000,0 ,0))) == 65780\n\tassert(calcula_sueldo((\"Maria\",4, 55000,0 ,0))) == 53130\n\t\n\t\n\t\n\t\n\t\n\n","repo_name":"Santy-9/Programacion-II","sub_path":"Parcial Python/ejercicio2-FunesSantiago.py","file_name":"ejercicio2-FunesSantiago.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73292687184","text":"\"\"\"\nwtor\nby Cth103, 2017\nUsage:\nwtor.py http://www.google.com\nOpen port 5000 and 80.\n\"\"\"\n\nimport os\nimport shutil\nimport urllib\nfrom stem.control import Controller\nfrom flask import Flask\napp = Flask(__name__)\n@app.route('/')\ndef index():\n hitmebabyonemoretime = urllib.request.urlopen(sys.argv[1])\n endhtml = 
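# Editor's note: the elif chain in calcula_sueldo() above has overlapping
# antiguedad ranges -- a value of 5 matches both "> 4 and <= 10" (7 %) and
# "> 3 and <= 5" (5 %), and only the first branch fires. A table-driven
# sketch with explicit, non-overlapping brackets; the bracket edges are an
# assumption, chosen so both docstring examples (12 -> 10 %, 4 -> 5 %)
# still hold:
def bonus_rate(antiguedad):
    brackets = [(11, 0.10), (6, 0.07), (4, 0.05), (1, 0.03)]
    for lower, rate in brackets:
        if antiguedad >= lower:
            return rate
    return 0.0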
hitmebabyonemoretime.read()\n return endhtml\n \nprint('wtor')\nprint('By cth103')\nprint('Connecting to Tor...')\n\nwith Controller.from_port() as controller:\n controller.authenticate()\n \n response = controller.create_ephemeral_hidden_service({80: 5000}, await_publication = True)\n print(\"Service online at %s.onion, \" % response.service_id)\n\n try:\n app.run()\n finally:\n print(\"Shutting down the hidden service\")\n","repo_name":"cthpw103/wtor","sub_path":"wtor.py","file_name":"wtor.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"746914636","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nModule implementing Player.\n\"\"\"\n\nfrom PyQt5.QtCore import pyqtSlot, QThread, pyqtSignal, QTimer\nfrom PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QMessageBox, QTableWidgetItem\n\nfrom Ui_MainWindow import Ui_MainWindow\nfrom Music import Music\n\nimport os\nimport re\nimport time\n\n\nclass Player(QMainWindow, Ui_MainWindow):\n \"\"\"\n Class documentation goes here.\n \"\"\"\n\n def __init__(self, parent=None):\n \"\"\"\n Constructor\n \n @param parent reference to the parent widget\n @type QWidget\n \"\"\"\n super(Player, self).__init__(parent)\n self.setupUi(self)\n self.init_ui_states()\n self.init_ui_signals_slots()\n self.init_music()\n\n def init_ui_states(self):\n self.pushButton_play.setCheckable(True)\n self.on_off_ui(False)\n\n def init_ui_signals_slots(self):\n self.horizontalSlider_time.sliderMoved.connect(lambda x: self.statusbar.showMessage(self.sec2hms(x / 1000)))\n self.horizontalSlider_rate.sliderMoved.connect(lambda x: self.statusbar.showMessage('速度:%+4d %%' % x))\n self.horizontalSlider_pitch.sliderMoved.connect(lambda x: self.statusbar.showMessage('音调:%+8.2f' % (x / 100)))\n self.horizontalSlider_tempo.sliderMoved.connect(lambda x: self.statusbar.showMessage('节拍:%+4d %%' % x))\n\n self.horizontalSlider_time.sliderReleased.connect(self.statusbar.clearMessage)\n self.horizontalSlider_rate.sliderReleased.connect(self.statusbar.clearMessage)\n self.horizontalSlider_pitch.sliderReleased.connect(self.statusbar.clearMessage)\n self.horizontalSlider_tempo.sliderReleased.connect(self.statusbar.clearMessage)\n\n self.doubleSpinBox_rate.valueChanged.connect(lambda x: self.horizontalSlider_rate.setValue(int(x)))\n self.doubleSpinBox_pitch.valueChanged.connect(lambda x: self.horizontalSlider_pitch.setValue(round(x * 100.)))\n self.doubleSpinBox_tempo.valueChanged.connect(lambda x: self.horizontalSlider_tempo.setValue(int(x)))\n\n self.pushButton_rate.clicked.connect(lambda: self.horizontalSlider_rate.setValue(0))\n self.pushButton_pitch.clicked.connect(lambda: self.horizontalSlider_pitch.setValue(0))\n self.pushButton_tempo.clicked.connect(lambda: self.horizontalSlider_tempo.setValue(0))\n\n self.action_exit.triggered.connect(self.close)\n self.action_aboutqt.triggered.connect(lambda: QMessageBox.aboutQt(None))\n\n def init_music(self):\n self.directory = r'F:\\Dell\\Music'\n\n self.timer = QTimer(self)\n self.timer.timeout.connect(self.update_time)\n\n self.playmusic = PlayThread()\n self.playmusic.end_sigOut.connect(self.reset)\n\n def update_time(self):\n time = self.playmusic.music.tell()\n self.label_time.setText(self.sec2hms(time))\n if self.horizontalSlider_time.isSliderDown():\n return\n else:\n self.horizontalSlider_time.setSliderPosition(int(time * 1000))\n\n def reset(self):\n if self.pushButton_play.isChecked():\n self.pushButton_play.click()\n 
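# wtor.py above references sys.argv and urllib.request without importing
# them: Python 3 needs an explicit `import sys`, and `import urllib` alone
# does not reliably load the urllib.request submodule. Minimal sketch of
# the fetch with the imports in place:
import sys
import urllib.request

def fetch(url):
    with urllib.request.urlopen(url) as resp:
        return resp.read()

# i.e. the index() handler would return fetch(sys.argv[1]).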
self.pushButton_begin.click()\n\n @pyqtSlot()\n def on_pushButton_begin_clicked(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n self.horizontalSlider_time.setValue(0)\n self.label_time.setText(self.sec2hms(0))\n\n @pyqtSlot(int)\n def on_horizontalSlider_time_valueChanged(self, milliseconds):\n \"\"\"\n Slot documentation goes here.\n\n @param value DESCRIPTION\n @type int\n \"\"\"\n self.playmusic.music.seek(milliseconds / 1000)\n\n @pyqtSlot(int)\n def on_horizontalSlider_rate_valueChanged(self, value):\n \"\"\"\n Slot documentation goes here.\n\n @param value DESCRIPTION\n @type int\n \"\"\"\n self.playmusic.music.rate = value\n self.doubleSpinBox_rate.setValue(value)\n\n @pyqtSlot(int)\n def on_horizontalSlider_pitch_valueChanged(self, value):\n \"\"\"\n Slot documentation goes here.\n\n @param value DESCRIPTION\n @type int\n \"\"\"\n self.playmusic.music.pitch = round(value / 100, 2)\n self.doubleSpinBox_pitch.setValue(round(value / 100, 2))\n\n @pyqtSlot(int)\n def on_horizontalSlider_tempo_valueChanged(self, value):\n \"\"\"\n Slot documentation goes here.\n\n @param value DESCRIPTION\n @type int\n \"\"\"\n self.playmusic.music.tempo = value\n self.doubleSpinBox_tempo.setValue(value)\n\n @pyqtSlot(bool)\n def on_radioButton_loop_toggled(self, checked):\n \"\"\"\n Slot documentation goes here.\n \n @param checked DESCRIPTION\n @type bool\n \"\"\"\n self.playmusic.music.loop = checked\n\n @pyqtSlot(bool)\n def on_pushButton_play_clicked(self, checked):\n \"\"\"\n Slot documentation goes here.\n \n @param checked DESCRIPTION\n @type bool\n \"\"\"\n if checked:\n self.playmusic.music.pause = False\n self.timer.start(50)\n self.pushButton_play.setText(\"暂停\")\n else:\n self.playmusic.music.pause = True\n self.timer.stop()\n self.pushButton_play.setText(\"播放\")\n\n @pyqtSlot()\n def on_action_open_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n path = QFileDialog.getOpenFileName(self, caption='打开音频文件', directory=self.directory,\n filter=r'''\n Audio files(*.aac;*.ape;*.flac;*.m4a;*.mp3;*.wav);;\n All Files(*.*)''')[0]\n if path == '':\n return\n try:\n self.playmusic.music.load(path)\n\n self.directory, filename = os.path.split(self.playmusic.music.path)\n filesize = re.findall(r'Length.*?(\\d+)\\n', self.playmusic.music.sf_info.extra_info)\n filesize = '{:,} 字节'.format(int(filesize[0])).replace(',', ' ') if filesize else '未知 字节'\n duration = self.sec2hms(self.playmusic.music.sf_info.duration, r'{:.0f}:{:.0f}:{:.0f}')\n frames = ' ({:,} 采样)'.format(self.playmusic.music.sf_info.frames).replace(',', ' ')\n samplerate = '{} Hz'.format(self.playmusic.music.sf_info.samplerate)\n channels = str(self.playmusic.music.sf_info.channels)\n subtype_info = self.playmusic.music.sf_info.subtype_info\n self.tableWidget.setItem(0, 1, QTableWidgetItem(filename))\n self.tableWidget.setItem(1, 1, QTableWidgetItem(filesize))\n self.tableWidget.setItem(2, 1, QTableWidgetItem(duration + frames))\n self.tableWidget.setItem(3, 1, QTableWidgetItem(samplerate))\n self.tableWidget.setItem(4, 1, QTableWidgetItem(channels))\n self.tableWidget.setItem(5, 1, QTableWidgetItem(subtype_info))\n self.tableWidget.resizeColumnToContents(1)\n\n self.pushButton_begin.click()\n self.horizontalSlider_time.setMaximum(int(self.playmusic.music.sf_info.duration * 1000))\n self.on_off_ui(True)\n self.playmusic.start()\n except:\n QMessageBox.warning(self, '警告', '文件打开失败')\n\n @pyqtSlot()\n def on_action_save_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n _wp = 
QFileDialog.getSaveFileName(self, caption='另存为', directory=self.directory,\n filter=r'WAV(*.wav);;All Files(*.*)')[0]\n if _wp == '':\n return\n _rp = os.path.abspath(self.playmusic.music.path)\n if os.path.abspath(_wp) == _rp:\n QMessageBox.information(self, '提示', '输出文件路径不能与打开文件相同')\n return\n\n rate = self.playmusic.music.rate\n pitch = self.playmusic.music.pitch\n tempo = self.playmusic.music.tempo\n _str = os.popen(\n r'''soundstretch \"%s\" \"%s\" -rate=%d -pitch=%.2f -tempo=%d''' % (_rp, _wp, rate, pitch, tempo)).read()\n print(_str)\n\n @pyqtSlot()\n def on_action_usinghelp_triggered(self):\n \"\"\"\n Slot documentation goes here.\n \"\"\"\n _string = '''\n 变速(速度,rate,变速变调)\n 变调(音调,pitch,变调不变速)\n 变拍(节拍,tempo,变速不变调)\n '''\n QMessageBox.about(self, '使用帮助', _string)\n\n def on_off_ui(self, able: bool):\n self.horizontalSlider_time.setEnabled(able)\n self.horizontalSlider_rate.setEnabled(able)\n self.horizontalSlider_pitch.setEnabled(able)\n self.horizontalSlider_tempo.setEnabled(able)\n self.doubleSpinBox_rate.setEnabled(able)\n self.doubleSpinBox_pitch.setEnabled(able)\n self.doubleSpinBox_tempo.setEnabled(able)\n self.pushButton_rate.setEnabled(able)\n self.pushButton_pitch.setEnabled(able)\n self.pushButton_tempo.setEnabled(able)\n self.pushButton_play.setEnabled(able)\n self.pushButton_begin.setEnabled(able)\n self.radioButton_loop.setEnabled(able)\n\n def closeEvent(self, event):\n reply = QMessageBox.question(self, '询问', '确定退出吗?',\n QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)\n if reply == QMessageBox.Yes:\n self.timer.stop()\n self.playmusic.exit()\n time.sleep(0.1)\n event.accept()\n else:\n event.ignore()\n\n @staticmethod\n def sec2hms(sec, hms_mat=r'{:0>2.0f}:{:0>2.0f}:{:0>6.3f}'):\n m, s = divmod(sec, 60)\n h, m = divmod(m, 60)\n return hms_mat.format(h, m, s)\n\n\nclass PlayThread(QThread):\n end_sigOut = pyqtSignal()\n\n def __init__(self, parent=None):\n super(PlayThread, self).__init__(parent)\n self.music = Music()\n\n def __del__(self):\n self.music.exit()\n\n def run(self):\n def playmusic():\n try:\n self.music.play()\n except:\n pass\n\n while True:\n self.music.wait()\n playmusic()\n while self.music.loop:\n self.music.seek(0)\n playmusic()\n self.music.pause = True\n self.end_sigOut.emit()\n\n\nif __name__ == \"__main__\":\n import sys\n\n app = QApplication(sys.argv)\n player = Player()\n player.show()\n sys.exit(app.exec_())\n","repo_name":"wjroo/SoundTouch-PyQt5-Demo","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10514,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"41554623516","text":"# coding: utf-8\n\nimport sys\nfrom setuptools import setup, find_packages\n\nNAME = \"instrumental_agent\"\nVERSION = \"1.3.0\"\n\n\n# To install the library, run the following\n#\n# python setup.py install\n#\n# prerequisite: setuptools\n# http://pypi.python.org/pypi/setuptools\n\nREQUIRES = []\n\nsetup(\n name=NAME,\n version=VERSION,\n description=\"A wrapper for the Instrumental service.\",\n author=\"Expected Behavior\",\n author_email=\"support@instrumentalapp.com\",\n maintainer=\"Instrumental Support\",\n maintainer_email=\"support@instrumentalapp.com\",\n url=\"https://github.com/instrumental/instrumental_agent-python\",\n keywords=[\"Instrumental\", \"API\", \"Metrics\", \"APM\"],\n classifiers=[ # see https://pypi.python.org/pypi?%3Aaction=list_classifiers\n \"Development Status :: 5 - Production/Stable\",\n\n \"Environment :: Web Environment\",\n\n \"Intended 
Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: POSIX\",\n \"Operating System :: Unix\",\n ],\n license=\"\",\n install_requires=REQUIRES,\n packages=find_packages(),\n include_package_data=True,\n long_description=\"\"\"\\\n A native client library for the Instrumental service.\n \"\"\"\n)\n","repo_name":"Instrumental/instrumental_agent-python","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"9276181020","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport capnp # noqa\nimport measurements_capnp\nimport psutil\nimport zmq\nimport time\nimport datetime\nfrom itertools import count\n\ncontext = zmq.Context()\nsocket = context.socket(zmq.PUB)\nsocket.bind(\"tcp://*:5559\")\n\ncounter = count(0)\n\n\ndef sample():\n timestamp = datetime.datetime.utcnow().isoformat()\n sample = measurements_capnp.Measurements.new_message()\n sample.timestamp = timestamp\n sample.id = next(counter)\n sample.cpu.cpu = psutil.cpu_percent()\n mem = psutil.virtual_memory()\n sample.memory.total = mem.total\n sample.memory.available = mem.available\n sample.memory.percent = mem.percent\n sample.memory.used = mem.used\n sample.memory.free = mem.free\n sample.memory.active = mem.active\n sample.memory.inactive = mem.inactive\n socket.send_multipart([b\"Measurements\", sample.to_bytes()])\n\n\nwhile True:\n sample()\n time.sleep(0.1)\n","repo_name":"benthomasson/zeromq_capnproto_test","sub_path":"sampler2.py","file_name":"sampler2.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21455188300","text":"# coding: utf-8\nfrom __future__ import absolute_import as _abs\n\nimport ctypes\nfrom ctypes import cdll\nfrom ctypes import c_void_p, c_int, c_float, c_char_p, byref, POINTER, c_longlong\nimport numpy as np\nfrom functools import reduce\nfrom operator import mul\nfrom argparse import ArgumentParser\nimport sys\nimport os\n\nfrom .libpath import find_lib_path\n\nclass DLRError(Exception):\n \"\"\"Error thrown by DLR\"\"\"\n pass\n\ndef _load_lib():\n \"\"\"Load DLR library.\"\"\"\n lib_paths = find_lib_path()\n if len(lib_paths) == 0:\n return None\n try:\n pathBackup = os.environ['PATH'].split(os.pathsep)\n except KeyError:\n pathBackup = []\n lib_success = False\n os_error_list = []\n for lib_path in lib_paths:\n try:\n # needed when the lib is linked with non-system-available dependencies\n os.environ['PATH'] = os.pathsep.join(pathBackup + [os.path.dirname(lib_path)])\n lib = ctypes.cdll.LoadLibrary(lib_path)\n lib_success = True\n except OSError as e:\n os_error_list.append(str(e))\n continue\n finally:\n os.environ['PATH'] = os.pathsep.join(pathBackup)\n if not lib_success:\n libname = os.path.basename(lib_paths[0])\n raise DLRError(\n 'DLR library ({}) could not be loaded.\\n'.format(libname) +\n 'Likely causes:\\n' +\n ' * OpenMP runtime is not installed ' +\n '(vcomp140.dll or libgomp-1.dll for Windows, ' +\n 'libgomp.so for UNIX-like OSes)\\n' +\n ' * You are running 32-bit Python on a 64-bit OS\\n' +\n 'Error message(s): {}\\n'.format(os_error_list))\n lib.DLRGetLastError.restype = ctypes.c_char_p\n return lib\n\n\n# load the DLR library globally\n_LIB = _load_lib()\n\n\ndef _check_call(ret):\n \"\"\"\n Check the 
return value of C API call\n This function will raise exception when error occurs.\n Wrap every API call with this function\n\n Parameters\n ----------\n ret : int\n return value from API calls\n \"\"\"\n if ret != 0:\n raise DLRError(_LIB.DLRGetLastError().decode('ascii'))\n\nclass DLRModel:\n \"\"\"\n Load a Neo-compiled model\n\n Parameters\n ----------\n model_path : str\n Full path to the directory containing the compiled model\n dev_type : str\n Device type ('cpu', 'gpu', or 'opencl')\n dev_id : int\n Device ID\n \"\"\"\n\n def _lazy_init_output_shape(self):\n self.output_shapes = []\n self.output_size_dim = []\n for i in range(self.num_outputs):\n shape = self._get_output_shape(i)\n self.output_shapes.append(shape)\n\n def _parse_backend(self):\n backend = c_char_p()\n _check_call(_LIB.GetDLRBackend(byref(self.handle),\n byref(backend)))\n return backend.value.decode('ascii')\n\n def __init__(self, model_path, dev_type='cpu', dev_id=0):\n if not os.path.exists(model_path):\n raise ValueError(\"model_path %s doesn't exist\" % model_path)\n\n self.handle = c_void_p()\n device_table = {\n 'cpu': 1,\n 'gpu': 2,\n 'opencl': 4,\n }\n\n _check_call(_LIB.CreateDLRModel(byref(self.handle),\n c_char_p(model_path.encode()),\n c_int(device_table[dev_type]),\n c_int(dev_id)))\n\n self.backend = self._parse_backend()\n\n self.num_inputs = self._get_num_inputs()\n self.input_names = []\n for i in range(self.num_inputs):\n self.input_names.append(self._get_input_name(i))\n\n self.num_outputs = self._get_num_outputs()\n self._lazy_init_output_shape()\n\n def __del__(self):\n if getattr(self, \"handle\", None) is not None and self.handle is not None:\n if getattr(self, \"lib\", None) is not None:\n _check_call(_LIB.DeleteDLRModel(byref(self.handle)))\n self.handle = None\n\n def _get_num_inputs(self):\n \"\"\"Get the number of inputs of a network\"\"\"\n num_inputs = c_int()\n _check_call(_LIB.GetDLRNumInputs(byref(self.handle),\n byref(num_inputs)))\n return num_inputs.value\n\n def get_input_names(self):\n \"\"\"\n Get all input names\n\n Returns\n -------\n out : list of :py:class:`str`\n \"\"\"\n return self.input_names\n\n def _get_input_name(self, index):\n name = ctypes.c_char_p()\n _check_call(_LIB.GetDLRInputName(byref(self.handle),\n c_int(index), byref(name)))\n return name.value.decode(\"utf-8\")\n\n def _set_input(self, name, data):\n \"\"\"Set the input using the input name with data\n\n Parameters\n __________\n name : str\n The name of an input.\n data : list of numbers\n The data to be set.\n \"\"\"\n in_data = np.ascontiguousarray(data, dtype=np.float32)\n shape = np.array(in_data.shape, dtype=np.int64)\n _check_call(_LIB.SetDLRInput(byref(self.handle),\n c_char_p(name.encode('utf-8')),\n shape.ctypes.data_as(POINTER(c_longlong)),\n in_data.ctypes.data_as(POINTER(c_float)),\n c_int(in_data.ndim)))\n if self.backend == 'treelite':\n self._lazy_init_output_shape()\n\n def _run(self):\n \"\"\"A light wrapper to call run in the DLR backend.\"\"\"\n _check_call(_LIB.RunDLRModel(byref(self.handle)))\n\n def _get_num_outputs(self):\n \"\"\"Get the number of outputs of a network\"\"\"\n num_outputs = c_int()\n _check_call(_LIB.GetDLRNumOutputs(byref(self.handle),\n byref(num_outputs)))\n return num_outputs.value\n\n def _get_output_size_dim(self, index):\n \"\"\"Get the size and the dimenson of the index-th output.\n\n Parameters\n __________\n index : int\n The index of the output.\n\n Returns\n _______\n size : int\n The size of the index-th output.\n dim : int\n The dimension of the 
index-th output.\n \"\"\"\n idx = ctypes.c_int(index)\n size = ctypes.c_longlong()\n dim = ctypes.c_int()\n _check_call(_LIB.GetDLROutputSizeDim(byref(self.handle), idx,\n byref(size), byref(dim)))\n return size.value, dim.value\n\n def _get_output_shape(self, index):\n \"\"\"Get the shape for the index-th output.\n\n Parameters\n __________\n index : int\n The index of the output.\n\n Returns\n _______\n shape : list\n The shape of the index-th output.\n \"\"\"\n size, dim = self._get_output_size_dim(index)\n if not self.output_size_dim:\n self.output_size_dim = [(0, 0)] * self._get_num_outputs()\n self.output_size_dim[index] = (size, dim)\n shape = np.zeros(dim, dtype=np.int64)\n _check_call(_LIB.GetDLROutputShape(byref(self.handle),\n c_int(index),\n shape.ctypes.data_as(ctypes.POINTER(ctypes.c_longlong))))\n return shape\n\n def _get_output(self, index):\n \"\"\"Get the index-th output\n\n Parameters\n __________\n index : int\n The index of the output.\n\n Returns\n _______\n out : np.array\n A numpy array contains the values of the index-th output\n \"\"\"\n if index >= len(self.output_shapes) or index < 0:\n raise ValueError(\"index is expected between 0 and \"\n \"len(output_shapes)-1, but got %d\" % index)\n\n output = np.zeros(self.output_size_dim[index][0], dtype=np.float32)\n _check_call(_LIB.GetDLROutput(byref(self.handle), c_int(index),\n output.ctypes.data_as(ctypes.POINTER(ctypes.c_float))))\n out = output.reshape(self.output_shapes[index])\n return out\n\n def run(self, input_values):\n \"\"\"\n Run inference with given input(s)\n\n Parameters\n ----------\n input_values : a single :py:class:`numpy.ndarray` or a dictionary\n For decision tree models, provide a single :py:class:`numpy.ndarray`\n to indicate a single input, as decision trees always accept only one\n input.\n\n For deep learning models, provide a dictionary where keys are input\n names (of type :py:class:`str`) and values are input tensors (of type\n :py:class:`numpy.ndarray`). 
Deep learning models allow more than one\n input, so each input must have a unique name.\n\n Returns\n -------\n out : :py:class:`numpy.ndarray`\n Prediction result\n \"\"\"\n out = []\n # set input(s)\n if isinstance(input_values, (np.ndarray, np.generic)):\n # Treelite model or single input tvm/treelite model.\n # Treelite has a dummy input name 'data'.\n if self.input_names:\n self._set_input(self.input_names[0], input_values)\n elif isinstance(input_values, dict):\n # TVM model\n for key, value in input_values.items():\n if self.input_names and key not in self.input_names:\n raise ValueError(\"%s is not a valid input name.\" % key)\n self._set_input(key, value)\n else:\n raise ValueError(\"input_values must be of type dict (tvm model) \" +\n \"or a np.ndarray/generic (representing treelite models)\")\n # run model\n self._run()\n # get output\n for i in range(self.num_outputs):\n ith_out = self._get_output(i)\n out.append(ith_out)\n return out\n","repo_name":"aws-samples/aws-builders-fair-projects","sub_path":"reinvent-2019/automatic-anomaly-detection-tool/LambdaFunction/dlr/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":9998,"program_lang":"python","lang":"en","doc_type":"code","stars":85,"dataset":"github-code","pt":"48"} +{"seq_id":"17736723293","text":"\"\"\"\nFilters are intended to remove datetimes not meeting our criteria or to\nalter datetimes (currently only to apply a time of day to a date).\n\"\"\"\n\nimport re\nimport calendar\nimport consts\nfrom functools import partial, wraps\nfrom datetime import datetime, timedelta\nfrom consts import DAY_INDEXES, TIME_INDEXES, SELECTOR_INDEXES\nfrom consts import TimePattern\n\ndef generic_filter(filter_func):\n \"\"\"Decorates a filter function into a value generator.\n\n Each function whose name starts with _filter should return a generator\n that yields values based on some conditions.\n\n A function like:\n \n >>> def _filter_monday(regexp_result):\n ... monday = <value derived from regexp_result>\n ... def f(gen):\n ... for v in gen:\n ... if v == monday:\n ... yield v\n ... return f\n\n might be defined in more generalized form:\n\n >>> @generic_filter\n ... def _filter_monday(regexp_result, item):\n ... monday = <value derived from regexp_result>\n ... 
return item == monday\n \"\"\"\n @wraps(filter_func)\n def filter_(regex_result):\n def f(gen):\n for v in gen:\n if filter_func(regex_result, v):\n yield v\n return f\n\n return filter_\n\ndef _filter_everyother(regex_result):\n def f(gen):\n flag = False\n for v in gen:\n flag = not flag\n if flag:\n yield v\n return f\n\n\n@generic_filter\ndef _filter_weekday(regex_result, value):\n the_day = regex_result.groupdict()['principle']\n return value.weekday() in DAY_INDEXES[the_day]\n\ndef _get_xs_in_month(x, year, month):\n \"\"\"Used to get all Xs from a month where X is something like Tuesday\"\"\"\n day_index = DAY_INDEXES[x][0]\n c = calendar.monthcalendar(year, month)\n results = [week[day_index] for week in c if week[day_index]]\n\n return results\n\n\n@generic_filter\ndef _filter_identifier_in_month(regex_result, item):\n selector = regex_result.groupdict()['selector']\n the_day = regex_result.groupdict()['principle']\n \n acceptable_days = DAY_INDEXES[the_day]\n selector_index = SELECTOR_INDEXES[selector]\n \n if item.weekday() in acceptable_days:\n xs_in_month = _get_xs_in_month(the_day, item.year, item.month)\n return xs_in_month[selector_index] == item.day\n\n return False\n\n\n@generic_filter\ndef _filter_identifier_in_month_after(regex_result, item):\n selector = regex_result.groupdict()['selector']\n the_day = regex_result.groupdict()['principle']\n selector2 = regex_result.groupdict()['selector2']\n the_day2 = regex_result.groupdict()['principle2'] \n\n #calculate the 'after' date of the month\n xs_in_month = _get_xs_in_month(the_day2, item.year, item.month)\n after = datetime(item.year, item.month, xs_in_month[SELECTOR_INDEXES[selector2]])\n\n acceptable_days = DAY_INDEXES[the_day]\n selector_index = SELECTOR_INDEXES[selector]\n \n if item.weekday() in acceptable_days and after < item:\n xs_in_month = _get_xs_in_month(the_day, item.year, item.month)\n xs_in_month = [day for day in xs_in_month if day > after.day] # have to skip those dates that came before the 'after' date\n return xs_in_month[selector_index] == item.day\n\n return False \n\n\n@generic_filter\ndef _filter_day_number_in_month(regex_result, item):\n selector = int(regex_result.groupdict()['selector'])\n return item.day == selector\n\n\n@generic_filter\ndef _filter_end_of_month(regex_result, item):\n next_month = datetime(item.year, item.month, 28) + timedelta(days=4)\n last_day = next_month - timedelta(days=next_month.day)\n return item.date() == last_day.date()\n\n\n\"\"\"Application functions take a value and apply changes\nto it before yielding it on.\n\"\"\"\n#_compiled_12_hour_time = re.compile(Pattern.TIME_12H)\n_compiled_12_hourmin_time = re.compile(TimePattern.TIME_12H)\n_compiled_24_hour_time = re.compile(TimePattern.TIME_24H)\n_compiled_time_names = re.compile(TimePattern.TIME_TERM)\n_compiled_this_time = re.compile(TimePattern.TIME_CURRENT)\n\ndef _apply_time(regex_result):\n def f(gen, hour, minute):\n for value in gen:\n yield datetime(value.year, value.month, value.day, hour, minute)\n \n the_time = regex_result.groupdict()['applicant']\n\n r = _compiled_12_hourmin_time.match(the_time)\n if r:\n groupdict = r.groupdict()\n hour = int(groupdict.get(\"hour\") or 0)\n minute = int(groupdict.get(\"minute\") or 0)\n suffix = groupdict.get(\"suffix\")\n if suffix == \"pm\":\n hour += 12\n \n return partial(f, hour=hour, minute=minute) \n \n # Okay, 24 hour time?\n r = _compiled_24_hour_time.match(the_time)\n if r:\n hour, minute = r.groups()\n hour = int(hour)\n minute = int(minute)\n return partial(f, 
hour=hour, minute=minute)\n \n # Named time\n r = _compiled_time_names.match(the_time)\n if r:\n hour, minute = TIME_INDEXES[r.groups()[0]]\n return partial(f, hour=hour, minute=minute) \n \n # Relative time\n r = _compiled_this_time.match(the_time)\n if r:\n def f(gen):\n for value in gen:\n yield value\n return f\n\n raise Exception(\"Unable to find time applicant for '{}'\".format(the_time))\n\n\ndef _cut_time(regex_result):\n def f(gen):\n for value in gen:\n yield datetime(value.year, value.month, value.day)\n return f\n","repo_name":"Teifion/human_time_py","sub_path":"filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":5400,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"22527271663","text":"#!/usr/bin/python3\n\n\nimport numpy as np\n\n\ndef extract_low_points(matrix):\n \"\"\"--- Day 9: Smoke Basin ---\"\"\"\n # pad matrix with 9s to allow rolling (up/down rows; left/right cols)\n m = np.pad(matrix, pad_width=(1, 1), mode=\"constant\", constant_values=9)\n row_mask = (np.roll(m, 1, 0) > m) & (np.roll(m, -1, 0) > m)\n col_mask = (np.roll(m, 1, 1) > m) & (np.roll(m, -1, 1) > m)\n local_minima_conditions = (row_mask & col_mask)[1:-1, 1:-1] # crop\n low_points = np.extract(local_minima_conditions, matrix)\n return low_points\n\n\ndef traverse_sccs(grid):\n \"\"\"--- Part Two ---\"\"\"\n\n def dfs(grid, i, j):\n out_of_bounds_i = i < 0 or i >= len(grid)\n out_of_bounds_j = j < 0 or j >= len(grid[0])\n if out_of_bounds_i or out_of_bounds_j or grid[i][j] == 9:\n return\n grid[i][j] = 9 # mark as visited\n dfs(grid, i + 1, j)\n dfs(grid, i - 1, j)\n dfs(grid, i, j + 1)\n dfs(grid, i, j - 1)\n\n nines_ = (grid == 9).sum()\n scc_count = 0 # count of strongly connected components aka basins\n ncs = [] # node counts per SCC\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] != 9: # if node not visited\n dfs(grid, i, j)\n scc_count += 1\n # just visited = all visited - initial nodes - prev visited\n ncs.append((grid == 9).sum() - nines_ - sum(ncs))\n return ncs\n\n\nif __name__ == \"__main__\":\n matrix = np.genfromtxt(\"input/day9.txt\", delimiter=1, dtype=int)\n low_points = extract_low_points(matrix)\n risk = (low_points + 1).sum()\n scc_node_counts = sorted(traverse_sccs(matrix[:]), reverse=True)\n top3_areas = np.prod(scc_node_counts[:3])\n print(f\"Part 1 -- Total risk from all low points: {risk}\")\n print(f\"Part 2 -- Area of top 3 SCCs (basins) {top3_areas}\")\n","repo_name":"rafaelmarino/aoc2021","sub_path":"code/day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6091971535","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: Rachid and Kimia\n\"\"\"\n\n\nfrom gammatone_utils import *\nfrom scikits.talkbox import segment_axis\nfrom scikits.audiolab import Sndfile, play\nimport matplotlib.pyplot as plt\nplt.style.use('ggplot')\n\n\ndef matching_pursuit(signal, dict_kernels, threshold=0.1, max_iter=2000):\n \"\"\"\n Matching pursuit algorithm for encoding\n :param signal: input signal\n :param dict_kernels: dictionary of kernels, each column is a kernel\n :param threshold: stop condition\n :param max_iter: maximum number of iterations\n :return: array of scalar weighting factor (one per kernel)\n \"\"\"\n # Initialization\n res = signal\n coeff = np.zeros(dict_kernels.shape[0])\n # Iterative decomposition\n for i in 
range(max_iter):\n inner_prod = res.dot(dict_kernels.T)\n max_kernel = np.argmax(inner_prod)\n coeff[max_kernel] = inner_prod[max_kernel] / np.linalg.norm(dict_kernels[max_kernel,: ])**2\n res = res - coeff[max_kernel] * dict_kernels[max_kernel,: ]\n if np.linalg.norm(res) < threshold:\n return coeff\n return coeff\n\n\n# Parametrization\nb = 1.019\nresolution = 160\nstep = 8\nn_channels = 128\noverlap = 50\n\n# Compute gammatone-based dictionary\nD_multi = np.r_[tuple(gammatone_matrix(b, fc, resolution, step)[0] for\n fc in erb_space(150, 8000, n_channels))]\nfreq_c = np.array([gammatone_matrix(b, fc, resolution, step)[1] for\n fc in erb_space(150, 8000, n_channels)]).flatten()\ncenters = np.array([gammatone_matrix(b, fc, resolution, step)[2] + i*resolution for\n i, fc in enumerate(erb_space(150, 8000, n_channels))]).flatten()\n\n# Load test signal\nfilename = 'data/fsew/fsew0_001.wav'\nf = Sndfile(filename, 'r')\nnf = f.nframes\nfs = f.samplerate\nlength_sound = 20000\ny = f.read_frames(5000)\ny = f.read_frames(length_sound)\nY = segment_axis(y, resolution, overlap=overlap, end='pad')\nY = np.hanning(resolution) * Y\n\n# Encoding with matching pursuit\nX = np.zeros((Y.shape[0],D_multi.shape[0]))\nfor idx in range(Y.shape[0]):\n X[idx, :] = matching_pursuit(Y[idx, :], D_multi)\n\n# Reconstruction of the signal\nout = np.zeros(int((np.ceil(len(y)/resolution)+1)*resolution))\nfor k in range(0, len(X)):\n idx = range(k*(resolution-overlap), k*(resolution-overlap) + resolution)\n out[idx] += np.dot(X[k], D_multi)\nsquared_error = np.sum((y - out[0:len(y)]) ** 2)\n\n# Play the original signal and the reconstructed for comparison\nplay(y, fs=16000)\nplay(out, fs=16000)\n\n# Plotting results\n\n# 1st plot: original signal/reconstructed signal/residuals\narr = np.array(range(length_sound))/float(fs)\nplt.figure(1)\nplt.subplot(311)\nplt.plot(arr, y, 'b', label=\"Input Signal\")\nplt.legend()\nplt.subplot(312)\nplt.plot(arr, out[0:len(y)], 'r', label=\"Recontruction\")\nplt.legend()\nplt.subplot(313)\nplt.plot(arr, (y - out[0:len(y)])**2, 'g', label=\"Residual\")\nplt.legend()\nplt.xlabel(\"Time in s\")\nplt.show()\n\n# 2nd plot: spike train\nplt.figure(2)\nspikes_pos = np.array(np.nonzero(X))\ntemporal_position = centers[spikes_pos[0][:]]\ncentre_freq = freq_c[spikes_pos[1][:]]\nplt.scatter(temporal_position, centre_freq, marker='+', s=1)\nplt.show()\n\n# 3rd plot: example of gammatone-based dictionary\nfig = plt.figure(3)\nfig.suptitle(\"Gammatone filters\", fontsize=\"x-large\")\nfreqs = [1000, 300, 40]\nresolution = 5000\nfor center in [100, 1500, 3000]:\n plt.subplot(311)\n plt.plot(gammatone_function(resolution, freqs[0], center), linewidth=1.5)\n plt.subplot(312)\n plt.plot(gammatone_function(resolution, freqs[1], center+300), linewidth=1.5)\n plt.ylabel(\"Kernel values\")\n plt.subplot(313)\n plt.plot(gammatone_function(resolution, freqs[2], center+1000), linewidth=1.5)\n plt.xlabel(\"Time (s)\")\nplt.show()\n","repo_name":"Rachine/AuditoryCoding","sub_path":"encoding.py","file_name":"encoding.py","file_ext":"py","file_size_in_byte":3769,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"17330517234","text":"# -*- coding: utf-8 -*-\nwhile True:\n try:\n f, v = map(int,input().split())\n voltaram = input().split()\n total = []\n for a in range(v):\n voltaram[a] = int(voltaram[a])\n if f == v:\n print(\"*\")\n elif f!=v:\n for i in range(f):\n total.append(i+1)\n for u in voltaram:\n total.remove(u)\n print(*total, end=\" \\n\")\n 
except EOFError:\n break\n","repo_name":"ThiagoCComelli/URI-Online-Judge","sub_path":"URI-py/1471.py","file_name":"1471.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30227523176","text":"import argparse\n\nfrom tkinter import Tk, Canvas, Frame, Button, BOTH, TOP, BOTTOM\n\nBOARDS = ['DeBuG', 'sIMplE', 'meDiUm', 'HarD']\nMARGIN = 20\nSIDE = 50\nWIDTH = HEIGHT = MARGIN * 2 + SIDE * 9\n\n\nclass Error(Exception):\n '''\n Specific Error\n '''\n pass\n\n\ndef parse_argument():\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument(\"--board\", help=\"Desired board\", type=str, choices=BOARDS, required=True\n )\n\n args = vars(arg_parser.parse_args())\n return args['board']\n\n\nclass Board(object):\n def __init__(self, board_fl):\n self.board = self.__create_board(board_fl)\n\n def __create_board(self, board_fl):\n board = []\n\n for line in board_fl:\n line = line.strip()\n if len(line) != 9:\n board = []\n raise Error(\n \"Each line in the puzzle must have 9 numbers.\"\n )\n\n board.append([])\n\n for c in line:\n if not c.isdigit():\n raise Error(\n \"Numbers must be 0-9\"\n )\n board[-1].append(int(c))\n\n if len(board) != 9:\n raise Error(\"Each puzzle must be 9 lines long\")\n return board\n\n\nclass Game(object):\n\n # responsible for keeping the state of the board and checking whether the puzzle is complete\n\n def __init__(self, board_fl):\n self.board_fl = board_fl\n self.start_puzzle = Board(board_fl).board\n self.end_game = False\n\n def start(self):\n self.end_game = False\n self.puzzle = []\n for i in range(9):\n self.puzzle.append([])\n\n for i in range(9):\n for j in range(9):\n self.puzzle[i].append(self.start_puzzle[i][j])\n # print(self.start_puzzle[i][j])\n # self.puzzle[i].append(self.start_puzzle[i][j])\n pass\n\n def has_any_empty(self):\n for row in range(9):\n if self.__check_row_for_empty(row):\n return True\n\n for column in range(9):\n if self.__check_column_for_empty(column):\n return True\n\n return False\n\n def check_win(self):\n for row in range(9):\n if not self.__check_row(row):\n return False\n\n for column in range(9):\n if not self.__check_column(column):\n return False\n\n for row in range(3):\n for column in range(3):\n if not self.__check_square(row, column):\n return False\n\n self.end_game = True\n\n return True\n\n def __check_block_any_empty(self, block):\n # 1 1 1 _ _ _ _ _ _\n for i in block:\n if i == 0:\n return True\n return False\n\n def __check_row_for_empty(self, row):\n return self.__check_block_any_empty(self.puzzle[row])\n\n def __check_column_for_empty(self, column):\n return self.__check_block_any_empty([self.puzzle[row][column] for row in range(9)])\n\n def __check_block(self, block):\n return set(block) == set(range(1, 10))\n\n def __check_row(self, row):\n return self.__check_block(self.puzzle[row])\n\n def __check_column(self, column):\n return self.__check_block([self.puzzle[row][column] for row in range(9)])\n\n def __check_square(self, row, column):\n return self.__check_block(\n [self.puzzle[r][c] for r in range(row * 3, (row + 1) * 3) for c in range(column * 3, (column + 1) * 3)])\n\n\nclass UI(Frame):\n\n def __init__(self, parent, game):\n self.game = game\n self.parent = parent\n Frame.__init__(self, parent)\n\n self.row, self.col = -1, -1\n\n self.UI()\n\n def UI(self):\n self.parent.title(\"Sudoku\")\n self.pack(fill=BOTH, expand=1)\n self.canvas = Canvas(self, width=WIDTH, height=HEIGHT)\n 
self.canvas.pack(fill=BOTH, side=TOP)\n clear_button = Button(self, text=\"Clear Answers\", command=self.__clear_answers)\n clear_button.pack(fill=BOTH, side=BOTTOM)\n\n self.__draw_grid()\n self.__draw_puzzle()\n\n self.canvas.bind(\"<Button-1>\", self.__cell_clicked)\n self.canvas.bind(\"<Key>\", self.__key_pressed)\n\n def __draw_grid(self):\n for i in range(10):\n color = \"blue\" if i % 3 == 0 else \"gray\"\n\n x0 = MARGIN + i * SIDE\n y0 = MARGIN\n x1 = MARGIN + i * SIDE\n y1 = HEIGHT - MARGIN\n self.canvas.create_line(x0, y0, x1, y1, fill=color)\n\n x0 = MARGIN\n y0 = MARGIN + i * SIDE\n x1 = WIDTH - MARGIN\n y1 = MARGIN + i * SIDE\n self.canvas.create_line(x0, y0, x1, y1, fill=color)\n\n def __draw_puzzle(self):\n self.canvas.delete(\"numbers\")\n for i in range(9):\n for j in range(9):\n answer = self.game.puzzle[i][j]\n if answer != 0:\n x = MARGIN + j * SIDE + SIDE / 2\n y = MARGIN + i * SIDE + SIDE / 2\n original = self.game.start_puzzle[i][j]\n color = \"black\" if answer == original else \"sea green\"\n self.canvas.create_text(\n x, y, text=answer, tags=\"numbers\", fill=color\n )\n\n def __clear_answers(self):\n self.game.start()\n self.canvas.delete(\"victory\")\n self.__draw_puzzle()\n\n def __cell_clicked(self, event):\n if self.game.check_win():\n return\n\n x, y = event.x, event.y\n if MARGIN < x < WIDTH - MARGIN and MARGIN < y < HEIGHT - MARGIN:\n self.canvas.focus_set()\n\n row, col = (y - MARGIN) / SIDE, (x - MARGIN) / SIDE\n\n row = int(row)\n col = int(col)\n\n if (row, col) == (self.row, self.col):\n self.row, self.col = -1, -1\n elif self.game.puzzle[row][col] == 0:\n self.row, self.col = row, col\n\n self.__draw_cursor()\n\n def __draw_cursor(self):\n self.canvas.delete(\"cursor\")\n if self.row >= 0 and self.col >= 0:\n x0 = MARGIN + self.col * SIDE + 1\n y0 = MARGIN + self.row * SIDE + 1\n x1 = MARGIN + (self.col + 1) * SIDE - 1\n y1 = MARGIN + (self.row + 1) * SIDE - 1\n self.canvas.create_rectangle(\n x0, y0, x1, y1,\n outline=\"red\", tags=\"cursor\"\n )\n\n def __key_pressed(self, event):\n # if self.game.check_win():\n # return\n\n if self.row >= 0 and self.col >= 0 and event.char in \"1234567890\":\n\n self.game.puzzle[self.row][self.col] = int(event.char)\n self.col, self.row = -1, -1\n self.__draw_puzzle()\n self.__draw_cursor()\n\n if self.game.has_any_empty():\n return\n\n if self.game.check_win():\n self.__draw_victory(\"YOU WIN!\")\n else:\n self.__draw_victory(\"You lose!\")\n
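\n # A minimal sanity check for the pixel-to-cell arithmetic above, assuming\n # the module-level MARGIN = 20 and SIDE = 50: a click at x = 135 gives\n # col = int((135 - MARGIN) / SIDE) == 2, and __draw_cursor maps that cell\n # back to pixels with the inverse expression MARGIN + col * SIDE.\n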
\n def create_circle(self, x, y, r): # center coordinates, radius\n x0 = x - r\n y0 = y - r\n x1 = x + r\n y1 = y + r\n return self.canvas.create_oval(x0, y0, x1, y1)\n\n def __draw_victory(self, message: str):\n x0 = y0 = MARGIN + SIDE * 2\n x1 = y1 = MARGIN + SIDE * 7\n self.create_circle(x0, y0, 20)\n self.canvas.create_oval(\n x0, y0, x1, y1,\n tags=\"victory\", fill=\"dark orange\", outline=\"orange\"\n )\n\n x = y = MARGIN + 4 * SIDE + SIDE / 2\n self.canvas.create_text(\n x, y,\n text=message, tags=\"WINNER\",\n fill=\"white\", font=(\"Arial\", 32)\n )\n\n\nif __name__ == '__main__':\n board_name = parse_argument()\n\n with open('%s.sudoku' % board_name, 'r') as boards_file:\n game = Game(boards_file)\n game.start()\n\n root = Tk()\n UI(root, game)\n root.geometry(\"%dx%d\" % (WIDTH, HEIGHT + 40))\n root.mainloop()\n","repo_name":"livelovebll/CSC131","sub_path":"Final/Sudoku.py","file_name":"Sudoku.py","file_ext":"py","file_size_in_byte":7844,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41134951231","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[2]:\n\n\nimport numpy as np\nimport scipy.stats as sts\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nsns.set()\n\n\n# In[3]:\n\n\nn = 100\nactual = np.array([22, 53, 25])\n# prob_m = (actual * [2, 1, 0] / (n * 2)).sum()\nprob_m = 0.5\nprob_i = np.array([prob_m * prob_m, 2 * prob_m * (1 - prob_m), (1 - prob_m) * (1 - prob_m)])\nexpect = prob_i * n\n# chi2_c = ((actual - expect) ** 2 / expect).sum()\nsts.chisquare(actual, expect)\n\n\n# In[4]:\n\n\nn = 100\nactual = np.array([22, 33, 45])\nprob_m = (actual * [2, 1, 0] / (n * 2)).sum()\nprob_i = np.array([prob_m * prob_m, 2 * prob_m * (1 - prob_m), (1 - prob_m) * (1 - prob_m)])\nexpect = prob_i * n\n# chi2_c = ((actual - expect) ** 2 / expect).sum()\n# for a composite hypothesis we reduce the number of degrees of freedom; in this case only prob_m remains as an independent quantity\n# so we keep ddof = 1 (dof = k - 1 - ddof)\nsts.chisquare(actual, expect, ddof=1)\n\n\n# In[5]:\n\n\n# no expect -- tests for discrete uniform dist\nsts.chisquare(actual, ddof=1)\n\n\n# In[6]:\n\n\n1 - sts.chi2.cdf(9.189136689774069, 2)\n\n\n# In[7]:\n\n\nobserved = sts.norm.rvs(0, 1, 100)\nobserved\n\n\n# In[8]:\n\n\nbins = np.histogram(observed, bins='auto')\nsns.distplot(observed, bins=bins[1], kde=False)\n\n\n# In[9]:\n\n\nbins\n\n\n# In[10]:\n\n\nprob_bins = list(bins[1])\nprob_bins[0] = -np.inf\nprob_bins[-1] = np.inf\nprob_bins\n\n\n# In[11]:\n\n\n# for composite hypotheses, replace (0, 1) with parameters estimated from the sample (not _.std())\nfunc = sts.norm(0, 1).cdf\nprob_1 = func(prob_bins[:-1])\nprob_2 = func(prob_bins[1:])\nprob = prob_2 - prob_1\nprob\n\n\n# In[12]:\n\n\nexpected = prob * observed.size\nexpected\n\n\n# In[13]:\n\n\n# accordingly, here we reduce the number of degrees of freedom by 2 (ddof=2)\nassert bins[0].size == expected.size\nsts.chisquare(bins[0], expected)\n
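\n# In[14]:\n\n\n# A hedged sanity check, assuming the variables from the cells above: with\n# k bins and 2 parameters estimated from the sample, dof = k - 1 - 2. This\n# recomputes the statistic and p-value by hand, mirroring ddof=2.\nk = bins[0].size\nchi2_stat = ((bins[0] - expected) ** 2 / expected).sum()\n1 - sts.chi2.cdf(chi2_stat, k - 1 - 2)\n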
\n","repo_name":"yehorb/statistics-course","sub_path":"dev/2018-06-14-yb-practice-7.py","file_name":"2018-06-14-yb-practice-7.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29901691466","text":"from flask import Flask, url_for, redirect, request, abort, jsonify, render_template, session\n#import requests\nimport datetime\napp = Flask(__name__, static_url_path='', static_folder='staticpages')\nfrom yourDAO import class_instance\nurl = \"http://127.0.0.1:5000/\"\n\n\ndishes = [\n {\"id\":1, \"Name\": \"Onion Bhajee\", \"Category\":\"Starter\", \"Price\":6},\n {\"id\":2, \"Name\": \"Veg Samosa\", \"Category\":\"Starter\", \"Price\": 4.50},\n {\"id\":3, \"Name\": \"Lamb Biriyani\", \"Category\":\"Main\", \"Price\": 15},\n {\"id\":4, \"Name\": \"Vegetable Madras, Pilau Rice\", \"Category\":\"Main\", \"Price\": 12.50},\n {\"id\":5, \"Name\": \"Chicken Tikka Masala, Boiled Rice\", \"Category\":\"Main\", \"Price\":14},\n {\"id\":6, \"Name\": \"Chicken Rogan Josh, Pilau Rice\", \"Category\":\"Main\", \"Price\":14},\n {\"id\":7, \"Name\": \"Lamb Balti, Boiled Rice\", \"Category\":\"Main\", \"Price\":14.80},\n {\"id\":8, \"Name\": \"Lamb Vindaloo, Naan Bread\", \"Category\":\"Main\", \"Price\":15.30},\n {\"id\":9, \"Name\": \"Kulfi Ice Cream\", \"Category\":\"Dessert\", \"Price\":7}\n]\n\n\nnextID = 10\n\n@app.route('/publicMenu')\ndef viewPublicMenuOnWebpage():\n if not 'username' in session:\n abort(401)\n else:\n return render_template('menu1.html', utc_dt = datetime.datetime.utcnow())\n\n\n\n@app.route('/menu')\ndef viewMenuOnWebpage():\n if not 'username' in session:\n abort(401)\n else:\n return render_template('myMenuViewer.html', utc_dt = datetime.datetime.utcnow())\n\n\n\n@app.route('/menu/<int:id>')\ndef getID(id):\n foodItem = class_instance.findByID(id)\n return jsonify(foodItem)\n \n\n@app.route('/menu/display/<int:id>')\ndef orderByID(id):\n for dish in dishes:\n if dish[\"id\"] == id:\n return jsonify(dish[\"Name\"])\n abort(404)\n\n\n\n@app.route('/menu/display')\ndef getAll2():\n return jsonify([[dish[\"Category\"], dish[\"Name\"], str(dish[\"Price\"]) + \" euro\"] for dish in dishes])\n\n\n\n@app.route('/menu/<name>', methods=['GET'])\ndef orderADish(name):\n if not 'username' in session:\n abort(401)\n else:\n ip_addr = request.remote_addr\n data = (name, ip_addr)\n order = class_instance.order_food(data)\n return jsonify(order)\n\n\n\n@app.route('/menu/new', methods=[\"POST\"])\ndef createNewDish():\n global nextID\n dish = {\n \"id\":nextID,\n \"Name\":\"Chicken Curry and Rice\",\n \"Price\": 12.50\n }\n dishes.append(dish)\n nextID += 1\n return jsonify(dish)\n\n\n\n@app.route('/menu/<int:id>', methods=[\"PUT\"])\ndef updateItem(id):\n found_dishes = list(filter(lambda t: t[\"id\"] == id, dishes))\n if len(found_dishes) == 0:\n abort(401)\n currentItem = found_dishes[0]\n if \"Name\" in request.json:\n currentItem[\"Name\"] = request.json[\"Name\"]\n if \"Category\" in request.json:\n currentItem[\"Category\"] = request.json[\"Category\"]\n if \"Price\" in request.json:\n currentItem[\"Price\"] = request.json[\"Price\"]\n return jsonify(currentItem)\n\n\n@app.route('/menu/<int:id>', methods=[\"DELETE\"])\ndef delete(id):\n foundItems = list(filter(lambda t: t[\"id\"] == id, dishes))\n if len(foundItems) == 0:\n abort(401)\n dishes.remove(foundItems[0])\n return jsonify({\"done\": True})\n\n\n@app.route('/')\ndef index():\n count = 0\n if not 'counter' in session:\n session['counter'] = 0\n else:\n count = session['counter']\n count += 1\n session['counter'] = count\n return str(count)\n
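\n# The counter endpoint above keeps its state in the session cookie; as a\n# rough illustration (assuming the default development server and port),\n# repeated requests sharing a cookie jar, e.g.\n# curl -b jar -c jar http://127.0.0.1:5000/\n# should print \"0\", \"1\", \"2\", ... on successive calls.\n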
\n\n@app.route('/login')\ndef login():\n return '\\n Login\\n '+\\\n '\\n'\\\n f'\\n'\n \n def timestamp_labels(self) -> str:\n '''generate label with timestamp of latest record'''\n return 'Time of last record {} {}'.format(*self.timestamp)\n \n def data_labels(self) -> str:\n '''generate labels with readings from the latest record'''\n return 'Air temperature: {:.2f}*C\\n' \\\n 'Air humidity: {:.2f}%\\n' \\\n 'Soil humidity: {:.2f}%\\n\\n'.format(*self.sensor_data)\n\n def merge(self) -> str:\n '''merge generated lines into one string'''\n a: str = self.state_class()\n b: str = self.sensorID_label()\n c: str = self.data_labels()\n d: str = self.timestamp_labels()\n return a+b+d+c\n\n def replicate(self) -> None:\n '''create boxes for every sensor contained in list of latest records'''\n for key in self.input_file:\n self.sensor_id = key\n self.state = \"activeSensor\" if self.input_file[key][0] else \"inactiveSensor\"\n self.sensor_data = self.input_file[key][1]\n self.timestamp = self.timestamps[key]\n self.html+=self.merge()\n \n def generate_html(self) -> str:\n '''merge generated bits and add immutable parts of html into one string'''\n opener ='\\n'\\\n '\\nDASHBOARD'\\\n '\\n'\\\n '\\n '\\\n '\\n'\\\n '\\n'\\\n '\\nDASHBOARD\\n'\n self.replicate()\n return opener+self.html+'\\n'\n\n def html_dump(self) -> None:\n '''dump generated string into html file'''\n with open('backend/templates/index.html', 'w') as f:\n f.write(self.generate_html())\n f.close()\n\n\nclass DataPlotter:\n def __init__(self, sensor_id) -> None:\n self.sensor_id = sensor_id\n\n def return_all_data(self) -> tuple:\n '''create jsons with data from csv files for plotting'''\n with open(f'backend/dataSensor{self.sensor_id}.csv', 'r') as file:\n reader = csv.reader(file)\n temperature = []\n humidity = []\n moisture = []\n time = []\n for row in reader:\n temperature.append(round(float(row[1]),2))\n humidity.append(round(float(row[2]),2))\n moisture.append(round(float(row[3]),2))\n time.append(f\"{row[4]} {row[5]}\")\n temperature_df = pd.DataFrame(dict(time = time, temperature = temperature))\n humidity_df = pd.DataFrame(dict(time = time, humidity = humidity))\n moisture_df = pd.DataFrame(dict(time = time, moisture = moisture))\n temperature_fig = px.line(temperature_df, x=\"time\",\n y=\"temperature\",\n title=f\"Temperature from sensor {self.sensor_id}\")\n humidity_fig = px.line(humidity_df, x=\"time\",\n y=\"humidity\",\n title=f\"Humidity from sensor {self.sensor_id}\")\n moisture_fig = px.line(moisture_df,\n x=\"time\",\n y=\"moisture\",\n title=f\"Soil moisture from sensor {self.sensor_id}\")\n\n temperatureJSON = json.dumps(temperature_fig,\n cls=plotly.utils.PlotlyJSONEncoder)\n humidityJSON = json.dumps(humidity_fig,\n cls=plotly.utils.PlotlyJSONEncoder)\n moistureJSON = json.dumps(moisture_fig,\n cls=plotly.utils.PlotlyJSONEncoder)\n \n return temperatureJSON, humidityJSON, moistureJSON","repo_name":"knowalinski/eng-greenhouse-automation-system","sub_path":"backend/data_operator.py","file_name":"data_operator.py","file_ext":"py","file_size_in_byte":8077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7078932469","text":"\"\"\"Miscellaneous rewriters aimed at preprocessing, postprocessing,\noptimization, and making other transformations applicable.\n\"\"\"\n\n\n__all__ = [\n 'import_distalgo',\n 'get_distalgo_message_sets',\n 'RelationFinder',\n 'MacroUpdateRewriter',\n 'SetTypeRewriter',\n 'ObjTypeRewriter',\n 'StrictUpdateRewriter',\n 'MapOpImporter',\n 'UpdateRewriter',\n 'MinMaxRewriter',\n 'eliminate_deadcode',\n 'PassEliminator',\n]\n\n\nfrom incoq.util.collections import OrderedSet\nimport incoq.compiler.incast as L\nfrom incoq.compiler.obj import is_specialrel\n\n\nclass DistalgoImporter(L.MacroProcessor):\n \n \"\"\"Preprocessing for distalgo inc interface programs.\n \n - len() is converted to count()\n - set(<generator expr>) is converted to a SetComp\n - set(<list or tuple>) is converted to a Set literal\n \"\"\"\n \n # Distalgo conversion is done even before importing basic IncAST\n # operations from their Python representation. 
For consistency,\n # we'll use parse_structast() to avoid creating IncAST-specific\n # nodes at this stage.\n \n def handle_fe_len(self, f, arg):\n return L.parse_structast('count(ARG)',\n subst={'ARG': arg}, mode='expr')\n \n def handle_fe_set(self, f, *args):\n if len(args) == 0:\n return None\n elif len(args) == 1:\n arg = args[0]\n if isinstance(arg, L.GeneratorExp):\n return L.SetComp(arg.elt, arg.generators)\n elif isinstance(arg, (L.List, L.Tuple)):\n return L.Set(arg.elts)\n else:\n return None\n else:\n raise L.ProgramError('set() takes at most one arg')\n\ndef import_distalgo(tree):\n tree = DistalgoImporter.run(tree)\n return tree\n\n\ndef get_distalgo_message_sets(tree):\n \"\"\"Return all variable names that match the pattern for distalgo\n message sets.\n \"\"\"\n vars = L.VarsFinder.run(tree)\n return [v for v in vars\n if v.startswith('_') and\n ('ReceivedEvent_' in v or\n 'SentEvent_' in v)]\n\n\nclass RelationFinder(L.NodeVisitor):\n \n \"\"\"Find variables that we can statically infer to be relations,\n i.e. sets that are unaliased and top-level.\n \n For R to be inferred to be a relation, it must have a global-scope\n initialization having one of the following forms:\n \n R = Set()\n R = incoq.runtime.Set()\n R = set()\n \n and its only other occurrences must have the forms:\n \n - a SetUpdate naming R as the target\n \n - the RHS of membership clauses (including condition clauses)\n \n - the RHS of a For loop\n \"\"\"\n \n def process(self, tree):\n self.inited = OrderedSet()\n self.disqual = OrderedSet()\n super().process(tree)\n return self.inited - self.disqual\n \n # Manage a toplevel flag to record whether we're at global scope.\n \n def visit_Module(self, node):\n self.toplevel = True\n self.generic_visit(node)\n \n def nontoplevel_helper(self, node):\n last = self.toplevel\n self.toplevel = False\n self.generic_visit(node)\n self.toplevel = last\n \n visit_FunctionDef = nontoplevel_helper\n visit_ClassDef = nontoplevel_helper\n \n def visit_Assign(self, node):\n allowed_inits = [\n L.pe('Set()'),\n L.pe('incoq.runtime.Set()'),\n L.pe('set()'),\n ]\n # If this is a relation initializer, mark the relation name\n # and don't recurse.\n if (self.toplevel and\n L.is_varassign(node)):\n name, value = L.get_varassign(node)\n if value in allowed_inits:\n self.inited.add(name)\n return\n \n self.generic_visit(node)\n \n def visit_SetUpdate(self, node):\n # Skip the target if it's just a name.\n if isinstance(node.target, L.Name):\n self.visit(node.elem)\n else:\n self.generic_visit(node)\n \n def visit_For(self, node):\n # Skip the iter if it's just a name.\n if isinstance(node.iter, L.Name):\n self.visit(node.target)\n self.visit(node.body)\n self.visit(node.orelse)\n else:\n self.generic_visit(node)\n \n def visit_Comp(self, node):\n # Skip the iter of each clause if it's just a name.\n # Also recognize condition clauses that express memberships.\n # Always skip the params and options.\n self.visit(node.resexp)\n for cl in node.clauses:\n if (isinstance(cl, L.Enumerator) and\n isinstance(cl.iter, L.Name)):\n self.visit(cl.target)\n elif (isinstance(cl, L.Compare) and\n len(cl.ops) == len(cl.comparators) == 1 and\n isinstance(cl.ops[0], L.In) and\n isinstance(cl.comparators[0], L.Name)):\n self.visit(cl.left)\n else:\n self.visit(cl)\n \n def visit_Name(self, node):\n # We got here through some disallowed use of R.\n self.disqual.add(node.id)\n\n\nclass LegalUpdateValidator(L.NodeVisitor):\n \n \"\"\"Return True if an update operand expression is ok,\n or False if it 
needs rewriting.\n \"\"\"\n \n class Invalid(BaseException):\n pass\n \n def process(self, tree):\n try:\n super().process(tree)\n except self.Invalid:\n return False\n return True\n \n # Any non-whitelisted node type causes failure.\n \n whitelist = [\n 'Num', 'Str', 'Bytes', 'Name',\n 'Tuple', 'List', 'Dict', 'Set',\n 'Load',\n 'BoolOp', 'BinOp', 'UnaryOp',\n 'And', 'Or',\n # Exclude bitwise operators, which can construct new sets.\n 'Add', 'Sub', 'Mult', 'Div', 'Mod', 'Pow', 'LShift',\n 'RShift', 'FloorDiv',\n # Exclude Not, which can be used for cardinality tests on sets.\n 'Invert', 'UAdd', 'USub',\n # Exclude membership operators In, NotIn.\n 'Eq', 'NotEq', 'Lt', 'LtE', 'Gt', 'GtE', 'Is', 'IsNot',\n ]\n whitelist = [getattr(L, name) for name in whitelist]\n \n def generic_visit(self, node):\n if not isinstance(node, tuple(self.whitelist)):\n raise self.Invalid\n super().generic_visit(node)\n\nclass MacroUpdateRewriter(L.NodeTransformer):\n \n \"\"\"Rewrite MacroUpdates into normal set and map updates.\"\"\"\n \n # TODO: These could be refactored as macros in incast perhaps?\n \n def visit_MacroUpdate(self, node):\n op = node.op\n subst = {'TARGET': node.target,\n 'OTHER': node.other}\n # Remember that it's illegal to modify a set while iterating over it\n # in a For loop without making a copy. \n if op == 'union':\n # No copy needed because if node.target and node.other are\n # aliased, the operation has no effect.\n code = L.pc('''\n for _upelem in OTHER:\n TARGET.nsadd(_upelem)\n ''', subst=subst)\n elif op == 'inter':\n code = L.pc('''\n for _upelem in list(TARGET):\n if _upelem not in OTHER:\n TARGET.remove(_upelem)\n ''', subst=subst)\n elif op == 'diff':\n code = L.pc('''\n for _upelem in list(OTHER):\n TARGET.nsremove(_upelem)\n ''', subst=subst)\n elif op == 'symdiff':\n code = L.pc('''\n for _upelem in list(OTHER):\n if _upelem in TARGET:\n TARGET.remove(_upelem)\n else:\n TARGET.add(_upelem)\n ''', subst=subst)\n elif op == 'assign':\n code = L.pc('''\n if TARGET is not OTHER:\n while len(TARGET) > 0:\n _upelem = next(iter(TARGET))\n TARGET.remove(_upelem)\n for _upelem in OTHER:\n TARGET.add(_upelem)\n ''', subst=subst)\n elif op == 'clear':\n code = L.pc('''\n while len(TARGET) > 0:\n _upelem = next(iter(TARGET))\n TARGET.remove(_upelem)\n ''', subst=subst)\n elif op == 'mapassign':\n code = L.pc('''\n if TARGET is not OTHER:\n while len(TARGET) > 0:\n _upkey = next(iter(TARGET))\n TARGET.delkey(_upkey)\n for _upkey, _upval in OTHER.items():\n TARGET.assignkey(_upkey, _upval)\n ''', subst=subst)\n elif op == 'mapclear':\n code = L.pc('''\n while len(TARGET) > 0:\n _upkey = next(iter(TARGET))\n TARGET.delkey(_upkey)\n ''', subst=subst)\n else:\n assert()\n return code\n\nclass SetTypeRewriter(L.StmtTransformer):\n \n \"\"\"Rewrite set expressions to use incoq.runtime.Set.\n \n If set_literals is True, handle set literal expressions, including\n ones that use set(...).\n \n If orig_set_comps is True, handle set comprehensions marked\n with the in_original option.\n \"\"\"\n \n def __init__(self, namegen, *, set_literals, orig_set_comps):\n super().__init__()\n self.namegen = namegen\n self.set_literals = set_literals\n self.orig_set_comps = orig_set_comps\n \n def helper(self, node, no_update=False):\n fresh = next(self.namegen)\n \n if no_update:\n template = L.trim('''\n S_VAR = Set()\n ''')\n else:\n template = L.trim('''\n S_VAR = Set()\n L_VAR.update(EXPR)\n ''')\n new_code = L.pc(template, subst={'L_VAR': L.ln(fresh),\n 'S_VAR': L.sn(fresh),\n 'EXPR': node})\n \n 
self.pre_stmts.extend(new_code)\n return L.ln(fresh)\n \n def visit_Comp(self, node):\n node = self.generic_visit(node)\n \n if (self.orig_set_comps and\n node.options.get('in_original', False)):\n return self.helper(node)\n else:\n return node\n \n def visit_Set(self, node):\n node = self.generic_visit(node)\n \n if self.set_literals:\n return self.helper(node)\n else:\n return node\n \n def visit_Call(self, node):\n # Handle set(...) syntax as if it were {...}.\n if (self.set_literals and\n isinstance(node.func, L.Name) and\n node.func.id == 'set'):\n no_update = len(node.args) == 0\n return self.helper(node, no_update=no_update)\n else:\n return node\n \n def visit_For(self, node):\n # Skip the top level of node.iter, because set iteration\n # looks at the set contents, not the constructed set value.\n #\n # This is accomplished by using generic_visit() instead of\n # visit() on the iter, to avoid dispatch to any of the\n # above handlers.\n iter_result = self.generic_visit(node.iter)\n # Handle special case return values. Tuple return values\n # are not permitted in this context, so it's just the None\n # case.\n if iter_result is None:\n iter_result = node.iter\n \n target = self.visit(node.target)\n body = self.visit(node.body)\n orelse = self.visit(node.orelse)\n \n new_node = L.For(target, iter_result, body, orelse)\n # If there's no change, avoid returning a newly constructed\n # node, which would force copying up the tree.\n if new_node == node:\n new_node = node\n \n return new_node\n\nclass ObjTypeRewriter(L.NodeTransformer):\n \n \"\"\"Add incoq.runtime.Obj as a base class to all class definitions.\"\"\"\n \n def valid_baseclass(self, expr):\n if isinstance(expr, L.Name):\n return True\n elif (isinstance(expr, L.Attribute) and\n self.valid_baseclass(expr.value)):\n return True\n else:\n return False\n \n def visit_ClassDef(self, node):\n node = self.generic_visit(node)\n \n assert all(self.valid_baseclass(b) for b in node.bases), \\\n 'Illegal base class'\n objbase = L.ln('Set')\n if objbase not in node.bases:\n new_bases = node.bases + (objbase,)\n node = node._replace(bases=new_bases)\n \n return node\n\n\nclass StrictUpdateRewriter(L.NodeTransformer):\n \n \"\"\"Rewrite set, field, and/or map updates with if-guards\n to ensure that they can be considered strict. 
To be run\n after macro updates have already been turned into elementary\n updates.\n \"\"\"\n \n def __init__(self, *, rewrite_sets=True, rewrite_fields=True,\n rewrite_maps=True):\n super().__init__()\n self.rewrite_sets = rewrite_sets\n self.rewrite_fields = rewrite_fields\n self.rewrite_maps = rewrite_maps\n \n # No need to generic_visit() since updates can't contain\n # other updates.\n \n def visit_SetUpdate(self, node):\n if not self.rewrite_sets:\n return node\n nsop = {'add': 'nsadd',\n 'remove': 'nsremove'}[node.op]\n template = 'TARGET.{}(ELEM)'.format(nsop)\n return L.pc(template, subst={'TARGET': node.target,\n 'ELEM': node.elem})\n \n def visit_Assign(self, node):\n if not self.rewrite_fields:\n return node\n if not L.is_attrassign(node):\n return node\n cont, field, value = L.get_attrassign(node)\n return L.pc('''\n CONT.nsassignfield(FIELD, VALUE)\n ''', subst={'CONT': cont,\n 'FIELD': L.ln(field),\n 'VALUE': value})\n \n def visit_Delete(self, node):\n if not self.rewrite_fields:\n return node\n if not L.is_delattr(node):\n return node\n cont, field = L.get_delattr(node)\n return L.pc('''\n CONT.nsdelfield(FIELD)\n ''', subst={'CONT': cont,\n 'FIELD': L.ln(field)})\n \n def visit_AssignKey(self, node):\n if not self.rewrite_maps:\n return node\n return L.pc('''\n TARGET.nsassignkey(KEY, VALUE)\n ''', subst={'TARGET': node.target,\n 'KEY': node.key,\n 'VALUE': node.value})\n \n def visit_DelKey(self, node):\n if not self.rewrite_maps:\n return node\n return L.pc('''\n TARGET.nsdelkey(KEY)\n ''', subst={'TARGET': node.target,\n 'KEY': node.key})\n\n\nclass MapOpImporter(L.NodeTransformer):\n \n \"\"\"Convert assignment and deletion of map keys to AssignKey\n and DelKey nodes. Uses of the map \"globals()\" are ignored.\n \"\"\"\n \n def visit_Assign(self, node):\n if L.is_mapassign(node):\n target, key, value = L.get_mapassign(node)\n return L.AssignKey(target, key, value)\n return node\n \n def visit_Delete(self, node):\n if L.is_delmap(node):\n target, key = L.get_delmap(node)\n return L.DelKey(target, key)\n return node\n\nclass UpdateRewriter(L.NodeTransformer):\n \n \"\"\"Rewrite set and map updates to ensure that the operands\n are legal update expressions.\n \"\"\"\n \n def __init__(self, namegen):\n self.namegen = namegen\n \n # No need to recurse since we only deal with update statements,\n # which can't be nested.\n \n def visit_SetUpdate(self, node):\n target_ok = LegalUpdateValidator.run(node.target)\n elem_ok = LegalUpdateValidator.run(node.elem)\n if target_ok and elem_ok:\n return node\n \n code = ()\n if not target_ok:\n targetvar = next(self.namegen)\n code += (L.Assign((L.sn(targetvar),), node.target),)\n node = node._replace(target=L.ln(targetvar))\n if not elem_ok:\n elemvar = next(self.namegen)\n code += (L.Assign((L.sn(elemvar),), node.elem),)\n node = node._replace(elem=L.ln(elemvar))\n return code + (node,)\n \n def visit_AssignKey(self, node):\n target_ok = LegalUpdateValidator.run(node.target)\n key_ok = LegalUpdateValidator.run(node.key)\n value_ok = LegalUpdateValidator.run(node.value)\n if target_ok and key_ok and value_ok:\n return node\n \n code = ()\n if not target_ok:\n targetvar = next(self.namegen)\n code += (L.Assign((L.sn(targetvar),), node.target),)\n node = node._replace(target=L.ln(targetvar))\n if not key_ok:\n keyvar = next(self.namegen)\n code += (L.Assign((L.sn(keyvar),), node.key),)\n node = node._replace(key=L.ln(keyvar))\n if not value_ok:\n valuevar = next(self.namegen)\n code += (L.Assign((L.sn(valuevar),), node.value),)\n node 
= node._replace(value=L.ln(valuevar))\n \n return code + (node,)\n \n def visit_DelKey(self, node):\n target_ok = LegalUpdateValidator.run(node.target)\n key_ok = LegalUpdateValidator.run(node.key)\n if target_ok and key_ok:\n return node\n \n code = ()\n if not target_ok:\n targetvar = next(self.namegen)\n code += (L.Assign((L.sn(targetvar),), node.target),)\n node = node._replace(target=L.ln(targetvar))\n if not key_ok:\n keyvar = next(self.namegen)\n code += (L.Assign((L.sn(keyvar),), node.key),)\n node = node._replace(key=L.ln(keyvar))\n \n return code + (node,)\n\n\nclass MinMaxRewriter(L.NodeTransformer):\n \n \"\"\"If a min/max operation is over a union of set literals or\n set comprehensions, distribute the min/max to each operand\n and take the overall min/max. The overall aggregate uses the\n runtime's min2() and max2() functions, which are not\n incrementalized but allow their arguments to be None.\n \"\"\"\n \n def visit_Aggregate(self, node):\n node = self.generic_visit(node)\n \n if not node.op in ['min', 'max']:\n return node\n func2 = {'min': 'min2', 'max': 'max2'}[node.op]\n \n if not L.is_setunion(node.value):\n return node\n sets = L.get_setunion(node.value)\n if len(sets) == 1:\n # If there's just one set, don't change anything.\n return node\n \n # Wrap each operand in an aggregate query with the same\n # options as the original aggregate. (This ensures that\n # 'impl' is carried over.) Set literals are wrapped in\n # a call to incoq.runtime's min2()/max2() instead of an\n # Aggregate query node.\n terms = []\n for s in sets:\n if isinstance(s, (L.Comp, L.Name)):\n new_term = L.Aggregate(s, node.op, node.options)\n else:\n new_term = L.pe('OP(__ARGS)', subst={'OP': L.ln(func2)})\n new_term = new_term._replace(args=s.elts)\n terms.append(new_term)\n \n # The new top-level aggregate is min2()/max2().\n new_node = L.pe('OP(__ARGS)',\n subst={'OP': L.ln(func2)})\n new_node = new_node._replace(args=tuple(terms))\n return new_node\n\n\n# TODO: There are two cases where dead code elimination will fail to\n# get rid of a relation. One is when the relation is reference-counted,\n# because the reference-counted add/remove operations are already\n# broken down into operations that inspect the current refcount to\n# decide what to do. We'd have to change it so rcadd and rcremove\n# operations are not expanded until the end. This would also entail\n# changing auxmap transformation to work for rcadd/rcremove.\n#\n# The second case is when the contents of the set are read directly,\n# such as in filter checks. 
This could be fixed by rewriting these tests\n# to use an arbitrary map over the set instead.\n\nclass DeadCodeEliminator(L.NodeTransformer):\n \n def __init__(self, deadvars):\n self.deadvars = set(deadvars)\n \n def visit_Assign(self, node):\n if (len(node.targets) == 1 and\n isinstance(node.targets[0], L.Name) and\n node.targets[0].id in self.deadvars):\n return L.Pass()\n \n def update_helper(self, node):\n if isinstance(node.target, L.Name):\n if node.target.id in self.deadvars:\n return L.Pass()\n \n visit_SetUpdate = update_helper\n visit_RCSetRefUpdate = update_helper\n visit_AssignKey = update_helper\n visit_DelKey = update_helper\n\n\nclass PassEliminator(L.NodeTransformer):\n \n def filter_pass(self, stmts):\n \"\"\"Update a list of statements to exclude Pass nodes.\"\"\"\n if len(stmts) == 1:\n # Can't remove a lone Pass.\n return stmts\n else:\n return tuple(s for s in stmts if not isinstance(s, L.Pass))\n \n def body_helper(self, node):\n node = self.generic_visit(node)\n \n new_body = self.filter_pass(node.body)\n node = node._replace(body=new_body)\n if hasattr(node, 'orelse'):\n new_orelse = self.filter_pass(node.orelse)\n node = node._replace(orelse=new_orelse)\n \n return node\n \n visit_Module = body_helper\n visit_FunctionDef = body_helper\n visit_ClassDef = body_helper\n visit_For = body_helper\n visit_While = body_helper\n visit_If = body_helper\n visit_With = body_helper\n\n\ndef eliminate_deadcode(tree, *, keepvars=None, obj_domain_out, verbose=False):\n \"\"\"Modify the program to remove sets that are not read from.\"\"\"\n if keepvars is None:\n keepvars = set()\n keepvars = set(keepvars)\n \n # Find variables that are only written to, not read from.\n # Exclude special names and keepvars.\n special_vars = set(['__all__'])\n all_vars = L.VarsFinder.run(tree)\n read_vars = L.VarsFinder.run(\n tree, ignore_store=True)\n write_only_vars = all_vars - read_vars - special_vars - keepvars\n \n if obj_domain_out:\n # Also exclude pairsets since they will be translated into\n # actual obj-domain updates.\n for v in set(write_only_vars):\n if is_specialrel(v):\n write_only_vars.remove(v)\n \n # Delete most updates to these variables. 
Some cases, such as\n # the target of a For loop, are left alone.\n tree = DeadCodeEliminator.run(tree, write_only_vars)\n \n if verbose:\n if len(write_only_vars) > 0:\n print('Eliminated dead variables: ' + ', '.join(write_only_vars))\n else:\n print('No dead vars eliminated')\n \n return tree\n","repo_name":"IncOQ/incoq","sub_path":"incoq/compiler/central/rewritings.py","file_name":"rewritings.py","file_ext":"py","file_size_in_byte":23137,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"48"} +{"seq_id":"25378136432","text":"import math\n\nalphabet = list(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ '\")\ndist = 30*math.pi/14\n\ndef smallestDist (x, y):\n z=abs(alphabet.index(x)-alphabet.index(y))\n return z if z<28-z else 28-z\n \n\nfor i in range(int(input())):\n time=0.0\n string = input()\n for j in range(len(string)-1):\n time+= 1 + smallestDist(string[j],string[j+1])*dist/15\n\n print(time+1)\n","repo_name":"vjsaairam/Kattis","sub_path":"Racing Alphabet.py","file_name":"Racing Alphabet.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"19886469152","text":"\"Lista de vendedores de una empresa\"\n\nced = int(input(\"ingrese la cedula: \"))\nnombre = input(\"ingrese el nombre del vendedor: \")\ntip_vend = int(input(\"\"\"ingrese el tipo de vendedor: \n 1 -- Puerta a puerta\n 2 -- Telemercadeo\n 3 -- Ejecutivo de ventas\n \"\"\"))\n\nventa_vend = int(input(\"ingrese la venta del mes del vendedor: \"))\n\ncomis_total = 0\nventas_total = 0\n\nwhile ced != -1:\n if tip_vend == 1:\n comis_vend = 0.2 * venta_vend\n comis_total += comis_vend\n ventas_total += venta_vend\n\n elif tip_vend == 2:\n comis_vend = 0.15 * venta_vend\n comis_total += comis_vend\n ventas_total += venta_vend\n\n\n elif tip_vend == 3:\n comis_vend = 0.25 * venta_vend\n comis_total += comis_vend\n ventas_total += venta_vend\n\n print (\"\\n\", \"*=\" * 35)\n print (f\"Nombre del vendedor: {nombre}\")\n print (f\"Comisión del vendedor: {comis_vend:,}\")\n print (f\"Comisiones totales: {comis_total:,}\")\n print (f\"Ventas totales: {ventas_total:,}\")\n print (\"\\n\", \"*=\" * 35)\n\n ced = int(input(\"ingrese la cedula: \"))\n\n if ced == -1:\n continue\n\n else:\n nombre = input(\"ingrese el nombre: \")\n tip_vend = int(input(\"\"\"ingrese el tipo de vendedor: \n 1 -- Puerta a puerta\n 2 -- Telemercadeo\n 3 -- Ejecutivo de ventas\n \"\"\"))\n\n venta_vend = int(input(\"ingrese la venta del mes del vendedor: \"))\n\n\n\n","repo_name":"carlosjhoan/Campuslands","sub_path":"Python/SRV/SRV2/SRV05/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":1541,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"31081996145","text":"class TrieNode:\n def __init__(self):\n self.children = dict()\n\nclass Trie:\n def __init__(self):\n self.root = TrieNode()\n self.Ends = []\n \n def insert(self,word):\n root = self.root\n for char in word:\n if char not in root.children:\n root.children[char] = TrieNode()\n root = root.children[char]\n self.Ends.append((root,len(word)+1))\n \nclass Solution:\n def minimumLengthEncoding(self, words: List[str]) -> int:\n words = list(set(words))\n trie = Trie()\n \n for word in words:\n trie.insert(word[::-1])\n\n return sum(val for node,val in trie.Ends if 
len(node.children)==0)","repo_name":"Merwan-J/competetive-programming","sub_path":"820-short-encoding-of-words/820-short-encoding-of-words.py","file_name":"820-short-encoding-of-words.py","file_ext":"py","file_size_in_byte":717,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"27776002290","text":"import os\nimport torch\nfrom PIL import Image\n\nfrom torch.utils.data import Dataset\n\n\nclass ImagesDataset(Dataset):\n \"\"\"Images dataset for causal metrics benchmark\"\"\"\n\n def __init__(self, txt_file, labels_file, root_dir, transform=None):\n \"\"\"\n Args:\n txt_file (string): Path to the txt listing files\n root_dir (string): Directory with the images inside\n labels_file (string): Path to txt with labels\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.file_path = []\n self.file_name = []\n self.labels = {}\n labels = open(labels_file)\n files = open(txt_file)\n for file in files:\n self.file_path.append(os.path.join(root_dir, file.rstrip()))\n self.file_name.append(file.rstrip())\n\n for label in labels:\n name, id, _ = label.rstrip().split(' ')\n self.labels[name] = id\n\n self.transform = transform\n\n def __len__(self):\n return len(self.file_path)\n\n def __getitem__(self, idx):\n if torch.is_tensor(idx):\n idx = idx.tolist()\n\n\n image = Image.open(self.file_path[idx]).convert('RGB')\n\n if self.transform:\n image = self.transform(image)\n\n return image, self.labels[self.file_name[idx]], self.file_name[idx]\n","repo_name":"aenglebert/polycam","sub_path":"benchmarks/imagesDataset.py","file_name":"imagesDataset.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"48"} +{"seq_id":"15870619116","text":"from rest_framework.response import Response\nfrom rest_framework.decorators import api_view, permission_classes, authentication_classes\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework import status\nfrom rest_framework.permissions import IsAuthenticated # Cambio en la importación\nfrom django.db.models import Q\nfrom api.models import Product, Invoice, Business\nfrom api.serializers import InvoiceSerializer, BusinessSerializer\nfrom api.views import get_business_id_by_user_from_server\n\n\n@api_view(['POST'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef subtract_stock(request):\n \"\"\"\n Subtracts the specified quantity from the stock of a product.\n\n JSON Input:\n {\n \"product_id\": 1, # Product ID\n \"quantity\": 5\n }\n\n Returns:\n 200 OK with a success message on successful stock update,\n 400 Bad Request if there is insufficient stock,\n 404 Not Found if the product doesn't exist.\n \"\"\"\n if request.method == 'POST':\n product_id = request.data.get('product_id')\n quantity = request.data.get('quantity')\n\n try:\n product = Product.objects.get(pk=product_id)\n if product.stock >= quantity:\n product.stock -= quantity\n product.save()\n return Response({'message': 'Stock updated successfully.'}, status=status.HTTP_200_OK)\n else:\n return Response({'error': 'Insufficient stock available.'}, status=status.HTTP_400_BAD_REQUEST)\n except Product.DoesNotExist:\n return Response({'error': 'Product does not exist.'}, status=status.HTTP_404_NOT_FOUND)\n return Response({'error': 'Method not allowed.'}, 
status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n@api_view(['POST'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef customer_invoices(request):\n \"\"\"\n Retrieves invoices associated with a specified customer.\n\n JSON Input:\n {\n \"customer_id\": 1 # Customer ID\n }\n\n Returns:\n 200 OK with invoice data on success,\n 404 Not Found if no invoices are found for the specified customer.\n \"\"\"\n if request.method == 'POST':\n customer_id = request.data.get('customer_id')\n try:\n invoices = Invoice.objects.filter(customer=customer_id)\n serializer = InvoiceSerializer(invoices, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Invoice.DoesNotExist:\n return Response({'error': 'No invoices found for the specified customer.'},\n status=status.HTTP_404_NOT_FOUND)\n return Response({'error': 'Method not allowed.'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n@api_view(['POST'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef invoices_by_specified_date_range(request):\n \"\"\"\n Retrieves invoices within a specified date range.\n\n JSON Input:\n {\n \"rango_fechas\": {\n \"fecha_inicio\": \"2023-01-01T00:00:00Z\",\n \"fecha_fin\": \"2023-12-31T23:59:59Z\"\n }\n }\n\n Returns:\n 200 OK with invoice data on success,\n 404 Not Found if no invoices are found within the specified date range,\n 400 Bad Request if date parameters are missing.\n \"\"\"\n if request.method == 'POST':\n date_range = request.data.get('rango_fechas', None)\n\n if date_range and 'fecha_inicio' in date_range and 'fecha_fin' in date_range:\n start_date = date_range['fecha_inicio']\n end_date = date_range['fecha_fin']\n\n try:\n invoices = Invoice.objects.filter(\n Q(invoice_date__gte=start_date) & Q(invoice_date__lte=end_date)\n )\n serializer = InvoiceSerializer(invoices, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Invoice.DoesNotExist:\n return Response({'error': 'No invoices found within the specified date range.'},\n status=status.HTTP_404_NOT_FOUND)\n\n return Response({'error': 'Date parameters are mandatory.'}, status=status.HTTP_400_BAD_REQUEST)\n return Response({'error': 'Method not allowed.'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n@api_view(['POST'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef invoices_in_month(request):\n \"\"\"\n Retrieves invoices for a specified month.\n\n JSON Input:\n {\n \"mes\": 1 # Month (1-12)\n }\n\n Returns:\n 200 OK with invoice data on success,\n 404 Not Found if no invoices are found for the specified month,\n 400 Bad Request if the month parameter is missing.\n \"\"\"\n if request.method == 'POST':\n month = request.data.get('mes', None)\n\n if month:\n try:\n invoices = Invoice.objects.filter(invoice_date__month=month)\n serializer = InvoiceSerializer(invoices, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n except Invoice.DoesNotExist:\n return Response({'error': 'No invoices found for the specified month.'},\n status=status.HTTP_404_NOT_FOUND)\n\n return Response({'error': 'Month parameter is mandatory.'}, status=status.HTTP_400_BAD_REQUEST)\n return Response({'error': 'Method not allowed.'}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n\n\n@api_view(['GET'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef get_last_registered_invoice(request):\n \"\"\"\n Retrieves the last registered 
invoice number for the authenticated user's business.\n\n Returns:\n 200 OK with the last registered invoice number on success,\n 404 Not Found if the business is not found.\n \"\"\"\n try:\n business = get_business_id_by_user_from_server(request)\n last_invoice = business.last_registered_invoice\n return Response({'last_registered_invoice': last_invoice}, status=status.HTTP_200_OK)\n except Business.DoesNotExist:\n return Response({'error': 'Business not found'}, status=status.HTTP_404_NOT_FOUND)\n\n\n@api_view(['GET'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef get_currency_by_business(request):\n \"\"\"\n Retrieves the currency information for the authenticated user's business.\n\n Returns:\n 200 OK with currency information on success,\n 404 Not Found if the business is not found.\n \"\"\"\n try:\n business = get_business_id_by_user_from_server(request)\n currency = business.currency\n return Response({'currency': currency.name,\n 'symbol': currency.symbol,\n 'international identifier': currency.international_identifier\n }, status=status.HTTP_200_OK)\n except Business.DoesNotExist:\n return Response({'error': 'Business not found'}, status=status.HTTP_404_NOT_FOUND)\n\n\n@api_view(['GET'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef get_complete_invoice_number_series(request):\n \"\"\"\n Retrieves the complete invoice number series for the authenticated user's business.\n\n Returns:\n 200 OK with the concatenated invoice number series on success,\n 404 Not Found if the business is not found.\n \"\"\"\n try:\n business = get_business_id_by_user_from_server(request)\n\n # Concatenates the values into a single string\n concatenated_info = f\"{business.authorization_number} {business.invoice_series} {business.invoice_number}\"\n\n return Response({'concatenated_info': concatenated_info}, status=status.HTTP_200_OK)\n except Business.DoesNotExist:\n return Response({'error': 'Business not found'}, status=status.HTTP_404_NOT_FOUND)\n\n\n@api_view(['PUT'])\n@authentication_classes([TokenAuthentication])\n@permission_classes([IsAuthenticated])\ndef edit_name_photo_for_business(request):\n \"\"\"\n Edit the name and photo of the business.\n\n JSON Input:\n {\n \"name\": \"New Business Name\",\n \"photo_link\": \"https://example.com/new_photo.jpg\"\n }\n\n Returns:\n 200 OK with success message and updated business data on success,\n 400 Bad Request with error details if the input is invalid.\n \"\"\"\n business_id = get_business_id_by_user_from_server(request)\n\n try:\n business = Business.objects.get(id=business_id)\n except Business.DoesNotExist:\n return Response({\"error\": \"Business not found\"}, status=status.HTTP_404_NOT_FOUND)\n\n data = request.data # JSON with updated business data\n serializer = BusinessSerializer(business, data=data, partial=True)\n if serializer.is_valid():\n serializer.save()\n return Response({\"message\": \"Business updated successfully\", \"data\": serializer.data}, status=status.HTTP_200_OK)\n return Response({\"error\": \"Failed to update the business\", \"errors\": serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n\n\n\n\n\n\n\n","repo_name":"willR30/inventario_api","sub_path":"api/others/other_views.py","file_name":"other_views.py","file_ext":"py","file_size_in_byte":9110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4852250003","text":"# coding:utf8\n\nimport time\n\nfrom socket import 
AF_INET, SOCK_STREAM, socket\nfrom thread import start_new\nimport struct\nHOST = 'magnus1k.com'\nPORT = 1000\nBUFSIZE = 1024\nADDR = (HOST, PORT)\nclient = socket(AF_INET, SOCK_STREAM)\nclient.connect(ADDR)\n\n\ndef send_data(sendstr, command_id):\n head_0 = chr(0)\n head_1 = chr(0)\n head_2 = chr(0)\n head_3 = chr(0)\n proto_version = chr(0)\n server_version = 0\n sendstr = sendstr\n data = struct.pack('!sssss3I', head_0, head_1, head_2,\n head_3, proto_version, server_version,\n len(sendstr)+4, command_id)\n senddata = data+sendstr\n return senddata\n\n\ndef resolve_recvdata(data):\n head = struct.unpack('!sssss3I', data[:17])\n length = head[6]\n data = data[17:17+length]\n return data\n\ns1 = time.time()\n\n\ndef start():\n for num in xrange(10):\n client.sendall(send_data('asdfe', 1))\n\nfor i in range(10):\n start_new(start, ())\nwhile True:\n pass\n\n","repo_name":"magnus1k/test_game_server","sub_path":"tool/clienttest.py","file_name":"clienttest.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25871861580","text":"# Basic NN \r\n\r\nimport os\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom keras.layers import Dense\r\nfrom keras.models import Sequential\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nimport pandas as pd\r\nfrom BlackjackSM import BlackjackSM\r\n\r\n# Define the optimal action based on player's hand and dealer's up card\r\ndef optimal_action(player_hand_value, dealer_up_card_value, is_soft_hand):\r\n # The logic of optimal_action goes here\r\n # For example:\r\n if is_soft_hand:\r\n if player_hand_value <= 17:\r\n return 'hit'\r\n elif player_hand_value == 18 and dealer_up_card_value in [9, 10, 11]:\r\n return 'hit'\r\n else:\r\n return 'stand'\r\n else:\r\n if player_hand_value < 17:\r\n return 'hit'\r\n else:\r\n return 'stand'\r\n\r\n# Simulation parameters\r\nnum_episodes = 100\r\ndata = []\r\n\r\n# Initialize the state machine\r\nstate_machine = BlackjackSM()\r\n\r\n# Run the simulation\r\nfor _ in range(num_episodes):\r\n state_machine.new_hand()\r\n\r\n while not state_machine.terminal:\r\n # Extract the necessary information from the state\r\n current_state = state_machine.state()\r\n player_hand_value = current_state[0] # Player's hand value\r\n is_soft_hand = current_state[1] == 1 # Is the player's hand soft?\r\n dealer_up_card_value = current_state[4] # Dealer's up card value\r\n \r\n # Determine the action using the optimal strategy\r\n action = optimal_action(player_hand_value, dealer_up_card_value, is_soft_hand)\r\n \r\n # Execute the action\r\n state_machine.do(action)\r\n \r\n # Store state, action, and reward\r\n next_state = state_machine.state()\r\n reward = state_machine.reward() if state_machine.terminal else 0\r\n data.append({\r\n 'player_hand_value': player_hand_value,\r\n 'dealer_up_card_value': dealer_up_card_value,\r\n 'is_soft_hand': is_soft_hand,\r\n 'action': action,\r\n 'reward': reward,\r\n 'terminal': state_machine.terminal\r\n })\r\n\r\n# Convert the data into a pandas DataFrame\r\ndf = pd.DataFrame(data)\r\n\r\n# Save the dataset to a CSV file\r\ndf.to_csv('blackjack_optimal_strategy_data.csv', index=False)\r\n\r\n# Print the first few rows of the dataframe to verify\r\nprint(df.head())\r\n\r\n\r\n# Load your dataset\r\ndf = pd.read_csv('blackjack_dataset.csv')\r\n\r\n# Preprocess the dataset\r\nfeature_list = 
['player_total', 'has_ace', 'dealer_card_val']\r\nX = df[feature_list].values\r\ny = df['correct_action'].apply(lambda x: 1 if x == 'hit' else 0).values.reshape(-1, 1)\r\n\r\n# Split the dataset into training and testing sets\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\r\n\r\n# Feature scaling\r\nscaler = StandardScaler()\r\nX_train = scaler.fit_transform(X_train)\r\nX_test = scaler.transform(X_test)\r\n\r\n# Create the neural network\r\nmodel = Sequential([\r\n    Dense(16, input_dim=len(feature_list), activation='relu'),\r\n    Dense(128, activation='relu'),\r\n    Dense(32, activation='relu'),\r\n    Dense(8, activation='relu'),\r\n    Dense(1, activation='sigmoid')\r\n])\r\n\r\nmodel.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])\r\n\r\n# Train the model\r\nmodel.fit(X_train, y_train, epochs=20, batch_size=256, verbose=1)\r\n\r\n# Evaluate the model\r\ny_pred = model.predict(X_test)\r\ny_pred = (y_pred > 0.5).astype(int)\r\nprint(f\"Accuracy: {accuracy_score(y_test, y_pred)}\")\r\n\r\n# Save the model and scaler\r\nmodel.save('blackjack_nn_model.h5')\r\nnp.save('scaler.npy', scaler.mean_)","repo_name":"wadewilliamsw1234/Blackjack-AI-Exploration","sub_path":"blackjack_basicnn.py","file_name":"blackjack_basicnn.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"21899761844","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n#\n# Copyright (C) 2017 Lenovo, Inc.\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.\n#\n# Module to send Conditional CLI commands to Lenovo Switches\n# Lenovo Networking\n#\n\nDOCUMENTATION = '''\n---\nmodule: cnos_conditional_command\nauthor: \"Anil Kumar Muraleedharan (@amuraleedhar)\"\nshort_description: Execute a single command based on condition on devices\n running Lenovo CNOS\ndescription:\n    - This module allows you to modify the running configuration of a switch. It\n      provides a way to execute a single CNOS command on a network device by\n      evaluating the current running configuration and executing the command only\n      if the specific settings have not been already configured.\n      The CNOS command is passed as an argument of the method.\n      This module functions the same as the cnos_command module.\n      The only exception is that following inventory variable can be specified\n      [\"condition = <flag string>\"]\n      When this inventory variable is specified as the variable of a task, the\n      command is executed for the network element that matches the flag string.\n      Usually, commands are executed across a group of network devices. When\n      there is a requirement to skip the execution of the command on one or\n      more devices, it is recommended to use this module. 
This module uses SSH to\n manage network device configuration.\nextends_documentation_fragment:\n- community.network.cnos\n\noptions:\n clicommand:\n description:\n - This specifies the CLI command as an attribute to this method.\n The command is passed using double quotes. The variables can be\n placed directly on to the CLI commands or can be invoked\n from the vars directory.\n required: true\n default: Null\n condition:\n description:\n - If you specify condition=false in the inventory file against any\n device, the command execution is skipped for that device.\n required: true\n default: Null\n flag:\n description:\n - If a task needs to be executed, you have to set the flag the same\n as it is specified in the inventory for that device.\n required: true\n default: Null\n\n'''\nEXAMPLES = '''\nTasks : The following are examples of using the module\n cnos_conditional_command. These are written in the main.yml file of the tasks\n directory.\n---\n- name: Applying CLI template on VLAG Tier1 Leaf Switch1\n community.network.cnos_conditional_command:\n deviceType: \"{{ hostvars[inventory_hostname]['deviceType'] }}\"\n outputfile: \"./results/test_conditional_command_\n {{ inventory_hostname }}_output.txt\"\n condition: \"{{ hostvars[inventory_hostname]['condition']}}\"\n flag: leaf_switch2\n command: \"spanning-tree mode enable\"\n\n'''\nRETURN = '''\nmsg:\n description: Success or failure message\n returned: always\n type: str\n sample: \"Command Applied\"\n'''\n\nimport sys\nimport time\nimport socket\nimport array\nimport json\nimport time\nimport re\nimport os\ntry:\n from ansible_collections.community.network.plugins.module_utils.network.cnos import cnos\n HAS_LIB = True\nexcept Exception:\n HAS_LIB = False\nfrom ansible.module_utils.basic import AnsibleModule\nfrom collections import defaultdict\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n clicommand=dict(required=True),\n outputfile=dict(required=True),\n condition=dict(required=True),\n flag=dict(required=True),\n host=dict(required=False),\n deviceType=dict(required=True),\n username=dict(required=False),\n password=dict(required=False, no_log=True),\n enablePassword=dict(required=False,\n no_log=True), ), supports_check_mode=False)\n\n condition = module.params['condition']\n flag = module.params['flag']\n cliCommand = module.params['clicommand']\n outputfile = module.params['outputfile']\n output = ''\n if (condition is None or condition != flag):\n module.exit_json(changed=True, msg=\"Command Skipped for this switch\")\n return ''\n # Send the CLi command\n cmd = [{'command': cliCommand, 'prompt': None, 'answer': None}]\n output = output + str(cnos.run_cnos_commands(module, cmd))\n # Write to memory\n save_cmd = [{'command': 'save', 'prompt': None, 'answer': None}]\n cmd.extend(save_cmd)\n output = output + str(cnos.run_cnos_commands(module, cmd))\n\n # Save it into the file\n path = outputfile.rsplit('/', 1)\n # cnos.debugOutput(path[0])\n if not os.path.exists(path[0]):\n os.makedirs(path[0])\n file = open(outputfile, \"a\")\n file.write(output)\n file.close()\n\n # Logic to check when changes occur or not\n errorMsg = cnos.checkOutputForError(output)\n if (errorMsg is None):\n module.exit_json(changed=True,\n msg=\"CLI Command executed and results saved in file \")\n else:\n module.fail_json(msg=errorMsg)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"ansible-collections/community.network","sub_path":"plugins/modules/cnos_conditional_command.py","file_name":"cnos_conditional_command.py","file_ext":"py","file_size_in_byte":5774,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"48"} +{"seq_id":"24655110080","text":"class Speciality:\n names = (\n \"Сертификаты и подписки\",\n \"ОРТОПЕДИЧЕСКАЯ СТОМАТОЛОГИЯ\",\n \"Терапевтическая стоматологоия\",\n \"Электронные родовые сертификаты\",\n \"ИМПЛАНТАЦИЯ\",\n \"ПО СОГЛАСОВАНИЮ\",\n \"ХИРУРГИЧЕСКАЯ СТОМАТОЛОГИЯ\",\n \"ТЕРАПЕВТИЧЕСКАЯ СТОМАТОЛОГИЯ VECTOR\",\n \"ТЕРАПЕВТИЧЕСКАЯ СТОМАТОЛОГИЯ\",\n \"ОРТОПЕДИЧЕСКАЯ СТОМАТОЛОГИЯ ВРАЧ ПОВЫШЕННОЙ КАТЕГОРИИ\",\n \"Слепки\",\n \"Хирургия\",\n \"Вкладки\",\n \"ДЕТСКАЯ СТОМАТОЛОГИЯ\",\n \"ОРТОПЕДИЧЕСКАЯ СТОМАТОЛОГИЯ ГЛ.ВРАЧ\",\n \"ОРТОПЕДИЧЕСКАЯ СТОМАТОЛОГИЯ ВРАЧ\",\n \"Консультации врача\",\n \"ПРОФЕССИОНАЛЬНАЯ ГИГИЕНА\",\n \"Продукция\",\n \"Исправление прикуса\",\n \"Дополнительно\",\n \"Виниры\",\n \"ОБЩИЕ\",\n \"Консультации главного врача\",\n \"Абатменты\",\n \"Коронки\",\n \"ОРТОДОНТИЯ (исправление прикуса)\"\n )\n\n def __init__(self, name: str):\n name = str(name).replace(\" \", \" \").replace(\"\\t\", \"\")\n\n if name not in self.names:\n raise NameError(f\"{name} not in {self.names}\")\n\n self.name = name","repo_name":"Aaliyah097/aestetica","sub_path":"src/staff/entities/speciality.py","file_name":"speciality.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24877183034","text":"import pymongo\nfrom blog_app.models import post\n\n\nclass DBSettings:\n uri = \"mongodb://127.0.0.1:27017\"\n\n\ndef show_results(results):\n print('Results:')\n for result in results:\n print(result)\n\n\ndef main():\n client = pymongo.MongoClient(DBSettings.uri)\n database = client['testapp']\n collection = database['purchaseitems']\n results = collection.find({})\n show_results(results)\n results_list = [result['itemname'] for result in collection.find({})]\n print(results_list)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sidharth01g/PythonForWeb","sub_path":"db_app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23107060747","text":"from collections import namedtuple, deque\nimport random\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\n\nclass ReplayBuffer:\n \n def __init__(self, a_size, buffer_size, batch_size, seed, device):\n self.action_size = a_size\n self.memory = deque(maxlen=buffer_size) \n self.batch_size = batch_size\n self.experience = namedtuple(\"Experience\", field_names=[\"state\", \"action\", \"reward\", \"next_state\", \"done\"])\n self.seed = random.seed(seed)\n self.device = device\n \n def add(self, s, a, r, s_t, done):\n exp = self.experience(s, a, r, s_t, done)\n self.memory.append(exp)\n \n def sample(self):\n experiences = random.sample(self.memory, k=self.batch_size)\n #Collects all sampled experience values in tensors and puts them in a tuple which is returned\n r = torch.from_numpy(np.vstack([exp.reward for exp in experiences if exp is not None])).float().to(self.device)\n dones = torch.from_numpy(np.vstack([exp.done for exp in experiences if exp is not None]).astype(np.uint8)).float().to(self.device)\n s = torch.from_numpy(np.vstack([exp.state for exp in experiences if exp is not 
None])).float().to(self.device)\n a = torch.from_numpy(np.vstack([exp.action for exp in experiences if exp is not None])).long().to(self.device)\n s_t = torch.from_numpy(np.vstack([exp.next_state for exp in experiences if exp is not None])).float().to(self.device)\n \n return (s, a, r, s_t, dones)\n\n def __len__(self):\n return len(self.memory)","repo_name":"shazakam/Lunar-Lander-RL","sub_path":"DQN/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"7900829831","text":"# 9012\n# n = int(input())\n# for i in range(n):\n# A = input()\n# A_list = list(set(A))\n# cnt = []\n# for j in A_list:\n# count = A.count(j)\n# cnt.append(count)\n# if cnt[0] == cnt[1]:\n# print(\"YES\")\n# else:\n# print('NO')\n# 런타임 오류, 아마 마지막 인덱스 지정한 거 때문에?\n\n\n# n = int(input())\n# for i in range(n):\n# A = list(input())\n# sum = 0\n# for j in A:\n# if j == '(':\n# sum +=1\n# else :\n# sum -=1\n# if sum == 0:\n# print(\"YES\")\n# else:\n# print(\"NO\")\n# wrong\n\n# '('가 먼저 들어온다고 고정해야 됨\n# 괄호의 시작은 ( 이니까\nn = int(input())\nfor i in range(n):\n A = list(input())\n sum = 0\n for j in A:\n if j == '(':\n sum +=1\n elif j ==')':\n sum -=1\n if sum < 0:\n print(\"NO\")\n break\n\n if sum > 0:\n print(\"NO\")\n elif sum ==0:\n print(\"YES\")\n","repo_name":"GayeonKimm/CT","sub_path":"String/9012.py","file_name":"9012.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"26751739204","text":"import sys\r\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\r\n\r\nfrom AuthorWindow import Ui_AuthorWindow\r\n\r\nclass MyAuthorWindow(QMainWindow, Ui_AuthorWindow):\r\n def __init__(self):\r\n super(MyAuthorWindow, self).__init__()\r\n self.setupUi(self)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app = QApplication(sys.argv)\r\n myWin = MyAuthorWindow()\r\n myWin.show()\r\n sys.exit(app.exec_())\r\n\r\n\r\n\r\n\r\n","repo_name":"birdtianyu/Object-Detection","sub_path":"CallAuthorWindow.py","file_name":"CallAuthorWindow.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"20557610621","text":"from django.shortcuts import render,redirect\nimport sys\nsys.path.insert(1,'/usr/share/doc/python3-fingerprint/examples')\nimport example_search\nfrom register.models import Candidate, Voter\n# from result.models import Vote\nfrom register.forms import candidateForm, voterForm\n\n# Create your views here.\ndef index(request):\n\tcontext={}\n\tcandidates=Candidate.objects.all().order_by('region')\n\tprev=0\n\tcontext['regions']=[]\n\ttemp=[]\n\tfor cand in candidates:\n\t\tif(prev==cand.region):\n\t\t\ttemp.append(cand)\n\t\telse:\n\t\t\tif(prev!=0):\n\t\t\t\tcontext['regions'].append(temp)\n\t\t\tprev=cand.region\n\t\t\ttemp=[]\n\t\t\ttemp.append(cand)\n\tcontext['regions'].append(temp)\n\tprint(context)\n\tprint(\"$$$$$$$$$$$$$$$\")\n\treturn render(request,'result/index.html',context)\n","repo_name":"kSinghParth/eVoting","sub_path":"result/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33154025206","text":"# discordpy-startup\n# -*- coding: utf-8 -*-\nimport sys\nimport discord\nimport aiohttp\nimport random\nimport asyncio\nimport time\nimport datetime\nimport 
urllib.request\nimport json\nimport re\nimport os\nimport traceback\nimport math\n\nfrom discord.ext import tasks\nfrom datetime import datetime, timedelta, timezone\n\nimport logging\n\n\nJST = timezone(timedelta(hours=+9), 'JST') # タイムゾーンの生成\nclient = discord.Client() \nTOKEN = os.environ['DISCORD_BOT_TOKEN'] # botのtoken(heroku参照)\n\nclass Data:\n id_list = []\n\ngch = Data()\n\n@client.event\nasync def on_ready():\n\n # 起動後の初期処理の実行を開始する合図としてステータスを変更\n await client.change_presence(activity=discord.Game(name=\"起動中( ˘ω˘ ) スヤァ…\"))\n\n # 使い勝手がいいので起動時の時刻を日本時間で取得\n dateTime = datetime.now(JST)\n\n # 指定チャンネルに起動ログ(embed)を送信\n ready_chid = 675965627873361930\n ready_ch = client.get_channel(ready_chid)\n embed = discord.Embed(title = \"起動ログ\")\n embed.timestamp = datetime.now(JST)\n await ready_ch.send(embed = embed)\n\n # グローバルチャンネルのIDのリストを生成\n path = \"data/global_channel/id_data.txt\"\n with open(path, mode = \"r\") as file:\n gch.id_list = [int(i.replace(\"\\n\", \"\")) for i in file.readlines()] \n print(gch.id_list)\n await client.change_presence(activity=discord.Game(name=f\"a:help\"))\n\n \n@client.event\nasync def on_member_join(member):\n pass\n\n\n@client.event\nasync def on_member_remove(member): \n pass\n\n\n@tasks.loop(seconds=10)\nasync def loop():\n pass\n\n\n@client.event\nasync def on_message(message):\n\n if message.author.bot or message.embeds: \n return\n\n\n # グローバルチャットの会話プログラム\n g_webhook_name = \"雑談用\" # 2チャンネル間のWebhook名\n\n if message.channel.id in gch.id_list: #IDが登録されているチャンネルにメッセージが送信されたら\n\n def another_ch(ch):\n ch_list = []\n\n for ch_id in gch.id_list:\n\n if message.channel.id != ch_id:\n ch = client.get_channel(ch_id)\n ch_list.append(ch)\n\n return ch_list\n\n for channel in another_ch(message.channel):\n\n if channel == None:\n continue\n\n ch_webhooks = await channel.webhooks()\n webhook = discord.utils.get(ch_webhooks, name=g_webhook_name)\n\n if webhook is None: # 雑談用ってwebhookがなかったら無視\n await channel.create_webhook(name = \"雑談用\")\n await channel.send(\"Webhook作ったよ\")\n\n if message.attachments:\n await webhook.send(\n content=message.content,\n username=message.author.name,\n avatar_url=message.author.avatar_url_as(format=\"png\")\n )\n\n for A in message.attachments:\n img_embed = discord.Embed()\n img_embed.set_image(url=A.url)\n await webhook.send(\n #content=message.content,\n embed=img_embed,\n username=message.author.name,\n avatar_url=message.author.avatar_url_as(format=\"png\")\n )\n\n else:\n await webhook.send(\n content=message.content,\n username=message.author.name,\n avatar_url=message.author.avatar_url_as(format=\"png\")\n )\n\n # チャンネルのグローバル登録コマンドプログラム\n if message.guild:\n\n if message.content == \"a!add\":\n\n if not message.author.guild_permissions.administrator:\n await message.channel.send(\"おっと!\\n君は管理者権限を持ってないから追加は出来ないよ。\")\n return\n\n if message.channel.id in gch.id_list:\n await message.channel.send(\"登録済だよ?\")\n return\n\n channel = message.channel\n ch_webhooks = await channel.webhooks()\n webhook = discord.utils.get(ch_webhooks, name=g_webhook_name)\n\n if webhook is None:\n\n try:\n await channel.create_webhook(name = \"雑談用\")\n\n except:\n await channel.send(\"なんかうまくいかなかった()\")\n\n else:\n path = \"data/global_channel/id_data.txt\"\n with open(path, mode = \"a\") as file:\n try:\n file.write(f\"\\n{channel.id}\")\n except:\n print(\"error\")\n else:\n gch.id_list.append(channel.id)\n\n for id in gch.id_list:\n ch = client.get_channel(id)\n await ch.send(f\"{channel.name}をグローバルチャンネルに追加したよ!\")\n return\n await 
message.channel.send(\"登録済だよ?\")\n \n\nclient.run(TOKEN)\n","repo_name":"amano1/azarashi_bot","sub_path":"discordbot.py","file_name":"discordbot.py","file_ext":"py","file_size_in_byte":5262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9026333901","text":"import os\nimport boto3\nfrom infraestructure.gateways import StorageGateway\nfrom domain.helpers.loggers import logger\n\nclass S3Imp(StorageGateway):\n\n def __init__(self) -> None:\n try:\n self.client = boto3.client('s3', aws_access_key_id=os.getenv(\"ACCESS_KEY\"),\n aws_secret_access_key=os.getenv(\"SECRET_ACCESS_KEY\"))\n self.bucket_name = os.getenv(\"BUCKET_NAME\")\n except Exception as e:\n logger.error(f\"{str(e)}\")\n\n def download(self, object_name: str) -> str:\n BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n path_data = os.path.join(BASE_DIR, 'bin', object_name)\n self.client.download_file(self.bucket_name, object_name, path_data)\n return path_data\n","repo_name":"LuisDiazM/nlp-chatbots","sub_path":"backend/core-ia-models/infraestructure/storage/s3Imp.py","file_name":"s3Imp.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"36499268932","text":"import sys\nimport os\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\nimport re\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LogisticRegression, LinearRegression\nfrom sklearn.metrics import classification_report, roc_auc_score\nfrom sklearn.model_selection import train_test_split\n\n\n\n\ndef run_character_bin(repr_type, train_prefix, val_prefix):\n with open(train_prefix+'_all_embeddings.pkl', 'rb') as file:\n all_embeddings = pickle.load(file)\n\n with open(train_prefix+'_all_concept_pixels.pkl', 'rb') as file:\n all_concept_pixels = pickle.load(file)\n \n with open(val_prefix+'_all_embeddings.pkl', 'rb') as file:\n val_all_embeddings = pickle.load(file)\n\n with open(val_prefix+'_all_concept_pixels.pkl', 'rb') as file:\n val_all_concept_pixels = pickle.load(file)\n \n all_embeddings_swav = np.concatenate(all_embeddings)\n val_all_embeddings_swav = np.concatenate(val_all_embeddings)\n \n bin_mapping = np.array([ 2194., 10191., 18188., 26185., 34182., 42179., 50176.])\n \n y_train = [y_ex-1 for y_ex in np.digitize(all_concept_pixels, bin_mapping)]\n y_test = [y_ex-1 for y_ex in np.digitize(val_all_concept_pixels, bin_mapping)]\n \n x_train = all_embeddings_swav\n x_test = val_all_embeddings_swav\n \n log_reg = LogisticRegression(class_weight='balanced', max_iter=500, multi_class='ovr', n_jobs=80)\n log_reg.fit(x_train, y_train)\n \n pkl_filename = \"probing_results/log_reg_char_bin_\"+repr_type+'.pkl'\n with open(pkl_filename, 'wb') as file:\n pickle.dump(log_reg, file)\n \n y_pred = log_reg.predict(x_test)\n y_pred_proba = log_reg.predict_proba(x_test)\n \n results = pd.DataFrame.from_dict(classification_report(y_test, y_pred,\n output_dict=True)).round(2)\n \n results.to_csv('probing_results/'+repr_type+'_char_bin.csv')\n \n roc_auc = roc_auc_score(y_test, y_pred_proba, multi_class='ovr')\n roc_auc_ovo = roc_auc_score(y_test, y_pred_proba, multi_class='ovo')\n with open('probing_results/'+repr_type+'_char_bin_roc_auc.txt', 'w') as file:\n file.write(str(roc_auc))\n file.close()\n with open('probing_results/'+repr_type+'_char_bin_roc_auc_ovo.txt', 'w') as file:\n file.write(str(roc_auc_ovo))\n 
file.close()\n\nif __name__ == \"__main__\":\n    print('SWAV')\n    run_character_bin('swav', 'train_swav', 'val_swav')\n\n    print('MOCO')\n    run_character_bin('moco', 'train_superpixels_moco', 'val_superpixels_moco')\n\n    print('BYOL')\n    run_character_bin('byol', 'train_superpixels_byol', 'val_superpixels_byol')\n\n    print('SIMCLR')\n    run_character_bin('simclr', 'train_superpixels_simclr', 'val_superpixels_simclr')\n","repo_name":"BioNN-InfoTech/visual-probes","sub_path":"run_character_bin.py","file_name":"run_character_bin.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"18177977375","text":"import sys\r\nsys.stdin = open(\"input.txt\", \"r\")\r\n\r\nimport heapq\r\n\r\nmod = 1000000009\r\nMAX = 100001\r\nINF = 100000000000000000\r\ninput = sys.stdin.readline\r\n\r\ndp = [0 for _ in range(MAX)]\r\ndist = [INF for _ in range(MAX)]\r\nadj = [[] for _ in range(MAX)]\r\n\r\n\r\nN, M, S, E = map(int, input().split())\r\nfor i in range(M):\r\n    node1, node2, cost = map(int, input().split())\r\n    adj[node1].append([node2, cost])\r\n    adj[node2].append([node1, cost])\r\n\r\npq = []\r\nheapq.heappush(pq, [0, S])\r\ndist[S] = 0\r\ndp[S] = 1\r\n\r\nwhile len(pq) != 0:\r\n    cur_dist, cur = pq[0]\r\n    heapq.heappop(pq)\r\n\r\n    if dist[cur] < cur_dist:\r\n        continue\r\n\r\n    for nxt, nxt_dist in adj[cur]:\r\n        if dist[nxt] < cur_dist + nxt_dist:\r\n            continue\r\n        if dist[nxt] == cur_dist + nxt_dist:\r\n            dp[nxt] += dp[cur]\r\n            dp[nxt] %= mod\r\n        else: # dist[nxt] > cur_dist + nxt_dist\r\n            dist[nxt] = cur_dist + nxt_dist\r\n            heapq.heappush(pq, [cur_dist + nxt_dist, nxt])\r\n            dp[nxt] = dp[cur] % mod\r\n\r\nprint(dp[E])\r\n","repo_name":"AlphaTechnic/SOGANG_ICPC_training_day","sub_path":"2021-02-14/14554.py","file_name":"14554.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"10810702361","text":"#!/usr/bin/env python\n\n# remove_proper\n#\n# Removes proper nouns from dictionary file and writes out a new one.\n# Dictionary file may be specified as the first command line argument;\n# default is /usr/share/dict/words.\n\nimport sys\n\ntry: dictionary = sys.argv[1]\nexcept IndexError: dictionary = '/usr/share/dict/words'\n\nwords = [ x for x in open(dictionary, 'r').readlines() if x.islower() ]\n\nwith open('non_proper_dictionary', mode='w', encoding='utf-8') as output:\n    output.write(''.join(words))\n","repo_name":"lonnon/passphrase","sub_path":"remove_proper.py","file_name":"remove_proper.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"7807513657","text":"import sys\r\nsys.stdin = open('input.txt')\r\n\r\nmove = [[0, 1], [0, -1], [1, 0], [-1, 0]]\r\n\r\nfor tc in range(1, 11):\r\n    N = input()\r\n    maze = [list(map(int, input())) for _ in range(16)]\r\n    start_col = 0\r\n    start_row = 0\r\n    answer = 0\r\n\r\n    for i in maze:\r\n        if 2 in i:\r\n            start_col = i.index(2)\r\n            break\r\n        start_row += 1\r\n\r\n    bfs_queue = [(start_row, start_col)]\r\n    while bfs_queue:\r\n        check = bfs_queue.pop(0)\r\n        for row, col in move:\r\n            next_row = check[0] + row\r\n            next_col = check[1] + col\r\n\r\n            if next_row == -1 or next_row == 16 or next_col == -1 or next_col == 16:\r\n                continue\r\n\r\n            if maze[next_row][next_col] == 0:\r\n                bfs_queue.append((next_row, next_col))\r\n            elif maze[next_row][next_col] == 3:\r\n                answer = 1\r\n                break\r\n\r\n        maze[check[0]][check[1]] = 1\r\n        if answer:\r\n            break\r\n\r\n    
print('#{} {}'.format(tc, answer))","repo_name":"asooso1/ssafy_algorithm","sub_path":"0826/윤여창/1226_미로1/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37229272913","text":"import pytorch_lightning as pl\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn import Linear, Parameter, ReLU\nfrom torch.nn import Sequential as Seq\nfrom torch_geometric.nn import GCNConv, MessagePassing\nfrom torch_geometric.nn.conv.gcn_conv import gcn_norm\nfrom torch_geometric.nn.inits import glorot, zeros\nfrom torch_geometric.utils import add_self_loops, remove_self_loops\n\nfrom automltsad.detectors.GTA.model import Informer\nfrom automltsad.detectors.GTA.tconv import TemporalBlock\n\n\nclass AdaGCNConv(MessagePassing):\n def __init__(\n self,\n num_nodes,\n in_channels,\n out_channels,\n improved=False,\n add_self_loops=False,\n normalize=True,\n bias=True,\n init_method='all',\n ):\n super(AdaGCNConv, self).__init__(\n aggr='add', node_dim=0\n ) # \"Max\" aggregation.\n self.num_nodes = num_nodes\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.improved = improved\n self.add_self_loops = add_self_loops\n self.normalize = normalize\n self.bias = bias\n self.init_method = init_method\n\n self.weight = Parameter(torch.Tensor(in_channels, out_channels))\n\n if bias:\n self.bias = Parameter(torch.Tensor(out_channels))\n else:\n self.register_parameter('bias', None)\n\n self._init_graph_logits_()\n\n self.reset_parameters()\n\n def _init_graph_logits_(self):\n if self.init_method == 'all':\n logits = 0.8 * torch.ones(self.num_nodes**2, 2)\n logits[:, 1] = 0\n elif self.init_method == 'random':\n logits = 1e-3 * torch.randn(self.num_nodes**2, 2)\n elif self.init_method == 'equal':\n logits = 0.5 * torch.ones(self.num_nodes**2, 2)\n else:\n raise NotImplementedError(\n 'Initial Method %s is not implemented' % self.init_method\n )\n\n self.register_parameter(\n 'logits', Parameter(logits, requires_grad=True)\n )\n\n def reset_parameters(self):\n glorot(self.weight)\n zeros(self.bias)\n\n def forward(self, x, edge_index, edge_weight=None):\n # x has shape [N, in_channels]\n # edge_index has shape [2, E]\n if self.normalize:\n edge_index, edge_weight = gcn_norm( # yapf: disable\n edge_index,\n edge_weight,\n x.size(self.node_dim),\n self.improved,\n self.add_self_loops,\n dtype=x.dtype,\n )\n\n z = torch.nn.functional.gumbel_softmax(self.logits, hard=True)\n\n x = torch.matmul(x, self.weight)\n\n # propagate_type: (x: Tensor, edge_weight: OptTensor)\n out = self.propagate(\n edge_index, x=x, edge_weight=edge_weight, size=None, z=z\n )\n\n if self.bias is not None:\n out += self.bias\n\n return out\n\n def message(self, x_j, edge_weight, z):\n if edge_weight is None:\n return x_j * z[:, 0].contiguous().view(\n [-1] + [1] * (x_j.dim() - 1)\n )\n else:\n return (\n edge_weight.view([-1] + [1] * (x_j.dim() - 1))\n * x_j\n * z[:, 0].contiguous().view([-1] + [1] * (x_j.dim() - 1))\n )\n\n def __repr__(self):\n return '{}({}, {})'.format(\n self.__class__.__name__, self.in_channels, self.out_channels\n )\n\n\nclass GraphTemporalEmbedding(torch.nn.Module):\n def __init__(\n self,\n num_nodes,\n seq_len,\n num_levels,\n kernel_size=3,\n dropout=0.02,\n device=torch.device('cuda:0'),\n ):\n super(GraphTemporalEmbedding, self).__init__()\n self.num_nodes = num_nodes\n self.seq_len = seq_len\n self.num_levels = num_levels\n self.device = device\n assert (kernel_size 
- 1) // 2\n\n self.tc_modules = torch.nn.ModuleList([])\n self.gc_module = AdaGCNConv(num_nodes, seq_len, seq_len)\n for i in range(num_levels):\n dilation_size = 2**i\n self.tc_modules.extend(\n [\n TemporalBlock(\n num_nodes,\n num_nodes,\n kernel_size=kernel_size,\n stride=1,\n dilation=dilation_size,\n padding=(kernel_size - 1) * dilation_size // 2,\n dropout=dropout,\n )\n ]\n )\n # self.gc_modules.extend([AdaGCNConv(num_nodes, seq_len, seq_len)])\n\n source_nodes, target_nodes = [], []\n for i in range(num_nodes):\n for j in range(num_nodes):\n source_nodes.append(j)\n target_nodes.append(i)\n self.edge_index = torch.tensor(\n [source_nodes, target_nodes], dtype=torch.long, device=self.device\n )\n\n def forward(self, x):\n # >> (bsz, seq_len, num_nodes)\n x = x.permute(0, 2, 1) # >> (bsz, num_nodes, seq_len)\n\n x = self.tc_modules[0](x) # >> (bsz, num_nodes, seq_len)\n x = self.gc_module(x.transpose(0, 1), self.edge_index).transpose(\n 0, 1\n ) # >> (bsz, num_nodes, seq_len)\n # output = x\n\n for i in range(1, self.num_levels):\n x = self.tc_modules[i](x) # >> (bsz, num_nodes, seq_len)\n x = self.gc_module(x.transpose(0, 1), self.edge_index).transpose(\n 0, 1\n ) # >> (bsz, num_nodes, seq_len)\n # output += x\n\n # return output.transpose(1, 2) # >> (bsz, seq_len, num_nodes)\n return x.transpose(1, 2)\n\n\nclass GTA(pl.LightningModule):\n def __init__(\n self,\n num_nodes,\n seq_len,\n label_len,\n out_len,\n num_levels,\n learning_rate,\n factor=4,\n d_model=512,\n n_heads=8,\n e_layers=3,\n d_layers=2,\n d_ff=512,\n dropout=0.0,\n attn='prob',\n embed='fixed',\n data='ETTh',\n activation='gelu',\n device=torch.device('cuda:0'),\n direction='minimize',\n ):\n super().__init__()\n self.num_nodes = num_nodes\n self.seq_len = seq_len\n self.label_len = label_len\n self.out_len = out_len\n self.num_levels = num_levels\n self.learning_rate = learning_rate\n self.direction = direction\n\n self.gt_embedding = GraphTemporalEmbedding(\n num_nodes,\n seq_len,\n num_levels,\n kernel_size=3,\n dropout=dropout,\n device=device,\n )\n self.model = Informer(\n num_nodes,\n num_nodes,\n num_nodes,\n seq_len,\n label_len,\n out_len,\n factor,\n d_model,\n n_heads,\n e_layers,\n d_layers,\n d_ff,\n dropout,\n attn,\n embed,\n data,\n activation,\n device,\n )\n\n def forward(self, batch_x, batch_y, batch_x_mark, batch_y_mark):\n batch_x = self.gt_embedding(batch_x)\n dec_inp = torch.zeros_like(\n batch_y[:, -self.out_len :, :], device=self.device\n )\n dec_inp = torch.cat([batch_y[:, : self.label_len, :], dec_inp], dim=1)\n output = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n return output\n\n def training_step(self, batch, batch_idx):\n batch_x, batch_y, batch_x_mark, batch_y_mark = batch\n batch_x = self.gt_embedding(batch_x)\n dec_inp = torch.zeros_like(\n batch_y[:, -self.out_len :, :], device=self.device\n )\n dec_inp = torch.cat([batch_y[:, : self.label_len, :], dec_inp], dim=1)\n output = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)\n y = batch_y[:, -self.out_len :, :]\n loss = F.mse_loss(y, output)\n if self.direction == 'maximize':\n loss = torch.finfo(loss.dtype).max - loss\n self.log('train_loss', loss, prog_bar=True)\n return loss\n\n def validation_step(self, batch, batch_idx):\n batch_x, batch_y, batch_x_mark, batch_y_mark = batch\n batch_x = self.gt_embedding(batch_x)\n dec_inp = torch.zeros_like(\n batch_y[:, -self.out_len :, :], device=self.device\n )\n dec_inp = torch.cat([batch_y[:, : self.label_len, :], dec_inp], dim=1)\n output = self.model(batch_x, 
batch_x_mark, dec_inp, batch_y_mark)\n y = batch_y[:, -self.out_len :, :]\n loss = F.mse_loss(y, output)\n if self.direction == 'maximize':\n loss = torch.finfo(loss.dtype).max - loss\n self.log('val_loss', loss, prog_bar=True)\n return loss\n\n def predict_step(self, batch, batch_idx, dataloader_idx=0):\n batch_x, batch_y, batch_x_mark, batch_y_mark = batch\n y = batch_y[:, -self.out_len :, :]\n y_hat = self(batch_x, batch_y, batch_x_mark, batch_y_mark)\n out = F.mse_loss(y, y_hat, reduction='none').view(-1, 1)\n return out\n\n def configure_optimizers(self):\n optimizer = torch.optim.AdamW(self.parameters(), lr=self.learning_rate)\n return optimizer\n","repo_name":"NevoleMarek/automltsad","sub_path":"automltsad/detectors/GTA/gta.py","file_name":"gta.py","file_ext":"py","file_size_in_byte":9315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"55377500","text":"with open(\"input.txt\") as file:\n data = file.read().split(\"\\n\")\n\ntotal = 0\nfor line in data:\n line = line.split(\" | \")\n digitStrings = list(map(set, line[0].split(\" \"))) # Strings to the left of |\n outputStrings = list(map(set, line[1].split(\" \"))) # String to the right of |\n\n # Numbers 0-9 as keys mapped to their\n mapping = {i:None for i in range(10)}\n\n for s in digitStrings:\n if len(s) == 2:\n mapping[1] = s\n elif len(s) == 3:\n mapping[7] = s\n elif len(s) == 7:\n mapping[8] = s\n elif len(s) == 4:\n mapping[4] = s\n\n # Take away the letters common to the strings for 7 and 1 to get the\n # character corresponding to 'a' in the default 7-seg display\n a = mapping[7].difference(mapping[1]).pop()\n \n b = \"\"\n c = \"\"\n e = \"\"\n f = \"\"\n # Since b is the only character to appear 6 times in the (default) \n # digitStrings, we can determine which character corresponds to b. 
Similar \n # applies for the characters c, e, and f.\n for letter in \"abcdefg\":\n count = 0\n for s in digitStrings:\n if letter in s:\n count += 1\n\n if count == 6:\n b = letter\n elif count == 8 and letter != a:\n c = letter\n elif count == 4:\n e = letter\n elif count == 9:\n f = letter\n\n # Removing now known characters from some known strings yields the \n # remaining characters, d and g\n d = mapping[4].difference(set(b + c + f)).pop()\n g = mapping[8].difference(set(a + b + c + d + e + f)).pop()\n\n # Build the remaining mappings\n mapping[0] = set(a + b + c + e + f + g)\n mapping[2] = set(a + c + d + e + g)\n mapping[3] = set(a + c + d + f + g)\n mapping[5] = set(a + b + d + f + g)\n mapping[6] = set(a + b + d + e + f + g)\n mapping[9] = set(a + b + c + d + f + g)\n\n # Build the value of the output number in num using the known mappings\n num = 0\n for i in outputStrings:\n for k, v in mapping.items():\n if v == i:\n num = 10 * num + k\n total += num\n\nprint(total)\n \n","repo_name":"benchittle/advent-of-code-2021","sub_path":"day08/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23914085021","text":"import tensorflow as tf\nimport numpy as np\n\nfrom functools import cached_property\nfrom typing import Any, Callable\n\nimport random\nimport os\nimport shutil\nimport mmap\nimport sys\n\nfrom math import ceil\n\n\nclass DataConverter:\n def serialize(self, item) -> bytes:\n raise NotImplementedError()\n\n def deserialize(self, raw) -> Any:\n raise NotImplementedError()\n\n @property\n def item_size(self) -> int:\n raise NotImplementedError()\n\n\nclass DQNConverter(DataConverter):\n def __init__(self, height: int, width: int, agent_history_length: int):\n super().__init__()\n self.height = height\n self.width = width\n self.agent_history_length = agent_history_length\n\n @cached_property\n def item_size(self) -> int:\n seq_size = self.height * self.width * self.agent_history_length * 4\n action_size = 8\n reward_size = 4\n next_seq_size = self.height * self.width * self.agent_history_length * 4\n bool_size = 1\n return (seq_size + action_size + reward_size + next_seq_size +\n bool_size)\n\n def serialize(self, item):\n assert len(item) == 5, 'invalid item'\n seq, action, reward, next_seq, done = item\n seq_raw = np.array(seq, dtype=np.float32).tobytes()\n action_raw = np.array(action, dtype=np.int64).tobytes()\n reward_raw = np.array(reward, dtype=np.float32).tobytes()\n next_seq_raw = np.array(next_seq, dtype=np.float32).tobytes()\n done_raw = np.array(done, dtype=np.bool).tobytes()\n return b''.join(\n [seq_raw, action_raw, reward_raw, next_seq_raw, done_raw])\n\n def deserialize(self, raw):\n assert len(raw) == self.item_size, 'invalid raw data'\n seq_size = self.height * self.width * self.agent_history_length * 4\n\n start, end = 0, seq_size\n seq_raw = raw[start:end]\n\n start, end = end, (end + 8)\n action_raw = raw[start:end]\n\n start, end = end, (end + 4)\n reward_raw = raw[start:end]\n\n start, end = end, (end + seq_size)\n next_seq_raw = raw[start:end]\n\n start, end = end, (end + 1)\n done_raw = raw[start:end]\n\n return (\n np.frombuffer(seq_raw, dtype=np.float32).reshape(\n (self.height, self.width, self.agent_history_length)),\n np.frombuffer(action_raw, dtype=np.int64).reshape(()),\n np.frombuffer(reward_raw, dtype=np.float32).reshape(()),\n np.frombuffer(next_seq_raw, dtype=np.float32).reshape(\n (self.height, self.width, 
self.agent_history_length)),\n np.frombuffer(done_raw, dtype=np.bool).reshape(()),\n )\n\n\nclass ReplayBuffer:\n def __init__(self, path: str, capacity: int, converter: DataConverter):\n self.path = path\n self.capacity = capacity\n self.converter = converter\n self.file = None\n self.data = None\n self.index = None\n self.size = None\n\n @cached_property\n def item_size(self):\n return self.converter.item_size\n\n @cached_property\n def raw_size(self):\n return 12 + self.capacity * self.item_size\n\n def initialize(self):\n total, used, free = shutil.disk_usage(os.path.dirname(self.path))\n assert free >= self.raw_size, 'not enough disk space'\n\n if sys.platform.startswith('linux'):\n r = os.system(f'fallocate -l {self.raw_size} {self.path}')\n elif sys.platform == 'darwin':\n r = os.system(f'mkfile -n {self.raw_size} {self.path}')\n else:\n raise Exception('Unexpected platform')\n\n assert r == 0, 'initialization failed'\n\n with open(self.path, 'r+b') as f:\n fn = f.fileno()\n mm = mmap.mmap(fn, 0)\n mm[:4] = np.array(self.capacity, dtype=np.int32).tobytes()\n mm[4:8] = np.array(0, dtype=np.int32).tobytes()\n mm[8:12] = np.array(0, dtype=np.int32).tobytes()\n mm.close()\n\n def open(self):\n try:\n self.file = open(self.path, 'r+b')\n self.data = mmap.mmap(self.file.fileno(), 0)\n self.data.madvise(mmap.MADV_RANDOM)\n assert np.frombuffer(\n self.data[:4],\n dtype=np.int32) == self.capacity, ('capacity does not match')\n\n self.index = np.frombuffer(self.data[4:8], dtype=np.int32).reshape(\n ())\n assert self.index >= 0 and self.index < self.capacity, (\n 'index is out of bounds')\n\n self.size = np.frombuffer(self.data[8:12], dtype=np.int32).reshape(\n ())\n assert self.size >= 0 and self.size <= self.capacity, (\n 'current size is out of bounds')\n except Exception:\n self.close()\n raise\n\n def close(self):\n if self.data is not None:\n self.data.close()\n if self.file is not None:\n self.file.close()\n self.index = None\n self.size = None\n\n def _write_to_index(self, x, index):\n start_index = 12 + self.item_size * index\n end_index = start_index + self.item_size\n self.data[start_index:end_index] = self.converter.serialize(x)\n\n def _read_from_index(self, index):\n start_index = 12 + self.item_size * index\n end_index = start_index + self.item_size\n return self.converter.deserialize(self.data[start_index:end_index])\n\n def _update_index(self, index):\n self.data[4:8] = np.array(index, dtype=np.int32).tobytes()\n self.index = index\n\n def _update_size(self, size):\n self.data[8:12] = np.array(size, dtype=np.int32).tobytes()\n self.size = size\n\n def append(self, x):\n assert self.capacity > 0\n self._write_to_index(x, self.index)\n self._update_index((self.index + 1) % self.capacity)\n if self.size < self.capacity:\n self._update_size(self.size + 1)\n\n def sample(self):\n buffer_size = int(self.size)\n return self._read_from_index(np.random.choice(buffer_size, (), False))\n\n def sample_batch(self, batch_size):\n samples = [\n self._read_from_index(x) for x in np.random.choice(\n self.size, min(self.size, batch_size), False)\n ]\n return tuple(tf.convert_to_tensor(np.stack(x)) for x in zip(*samples))\n","repo_name":"enceladus-rex/dqn","sub_path":"dqn/memory.py","file_name":"memory.py","file_ext":"py","file_size_in_byte":6342,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"33407834774","text":"import xml.etree.cElementTree as ET\nfrom xml.etree.ElementTree import ElementTree, Element\nimport 
collections\n\n\nEDIT_CLASS = \"android.widget.EditText\"\nACTION_EDIT = \"edit\"\nACTION_CLICK = \"click\"\nACTION_LONG_CLICK = \"long_click\"\nACTION_SCROLL_LEFT_TO_RIGHT = \"scroll_left_to_right\"\nACTION_SCROLL_RIGHT_TO_LEFT = \"scroll_right_to_left\"\nACTION_SCROLL_UP_TO_DOWN = \"scroll_up_to_down\"\nACTION_SCROLL_DOWN_TO_UP = \"scroll_down_to_up\"\nACTION_UIAUTOMATOR = \"uiautomator\"\nACTION_INTENT = \"intent\"\nACTION_KEY_EVENT = \"key\"\nACTION_BACK_EVENT = \"back\"\n\n\nclass Point:\n    def __init__(self, left_x, left_y, right_x, right_y):\n        self.left_x = int(left_x)\n        self.left_y = int(left_y)\n        self.right_x = int(right_x)\n        self.right_y = int(right_y)\n\n        self.x = (self.left_x + self.right_x) / 2\n        self.y = (self.left_y + self.right_y) / 2\n\n    @property\n    def str(self):\n        return '[%s,%s][%s,%s]' % (self.left_x, self.left_y, self.right_x, self.right_y)\n\n\nclass Node:\n    def __init__(self):\n        self.index = 0\n        self.text = \"\"\n        self.resource_id = \"\"\n        self.class_ = \"\"\n        self.package = \"\"\n        self.content_desc = \"\"\n        self.checkable = False\n        self.checked = False\n        self.clickable = False\n        self.enabled = False\n        self.focusable = False\n        self.focused = False\n        self.scrollable = False\n        self.long_clickable = False\n        self.password = False\n        self.selected = False\n        self.visible_to_user = False\n        self.bounds = Point(0, 0, 0, 0)\n        self.children = []\n        self.parent = None\n\n    @property\n    def str(self):\n        return '<Node index=%s text=%s class=%s resource-id=%s package=%s content-desc=%s clickable=%s>' % \\\n               (self.index, self.text, self.class_, self.resource_id, self.package, self.content_desc, self.clickable)\n\n    @staticmethod\n    def get_nodes_from_tree(tree):\n        q = collections.deque()\n        q.append(tree)\n        nodes = []\n        while q:\n            node = q.pop()\n            actions = Node.get_actions_from_node(node)\n            if actions:\n                nodes.append(node)\n\n            for child in node.children:\n                q.append(child)\n\n        return nodes\n\n    @staticmethod\n    def get_actions_from_node(node):\n        actions = []\n        class_ = node.class_\n        clickable = node.clickable\n        long_clickable = node.long_clickable\n        scrollable = node.scrollable\n\n        # if class_ == EDIT_CLASS:\n        #     actions.append(ACTION_EDIT)\n        if clickable:\n            actions.append(ACTION_CLICK)\n        elif long_clickable:\n            actions.append(ACTION_LONG_CLICK)\n        elif scrollable:\n            actions.append(ACTION_SCROLL_LEFT_TO_RIGHT)\n            actions.append(ACTION_SCROLL_UP_TO_DOWN)\n            actions.append(ACTION_SCROLL_RIGHT_TO_LEFT)\n            actions.append(ACTION_SCROLL_DOWN_TO_UP)\n        return actions\n","repo_name":"uplk/CrashDect","sub_path":"utils/tree/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":3081,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"37883230751","text":"#!/usr/bin/env python3\n\nimport os\nimport json\nimport regex as re\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom gyroOrAccl import GyroOrAccel\n\n# sample file strings for parsing\nfile_1 = \"log_00_01_02_00_04.txt\"\nfile_2 = \"log_00_02_03_01_04.txt\"\nfile_3 = \"log_01_11_02_00_04.txt\"\nfile_4 = \"log_00_02_03_01_07.txt\"\n\n## For the purpose of group_by:\nPOSITIVE = 1\nSCENARIO = 2\nTRIAL = 3\nDEVICE = 4\nPERSON = 5\n\npositives = [\"Not fall\", \"Fall\"]\n\nscenarios = [\"Sitting slow\", \"Sitting fast\", \"Cheer up only\", \"Cheer up and down\",\n             \"Punch out only\", \"Punch out and in\", \"Squat sit\", \"Dab\",\n             \"Walking down single step\", \"Walking down 5 steps\", \"Front fall no catch\", \"Front fall catch\",\n             \"Side fall no catch\", \"Side fall catch\", \"Back fall no roll\", \"Back fall with roll\",\n             \"Pushed fall front\", \"Pushed 
fall side\", \"Jump fall\", \"Front knee fall\",\n \"Dive roll\", \"Uncategorized fall\"]\n\ndevices = [\"Natalia's phone\", \"Cho Yin's phone\"]\n\npeople = [\"Natalia\", \"Cho Yin\", \"Lucy\", \"Poorvaja\", \n \"Catherine\", \"Roberto\", \"Han Bin\", \"Andre\", \n \"Clay\", \"Annalie\", \"Karlyn\", \"Najee\",\n \"Gamal\", \"Daniel\", \"Gaibo\", \"Aaron\"]\n\n# convert positive number to string\ndef positive_to_string(pos_index):\n if pos_index in [0,1]:\n return positives[pos_index]\n else:\n raise ValueError(\"Positive value must be 0 or 1\")\n\n# convert scenario number to string\ndef scenario_to_string(scenario_index):\n if 0 <= scenario_index < 22:\n return scenarios[scenario_index] \n else:\n raise ValueError(\"Invalid scenario value\")\n\n# convert device number to string\ndef device_to_string(device_index):\n if 0 <= device_index < 2:\n return devices[device_index]\n else:\n raise ValueError(\"Invalid device value\")\n\n# convert person number to string\ndef person_to_string(person_index):\n if 0 <= person_index < 16:\n return people[person_index]\n else:\n raise ValueError(\"Invalid person value\")\n\n# convert dictionary of categories to array of strings\ndef cat_dict_to_string_dict(cat_dict):\n ret_arr = []\n for cat in cat_dict:\n if cat == POSITIVE:\n ret_arr.append(positive_to_string(cat_dict[cat]))\n elif cat == SCENARIO:\n ret_arr.append(scenario_to_string(cat_dict[cat]))\n elif cat == DEVICE:\n ret_arr.append(device_to_string(cat_dict[cat]))\n elif cat == PERSON:\n ret_arr.append(person_to_string(cat_dict[cat]))\n elif cat == TRIAL:\n ## TODO\n pass\n else:\n raise ValueError\n return ret_arr\n\n# convert dictionary of categories to a single string label\ndef cat_dict_to_string(cat_dict):\n string_dict = cat_dict_to_string_dict(cat_dict)\n return \"; \".join(string_dict)\n\n# categorize a file by types\ndef categorize_file(filename, group_by=[POSITIVE]):\n regex = \"log_(\\d+)_(\\d+)_(\\d+)_(\\d+)_(\\d+).txt\"\n m = re.match(regex, filename)\n positive = m.group(1)\n scenario = m.group(2)\n trial = m.group(3)\n device = m.group(4)\n person = m.group(5)\n cat_dict = {}\n for cat in group_by:\n cat_dict[cat] = int(m.group(cat))\n return cat_dict\n\n## separate a list of files by label types\ndef separate_files(files, group_by=[POSITIVE]):\n ret = []\n for filename in files:\n cat_dict = categorize_file(filename, group_by=group_by)\n added = False\n for ret_dict in ret:\n if ret_dict[\"categories\"] == cat_dict:\n ret_dict[\"files\"].append(filename)\n added = True\n break\n if not added:\n label = cat_dict_to_string(cat_dict)\n new_dict = {\"categories\": cat_dict, \"label\": label, \"files\": [filename]}\n ret.append(new_dict)\n return ret\n\n# separate files by detected type\ndef sep_files_by_fall(folder, extra=[True, True], timeInterval=1.5, timeIntNeg=0.01, negPeakHeight=5, minPeakHeight=16, window_length=41, polyorder=12, thres=0.5, group_by=[POSITIVE]):\n mult_falls = []\n one_fall = []\n no_falls = []\n for file in os.listdir(folder):\n filepath = os.path.join(folder, file)\n with open(filepath) as f:\n data = f.read().split(\"\\n\")\n abso = GyroOrAccel(\"Absolute\", data, file)\n fall_res = abso.is_fall(extra, timeInterval, timeIntNeg, negPeakHeight, minPeakHeight, window_length, polyorder, thres)\n if len(fall_res) == 0:\n no_falls.append(file)\n elif len(fall_res) == 1:\n one_fall.append(file)\n else:\n mult_falls.append(file)\n return {\"one detected\": one_fall,\n \"multiple detected\": mult_falls,\n \"none detected\": no_falls}\n\n# separate files by label and 
detected\ndef sep_files_by_type_and_fall(folder, extra=[True, True], timeInterval=1.5, timeIntNeg=0.01, negPeakHeight=5, minPeakHeight=3, window_length=41, polyorder=12, thres=0.5, group_by=[POSITIVE]):\n    num_peaks_dict = sep_files_by_fall(folder, extra=extra, timeInterval=timeInterval, timeIntNeg=timeIntNeg, negPeakHeight=negPeakHeight,\n        minPeakHeight=minPeakHeight, window_length=window_length, polyorder=polyorder, thres=thres, group_by=group_by)\n    ret_dict = {}\n    for num_key in num_peaks_dict:\n        files = num_peaks_dict[num_key]\n        separated = separate_files(files, group_by=group_by)\n        ret_dict[num_key] = separated\n    return ret_dict\n\n# get recall\ndef get_recall(folder, timeInterval=1.5, minPeakHeight=3):\n    detection_dict = get_num_detected_each_cat(folder, timeInterval=timeInterval, minPeakHeight=minPeakHeight, group_by=[POSITIVE])\n    total_falls = 0\n    true_positives = 0\n    for item in detection_dict[\"one detected\"]:\n        if item[\"label\"] == \"Fall\":\n            true_positives = item[\"num\"]\n    for key in detection_dict:\n        for item in detection_dict[key]:\n            if item[\"label\"] == \"Fall\":\n                total_falls += item[\"num\"]\n    return float(true_positives)/total_falls\n\n# get precision\ndef get_precision(folder, timeInterval=1.5, minPeakHeight=25):\n    detection_dict = get_num_detected_each_cat(folder, timeInterval=timeInterval, minPeakHeight=minPeakHeight, group_by=[POSITIVE])\n    total_detections = detection_dict[\"one detected\"][0][\"num\"] + detection_dict[\"one detected\"][1][\"num\"]\n    true_positives = 0\n    for item in detection_dict[\"one detected\"]:\n        if item[\"label\"] == \"Fall\":\n            true_positives = item[\"num\"]\n    return float(true_positives)/total_detections\n\n# get the number detected in each category\ndef get_num_detected_each_cat(folder, extra=[True, True], timeInterval=1.5, timeIntNeg=0.01, negPeakHeight=5, minPeakHeight=3, window_length=41, polyorder=12, thres=0.5, group_by=[POSITIVE]):\n    separated = sep_files_by_type_and_fall(folder, extra=extra, timeInterval=timeInterval, timeIntNeg=timeIntNeg, negPeakHeight=negPeakHeight,\\\n        minPeakHeight=minPeakHeight, window_length=window_length, polyorder=polyorder, thres=thres, group_by=group_by)\n    ret_dict = {}\n    for num_key in separated:\n        ret_dict[num_key] = []\n        for item in separated[num_key]:\n            item_dict = {\"label\": item[\"label\"], \"num\": len(item[\"files\"])}\n            ret_dict[num_key].append(item_dict)\n    return ret_dict\n\n# print the number detected in each category\ndef print_num_detected_each_cat(folder, extra=[True, True], timeInterval=1.5, timeIntNeg=0.01, negPeakHeight=5, minPeakHeight=16, window_length=41, polyorder=12, thres=0.5, group_by=[POSITIVE]):\n    separated = sep_files_by_type_and_fall(folder, extra, timeInterval, timeIntNeg, negPeakHeight, minPeakHeight, window_length, polyorder, thres)\n    false_pos = 0\n    false_neg = 0\n    truth_pos = 0\n    truth_neg = 0\n    for num_key in separated:\n        # print(num_key)\n        for item in separated[num_key]:\n            label = item[\"label\"]\n            total = len(item[\"files\"])\n            if num_key == \"none detected\":\n                if label == \"Not fall\":\n                    truth_neg += total\n                elif label == \"Fall\":\n                    false_neg += total\n            elif num_key == \"one detected\" or num_key == \"multiple detected\":\n                if label == \"Not fall\":\n                    false_pos += total\n                elif label == \"Fall\":\n                    truth_pos += total\n            # print(\"\\t%s: %d\" % (label, total))\n    sum = false_pos + false_neg + truth_pos + truth_neg\n    
print(\"false_pos: %s, false neg: %s, true pos: %s, true neg: %s\" % (false_pos, false_neg, truth_pos, truth_neg))\n print(\"accuracy: %s, pres: %s, recall: %s\" % (((truth_pos + truth_neg)/sum), (truth_pos/(truth_pos + false_pos)), (truth_pos/(truth_pos + false_neg))))\n return separated\n\n# get recall and precision\n# return -1 for not enough data\ndef get_recall_and_precision(folder, timeInterval=1.5, minPeakHeight=3, group_by=[POSITIVE], phrase=\"\", extra=[True, True]):\n detection_dict = get_num_detected_each_cat(folder, timeInterval=timeInterval, minPeakHeight=minPeakHeight, group_by=group_by, extra=extra)\n # print(json.dumps(detection_dict, indent=4))\n total_falls = 0\n true_positives = 0\n for item in detection_dict[\"one detected\"]:\n if \"Fall\" in item[\"label\"] and phrase in item[\"label\"]:\n true_positives = item[\"num\"]\n if true_positives == 0:\n return -1., -1.\n for key in detection_dict:\n for item in detection_dict[key]:\n if \"Fall\" in item[\"label\"] and phrase in item[\"label\"]:\n total_falls += item[\"num\"]\n recall = float(true_positives)/total_falls\n total_detections = detection_dict[\"one detected\"][0][\"num\"] + detection_dict[\"one detected\"][1][\"num\"]\n for item in detection_dict[\"one detected\"]:\n if \"Fall\" in item[\"label\"] and phrase in item[\"label\"]:\n true_positives = item[\"num\"]\n precision = float(true_positives)/total_detections\n return recall, precision\n\n# get accuracy, recall, and precision\ndef get_accuracy(folder, timeInterval=1.5, minPeakHeight=3, group_by=[POSITIVE], phrase=\"\", extra=[True, True]):\n detection_dict = get_num_detected_each_cat(folder, timeInterval=timeInterval, minPeakHeight=minPeakHeight, group_by=group_by, extra=extra)\n # print(json.dumps(detection_dict, indent=4))\n true_positives = 0\n true_negatives = 0\n false_positives = 0\n false_negatives = 0\n for item in detection_dict[\"one detected\"]:\n if phrase in item[\"label\"]:\n if \"Fall\" in item[\"label\"]:\n true_positives = item[\"num\"]\n elif \"Not fall\" in item[\"label\"]:\n false_positives = item[\"num\"]\n for item in detection_dict[\"none detected\"]:\n if phrase in item[\"label\"]:\n if \"Not fall\" in item[\"label\"]:\n true_negatives = item[\"num\"] \n elif \"Fall\" in item[\"label\"]:\n false_negatives = item[\"num\"]\n total = true_negatives + true_positives + false_negatives + false_positives\n try:\n accuracy = float(true_negatives + true_positives) / total\n except:\n accuracy = -1.\n try:\n precision = float(true_positives) / (true_positives + false_positives)\n except:\n precision = -1.\n try:\n recall = float(true_positives) / (true_positives + false_negatives)\n except:\n recall = -1.\n return accuracy, precision, recall\n\n# get the fraction correct by category\ndef get_truth_by_category(folder, categories, cat_type, first_fall_cat, minPeakHeight=3, extra=[False, True]):\n detection_dict = get_num_detected_each_cat(folder, timeInterval=1.5, minPeakHeight=minPeakHeight, group_by=[POSITIVE, cat_type], extra=extra)\n pos_dict = {}\n neg_dict = {}\n for cat_idx in range(len(categories)):\n cat = categories[cat_idx]\n if cat_idx < first_fall_cat:\n true_key = \"none detected\"\n false_key = \"one detected\"\n else:\n true_key = \"one detected\"\n false_key = \"none detected\"\n true_list = detection_dict[true_key]\n false_list = detection_dict[false_key]\n correct_num = 0\n incorrect_num = 0\n for item in true_list:\n if cat in item[\"label\"]:\n correct_num = item[\"num\"]\n break\n for item in false_list:\n if cat in 
item[\"label\"]:\n incorrect_num = item[\"num\"]\n break\n accuracy = float(correct_num) / (correct_num + incorrect_num)\n if cat_idx < first_fall_cat:\n neg_dict[cat] = accuracy\n else:\n pos_dict[cat] = accuracy\n return neg_dict, pos_dict\n\n# function for finding the best minPeakHeight\ndef optimize_recall_and_precision(folder):\n best_recall = 0\n best_precision = 0\n best_minPeakHeight = 0\n best_overall_precision = 0\n best_ovearll_precision_minPeakHeight = 0\n best_overall_recall = 0\n best_overall_recall_minPeakHeight = 0\n for minPeakHeight in range(50):\n print(\"minHeight: %d\" % (minPeakHeight))\n print(\"best_minPeakHeight: %d, best recall: %f, best precision: %f\" % (best_minPeakHeight, best_recall, best_precision))\n print(\"best_overall_recall: %f (%d), best_overall_precision: %f (%d)\" % (best_overall_recall, best_overall_recall_minPeakHeight, best_overall_precision, best_ovearll_precision_minPeakHeight))\n recall, precision = get_recall_and_precision(folder, timeInterval=1.5, minPeakHeight=minPeakHeight)\n print(\"recall: %f, precision: %f\" % (recall, precision))\n if recall + precision > best_recall + best_precision:\n best_recall = recall\n best_precision = precision\n best_minPeakHeight = minPeakHeight\n if recall > best_overall_recall:\n best_overall_recall = recall\n best_overall_recall_minPeakHeight = minPeakHeight\n if precision > best_overall_precision:\n best_overall_precision = precision\n best_ovearll_precision_minPeakHeight = minPeakHeight\n return best_minPeakHeight, best_recall, best_precision\n\n# get dictionaries for accuracy, precision, and recall\ndef get_accuracy_dict(folder, categories, cat_type, minPeakHeight=3, extra=[False, True]):\n acc_dict = {}\n prec_dict = {}\n rec_dict = {}\n for cat in categories:\n accuracy, precision, recall = get_accuracy(folder, phrase=cat, minPeakHeight=minPeakHeight, group_by=[POSITIVE, cat_type], extra=extra)\n acc_dict[cat] = accuracy\n prec_dict[cat] = precision\n rec_dict[cat] = recall\n return acc_dict, prec_dict, rec_dict\n\n# plot accuracy by category\ndef plot_results():\n folder = \"finaldata\"\n categorize_file(file_1)\n minPeakHeight = 16\n extra = [False, True]\n neg_dict, pos_dict = get_truth_by_category(folder, scenarios, SCENARIO, 10, extra=extra)\n\n for D in [neg_dict, pos_dict]:\n plt.bar(range(len(D)), list(D.values()), align='center')\n locs, labels = plt.xticks(range(len(D)), list(D.keys()))\n plt.setp(labels, rotation=30)\n if D == neg_dict:\n title = \"Proportion of actions with no false positive\"\n else:\n title = \"Proportion of actions with accurate detection\"\n plt.title(title)\n plt.show()\n\n# helper for plotting accuracy, precision, and recall\ndef plot_acc_helper(keys, acc, precis, recall):\n N = len(keys) \n\n ind = np.arange(N) # the x locations for the groups\n width = 0.24 # the width of the bars\n\n fig, ax = plt.subplots()\n rects1 = ax.bar(ind, acc, width, color='#3061b2')\n rects2 = ax.bar(ind + width, precis, width, color='#4286f4')\n rects3 = ax.bar(ind + 2 * width, recall, width, color='#7fafff')\n\n # add some text for labels, title and axes ticks\n ax.set_ylabel('Scores')\n ax.set_title('Accuracy, precision, and recall')\n ax.set_xticks(ind + width / 2)\n ax.set_xticklabels(keys)\n\n ax.legend((rects1[0], rects2[0], rects3[0]), ('Accuracy', 'Precision', 'Recall'))\n\n plt.show()\n\n# plot accuracy, precision, and recall with a specific type\ndef plot_acc_precis_recall(folder, categories, cat_type, minPeakHeight=3, extra=[False,True]):\n acc_dict, prec_dict, rec_dict = 
get_accuracy_dict(folder, categories, cat_type, minPeakHeight=minPeakHeight, extra=extra)\n    keys = acc_dict.keys()\n    acc = acc_dict.values()\n    precis = prec_dict.values()\n    recall = rec_dict.values()\n\n    plot_acc_helper(keys, acc, precis, recall)\n\n# plot differences between algorithms\ndef plot_algo_differences(folder, minPeakHeight=3):\n    keys = [\"Algo 1 and 2\", \"Algo 1 only\", \"Algo 2 only\", \"Original algorithm\"]\n    accs = []\n    precis = []\n    recalls = []\n    for extra in [[True, True], [True, False], [False, True], [False, False]]:\n        accuracy, precision, recall = get_accuracy(folder, timeInterval=1.5, minPeakHeight=minPeakHeight, group_by=[POSITIVE], phrase=\"\", extra=extra)\n        accs.append(accuracy)\n        precis.append(precision)\n        recalls.append(recall)\n\n    plot_acc_helper(keys, accs, precis, recalls)\n\n\n\nif __name__ == '__main__':\n    plot_results()\n    folder = \"finaldata\"\n    categories = people\n    cat_type = PERSON\n    acc_dict, prec_dict, rec_dict = get_accuracy_dict(folder, categories, cat_type, minPeakHeight=3, extra=[False, True])\n    print(acc_dict)\n    print(prec_dict)\n    print(rec_dict)\n\n    folder = \"finaldata\"\n    categories = [\"Lucy\", \"Cho Yin\", \"Najee\", \"Karlyn\"]\n    cat_type = PERSON\n    plot_acc_precis_recall(folder, categories, cat_type, minPeakHeight=3, extra=[False, True])\n\n    plot_algo_differences(folder)\n","repo_name":"kongcho/CS23400","sub_path":"Project/evaluate.py","file_name":"evaluate.py","file_ext":"py","file_size_in_byte":17629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"8555500367","text":"import math\r\n\r\n\r\ndef count_zeros():\r\n    '''\r\n    Determine the precision of the computation\r\n    :return: zeros - the number of digits after the decimal point, eps - the computation accuracy\r\n    '''\r\n    eps = input('Enter the accuracy ')\r\n    zeros = int(eps.count('0', 0, len(eps)))\r\n    eps = float(eps)\r\n    return zeros, eps\r\n\r\n\r\ndef count_func(x, eps):\r\n    '''\r\n    Evaluate the function\r\n    :param eps: number of decimal places for rounding\r\n    :param x: function argument\r\n    :return: the value of the function\r\n    '''\r\n    return round(2 - math.sqrt(x ** 3) - 2 * math.log(x), eps)\r\n\r\n\r\ndef count_first_derivative(x, eps):\r\n    '''\r\n    Evaluate the first derivative\r\n    :param eps: number of decimal places for rounding\r\n    :param x: function argument\r\n    :return: the value of the derivative\r\n    '''\r\n    return round(-3 * math.sqrt(x ** 3)/(2 * x) - 2 / x, eps)\r\n\r\n\r\ndef count_second_derivative(x, eps):\r\n    '''\r\n    Evaluate the second derivative\r\n    :param eps: number of decimal places for rounding\r\n    :param x: function argument\r\n    :return: the value of the derivative\r\n    '''\r\n    return round(-3 * math.sqrt(x ** 3)/(4 * x ** 2) + 2 / (x ** 2), eps)\r\n\r\n\r\nz, e = count_zeros()\r\n\r\nxn = float(input('Enter x0 '))\r\nprint(f\"0 {xn:.{z}f}\")\r\ndiff = 1\r\n\r\ni = 1\r\nwhile e < diff:\r\n    xn_new = xn\r\n    try:\r\n        temp = 2 * (count_first_derivative(xn, z) ** 2) - count_func(xn, z) * count_second_derivative(xn, z)\r\n        xn_new = xn - ((2 * count_func(xn, z) * count_first_derivative(xn, z)) / temp)\r\n    except (ZeroDivisionError, ValueError):\r\n        exit(0)\r\n    diff = abs(xn_new - xn)\r\n    xn = xn_new\r\n    print(i, f\" {xn:.{z}f}\", f\" {diff:.{z}f}\")\r\n    i += 1\r\n","repo_name":"EternityJR/uni","sub_path":"Lab2_VM/Lab3_VM.py","file_name":"Lab3_VM.py","file_ext":"py","file_size_in_byte":1903,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} {"seq_id":"75001434705","text":"import cv2\nimport pandas as pd\nfrom ultralytics import YOLO\nfrom tracker import *\n\nmodel = YOLO('yolov8s.pt')\n\n# 
Function to check if a point crosses a line\ndef is_crossing_line(point, cy, offset):\n return abs(point[1] - cy) < offset\n\nvideo_file = \"WhatsApp Video 2023-10-29 at 12.33.10_5aa8e9bb.mp4\"\ncap = cv2.VideoCapture(video_file)\n\nmy_file = open(\"coco.txt\", \"r\")\ndata = my_file.read()\nclass_list = data.split(\"\\n\")\n\n# Define a list of COCO classes that correspond to vehicles\nvehicle_classes = ['car', 'truck', 'bus', 'motorcycle', 'bicycle']\n\ntracker = Tracker()\ncy1 = 194\ncy2 = 220\noffset = 6\n\nwhile True:\n ret, frame = cap.read()\n if not ret:\n break\n\n frame = cv2.resize(frame, (1020, 500))\n\n results = model.predict(frame)\n a = results[0].boxes.data\n px = pd.DataFrame(a).astype(\"float\")\n vehicle_list = []\n\n for index, row in px.iterrows():\n x1 = int(row[0])\n y1 = int(row[1])\n x2 = int(row[2])\n y2 = int(row[3])\n d = int(row[5])\n\n c = class_list[d]\n if c in vehicle_classes:\n vehicle_list.append([x1, y1, x2, y2])\n\n bbox_id = tracker.update(vehicle_list)\n for bbox in bbox_id:\n x1, y1, x2, y2, id = bbox\n cx = int(x1 + x2) // 2\n cy = int(y1 + y2) // 2\n cv2.circle(frame, (cx, cy), 4, (255, 0, 255), -1)\n\n # Check if the vehicle has crossed the lines\n if is_crossing_line((cx, cy), cy1, offset):\n # Handle the vehicle crossing the line here\n pass\n\n if is_crossing_line((cx, cy), cy2, offset):\n # Handle the vehicle crossing the other line here\n pass\n\n # ... (Rest of your code)\n\n cv2.imshow('frame', frame)\n\n # ... (Rest of your code)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"Tarun-reddy-27/yolov8vehicle-Copy2.1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70413241106","text":"from ase.spacegroup import crystal\nfrom ase.io import write\nfrom ase.build import molecule\n\n# just molecule\natoms = molecule('H2O')\n\n# crystal\nh2o_crystal = crystal(['H', 'H', 'O'],\n basis=[(0.336017, 0.336017, 0.696031),\n (0.460401, 0.460401, 0.511393),\n (0.334231, 0.334231, 0.555157)],\n spacegroup=1,\n cellpar=[7.50, 7.50, 7.06, 90, 90, 107.4],\n pbc=True)\n\n# check angle from H2O\nangle_2 = h2o_crystal.get_angle(1, 2, 0)\nprint('Angle between O-H in crystal:', angle_2)\n\nh2o_crystal *= (6, 6, 6)\n\ncif_file_path = f\"crystal_water_mol_test.cif\"\nwrite(cif_file_path, h2o_crystal)\n","repo_name":"tilde-lab/ml-playground","sub_path":"visualization_of_molecules/crystal_of_water.py","file_name":"crystal_of_water.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"22693747628","text":"from django.urls import path, include\nfrom .views import *\n\nurlpatterns = [\n path('list/', CartItemView.as_view()),\n path('add/', CartItemAddView.as_view()),\n path('delete//', CartItemDelView.as_view()),\n path('add_one//', CartItemAddOneView.as_view()),\n path('reduce_one//', CartItemReduceOneView.as_view()),\n]","repo_name":"Baiel1922/DjangoShop","sub_path":"cart/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"10322195671","text":"import warnings\nfrom typing import Dict, List, TypeVar\n\nimport maup\n\nA = TypeVar(\"A\")\nB = TypeVar(\"B\")\n\n\ndef unitmap(source, target) -> dict:\n \"\"\"\n Creates a mapping from source 
units to target units.\n\n Args:\n source (tuple): 2-tuple containing a `GeoDataFrame` and an index name corresponding\n to the unique identifiers of the units, e.g. `(vtds, \"GEOID20\")`.\n Unique identifiers will be keys in the resulting dictionary.\n target (tuple): 2-tuple containing a `GeoDataFrame` and an index name corresponding\n to the unique identifiers of the units, e.g. `(districts, \"DISTRICTN\")`.\n Unique identifiers will be values in the resulting dictionary.\n\n Returns:\n A dictionary mapping `_from` unique identifiers to `_to` unique identifiers.\n \"\"\"\n # Explode each of the tuples.\n source_shapes, source_index = source\n target_shapes, target_index = target\n\n # Get rid of all the unnecessary data and set the indices of each dataframe\n # to the specified indices.\n source_shapes = source_shapes[[source_index, \"geometry\"]].set_index(source_index)\n target_shapes = target_shapes[[target_index, \"geometry\"]].set_index(target_index)\n\n # Ensure we're in the same CRS.\n target_shapes = target_shapes.to_crs(source_shapes.crs)\n\n # Set a progress bar; filter out all warnings; create the mapping.\n maup.progress.enabled = True\n warnings.simplefilter(\"ignore\", UserWarning)\n warnings.simplefilter(\"ignore\", FutureWarning)\n mapping = maup.assign(source_shapes, target_shapes)\n\n # Reset the mapping's index, zip, and return.\n mapping = mapping.reset_index()\n l, r = \"l\", \"r\"\n mapping.columns = [l, r]\n\n return dict(zip(mapping[l], mapping[r]))\n\n\ndef invert(unitmap: Dict[A, B]) -> Dict[B, List[A]]:\n \"\"\"\n Inverts the provided unit mapping.\n\n Args:\n unitmap: Dictionary taking source unique identifiers to target unique\n identifiers.\n\n Returns:\n A dictionary mapping target unique identifiers to _lists_ of source\n unique identifiers.\n \"\"\"\n # Invert the dictionary.\n inverse: Dict[B, List[A]] = {}\n\n for s, t in unitmap.items():\n if inverse.get(t, None):\n inverse[t].append(s)\n else:\n inverse[t] = [s]\n\n return inverse\n","repo_name":"mggg/gerrytools","sub_path":"gerrytools/geometry/unitmap.py","file_name":"unitmap.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"42599432901","text":"import datetime\n\nimport pytest\nfrom django.contrib.auth import get_user_model\nfrom django.core.exceptions import ValidationError\nfrom django.db import IntegrityError\nfrom model_bakery import baker\n\nfrom apps.documents import models\n\nUser = get_user_model()\n\n\nclass TestDocumentModel:\n @pytest.fixture\n def john(self, db):\n return baker.make(User, username=\"john\")\n\n @pytest.fixture\n def marc(self, db):\n return baker.make(User, username=\"marc\")\n\n @pytest.fixture\n def pete(self, db):\n return baker.make(User, username=\"pete\")\n\n @pytest.fixture\n def document(self, john, marc, pete, db):\n document = models.Document.objects.create(\n name=\"Analysis 1 Theoriesammlung\",\n dtype=models.Document.DTypes.SUMMARY,\n description=\"Dieses Dokument ist eine Zusammenfassung der \\\n Theorie aus dem AnI1-Skript auf 8 Seiten. 
Das Dokument ist \\\n in LaTeX gesetzt, Source ist hier: http://j.mp/fjtleh - \\\n Gute Ergänzungen sind erwünscht!\",\n uploader=john,\n )\n document.DocumentRating.create(user=marc, rating=5)\n document.DocumentRating.create(user=pete, rating=2)\n return document\n\n def test_basic_properties(self, document):\n assert document.name == \"Analysis 1 Theoriesammlung\"\n assert document.description.startswith(\"Dieses Dokument\")\n assert document.dtype == models.Document.DTypes.SUMMARY\n assert not document.public\n assert isinstance(document.uploader, User)\n\n def test_upload_date(self, document):\n \"\"\"Check whether upload date has been set.\"\"\"\n assert isinstance(document.upload_date, datetime.datetime)\n\n def test_rating_average(self, document):\n \"\"\"Test the document rating average calculation.\"\"\"\n assert document.DocumentRating.count() == 2\n assert document.rating() == 4\n\n @pytest.mark.parametrize(\"rating\", [11, 0])\n def test_rating_validation(self, document, marc, rating):\n dr = models.DocumentRating.objects.get(document=document, user=marc)\n dr.rating = rating\n with pytest.raises(ValidationError):\n dr.full_clean()\n\n def test_rating_author_validation(self, document, john):\n \"\"\"A user may not rate his own uploads.\"\"\"\n dr = models.DocumentRating(document=document, user=john)\n with pytest.raises(ValidationError):\n dr.full_clean()\n\n def test_duplicate_ratings_validation(self, document, marc):\n \"\"\"A user cannot rate the same document twice.\"\"\"\n dr = models.DocumentRating(document=document, user=marc)\n with pytest.raises(IntegrityError):\n dr.save()\n\n @pytest.mark.django_db\n def test_null_value_uploader(self):\n d = models.Document()\n d.name = \"spam\"\n d.description = \"ham\"\n d.dtype = models.Document.DTypes.SUMMARY\n try:\n d.save()\n except IntegrityError:\n pytest.fail(\n \"A document with no uploader should not throw an IntegrityError.\"\n )\n\n def test_download_count(self, document):\n models.DocumentDownload.objects.create(document=document)\n models.DocumentDownload.objects.create(document=document)\n models.DocumentDownload.objects.create(document=document)\n assert document.downloadcount() == 3\n\n @pytest.mark.django_db\n def test_license_details_cc(self):\n \"\"\"Test the details of a CC license.\"\"\"\n summary = models.Document.DTypes.SUMMARY\n doc1 = models.Document.objects.create(\n name=\"CC-BY doc\", dtype=summary, license=models.Document.LICENSES.cc3_by\n )\n doc2 = models.Document.objects.create(\n name=\"CC-BY-NC-SA doc\",\n dtype=summary,\n license=models.Document.LICENSES.cc3_by_nc_sa,\n )\n assert doc1.get_license_display() == \"CC BY 3.0\"\n assert doc2.get_license_display() == \"CC BY-NC-SA 3.0\"\n details1 = doc1.license_details()\n details2 = doc2.license_details()\n assert details1[\"url\"] == \"http://creativecommons.org/licenses/by/3.0/deed.de\"\n assert details1[\"icon\"] == \"http://i.creativecommons.org/l/by/3.0/80x15.png\"\n assert (\n details2[\"url\"]\n == \"http://creativecommons.org/licenses/by-nc-sa/3.0/deed.de\"\n )\n assert (\n details2[\"icon\"] == \"http://i.creativecommons.org/l/by-nc-sa/3.0/80x15.png\"\n )\n\n @pytest.mark.django_db\n def test_license_details_pd(self):\n \"\"\"Test the details of a PD (CC0) license.\"\"\"\n doc = models.Document.objects.create(\n name=\"PD doc\",\n dtype=models.Document.DTypes.SUMMARY,\n license=models.Document.LICENSES.pd,\n )\n details = doc.license_details()\n assert doc.get_license_display() == \"Public Domain\"\n assert (\n details[\"url\"] == 
\"http://creativecommons.org/publicdomain/zero/1.0/deed.de\"\n )\n assert details[\"icon\"] == \"http://i.creativecommons.org/p/zero/1.0/80x15.png\"\n\n @pytest.mark.django_db\n def test_license_details_none(self):\n \"\"\"Test the details of a document without a license.\"\"\"\n doc = models.Document.objects.create(\n name=\"PD doc\", dtype=models.Document.DTypes.SUMMARY\n )\n details = doc.license_details()\n assert doc.get_license_display() is None\n assert details[\"url\"] is None\n assert details[\"icon\"] is None\n\n\n@pytest.mark.django_db\ndef test_user_model():\n \"\"\"Test whether the custom name function returns the correct string.\"\"\"\n john = User.objects.create(username=\"john\")\n marc = User.objects.create(username=\"marc\", first_name=\"Marc\")\n pete = User.objects.create(username=\"pete\", last_name=\"Peterson\")\n mike = User.objects.create(username=\"mike\", first_name=\"Mike\", last_name=\"Müller\")\n assert john.name() == \"john\"\n assert marc.name() == \"Marc\"\n assert pete.name() == \"Peterson\"\n assert mike.name() == \"Mike Müller\"\n","repo_name":"studentenportal/web","sub_path":"tests/documents/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":6074,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"48"} +{"seq_id":"3740516106","text":"\"\"\"\nTests for the replication system in ``_zkapauthorizer.replicate``.\n\"\"\"\n\nfrom base64 import b64encode, urlsafe_b64encode\nfrom functools import partial\nfrom io import BytesIO\nfrom operator import attrgetter\nfrom os import urandom\nfrom sqlite3 import OperationalError, ProgrammingError, connect\nfrom typing import IO, Callable, Optional\n\nfrom attrs import frozen\nfrom compose import compose\nfrom eliot import start_action\nfrom fixtures import TempDir\nfrom hypothesis import given\nfrom hypothesis.strategies import lists, text\nfrom tahoe_capabilities import (\n DirectoryReadCapability,\n ReadCapability,\n danger_real_capability_string,\n)\nfrom testtools import TestCase\nfrom testtools.matchers import (\n AfterPreprocessing,\n Always,\n Contains,\n Equals,\n HasLength,\n IsInstance,\n MatchesAll,\n MatchesDict,\n MatchesStructure,\n Mismatch,\n Not,\n raises,\n)\nfrom testtools.matchers._higherorder import MismatchesAll\nfrom testtools.twistedsupport import succeeded\nfrom twisted.internet.defer import Deferred\nfrom twisted.python.filepath import FilePath\n\nfrom ..config import REPLICA_RWCAP_BASENAME\nfrom ..eliot import log_call\nfrom ..model import RandomToken, VoucherStore, aware_now\nfrom ..replicate import (\n EventStream,\n Replica,\n get_events,\n get_tahoe_lafs_direntry_lister,\n get_tahoe_lafs_direntry_pruner,\n get_tahoe_lafs_direntry_replica,\n replication_service,\n with_replication,\n)\nfrom ..spending import SpendingController\nfrom ..sql import Cursor\nfrom ..tahoe import DataProvider, DirectoryEntry, FileNode, ITahoeClient, MemoryGrid\nfrom .common import delayedProxy, from_awaitable\nfrom .fixtures import TemporaryVoucherStore\nfrom .matchers import Matcher, returns\n\n# Helper to construct the replication wrapper without immediately enabling\n# replication.\nwith_postponed_replication = partial(with_replication, enable_replication=False)\n\n\n@frozen\nclass CountBasedPolicy:\n \"\"\"\n A snapshot policy that is based only on the number of files in the\n replica.\n\n :ivar replica_file_limit: The maximum number of files which will be\n allowed to exist in the replica before a snapshot is indicated.\n \"\"\"\n\n 
replica_file_limit: int\n\n def should_snapshot(self, snapshot_size: int, replica_sizes: list[int]) -> bool:\n return len(replica_sizes) >= self.replica_file_limit\n\n\nnaive_policy = CountBasedPolicy(replica_file_limit=1000)\n\n\nclass ReplicationConnectionTests(TestCase):\n \"\"\"\n Tests for the SQLite3 connection-like object returned by\n ``with_replication``.\n \"\"\"\n\n def test_close(self) -> None:\n \"\"\"\n The connection object and its cursors can be closed.\n \"\"\"\n conn = with_postponed_replication(connect(\":memory:\"))\n cursor = conn.cursor()\n cursor.close()\n self.assertThat(\n lambda: cursor.execute(\"SELECT 1\", ()),\n raises(ProgrammingError),\n )\n conn.close()\n expected = ProgrammingError\n try:\n with conn:\n pass\n except expected:\n pass\n except BaseException as e:\n self.fail(f\"using connection after close, {e} raised instead of {expected}\")\n else:\n self.fail(\n f\"using connection after close, nothing raised instead of {expected}\"\n )\n\n def test_context_manager_success(self) -> None:\n \"\"\"\n The connection object is a context manager that commits the transaction\n when the managed block completes normally.\n \"\"\"\n dbpath = self.useFixture(TempDir()).join(\"db.sqlite\")\n conn = with_postponed_replication(connect(dbpath))\n with conn:\n cursor: Cursor = conn.cursor()\n cursor.execute(\"BEGIN\", ())\n cursor.execute('CREATE TABLE \"foo\" (\"a\" INT)', ())\n cursor.execute('INSERT INTO \"foo\" VALUES (?)', (42,))\n\n db = connect(dbpath)\n cursor = db.cursor()\n cursor.execute('SELECT \"a\" FROM foo', ())\n self.assertThat(\n cursor.fetchall(),\n Equals([(42,)]),\n )\n\n def test_context_manager_exception(self) -> None:\n \"\"\"\n The connection object is a context manager that rolls the transaction back\n when the managed block raises an exception.\n \"\"\"\n\n class ApplicationError(Exception):\n pass\n\n dbpath = self.useFixture(TempDir()).join(\"db.sqlite\")\n conn = with_postponed_replication(connect(dbpath))\n try:\n with conn:\n cursor: Cursor = conn.cursor()\n cursor.execute(\"BEGIN\", ())\n cursor.execute('CREATE TABLE \"foo\" (\"a\" INT)', ())\n cursor.execute('INSERT INTO \"foo\" VALUES (?)', (42,))\n raise ApplicationError()\n except ApplicationError:\n pass\n else:\n self.fail(\"expected exception to propagate through context manager\")\n\n db = connect(dbpath)\n cursor = db.cursor()\n\n # The table won't even exist.\n self.assertThat(\n lambda: cursor.execute('SELECT \"a\" FROM foo', ()),\n raises(OperationalError),\n )\n\n def test_important_exception(self) -> None:\n \"\"\"\n An exception inside an `important()` context-manager is allowed to\n propagate\n \"\"\"\n conn = with_postponed_replication(connect(\":memory:\"))\n imp = conn.cursor().important()\n\n class ApplicationError(Exception):\n pass\n\n try:\n with imp:\n raise ApplicationError()\n except ApplicationError:\n pass\n else:\n self.fail(\"exception should propagate\")\n\n def test_importance_ends(self) -> None:\n \"\"\"\n After the `important()` context-manager is exited, the cursor is no longer\n marked as important.\n \"\"\"\n mutations = []\n conn = with_replication(connect(\":memory:\"), True)\n conn.add_mutation_observer(\n lambda cursor, observed: lambda: mutations.append(observed)\n )\n important_statement = \"CREATE TABLE 'important' ( 'a' INT )\"\n less_important_statement = \"CREATE TABLE 'less_important' ( 'a' INT )\"\n with conn:\n cursor = conn.cursor()\n with cursor.important():\n cursor.execute(important_statement, ())\n 
cursor.execute(less_important_statement, ())\n\n self.assertThat(\n mutations,\n Equals(\n [\n [\n (True, important_statement, ((),)),\n (False, less_important_statement, ((),)),\n ]\n ]\n ),\n )\n\n def test_executemany(self) -> None:\n \"\"\"\n The connection's cursor objects have an ``executemany`` method that\n operates in the usual way.\n \"\"\"\n conn = with_postponed_replication(connect(\":memory:\"))\n cursor = conn.cursor()\n cursor.execute(\"BEGIN\", ())\n cursor.execute('CREATE TABLE \"foo\" (\"a\" INT)', ())\n cursor.execute('INSERT INTO \"foo\" VALUES (?)', (1,))\n cursor.executemany('INSERT INTO \"foo\" VALUES (?)', [(3,), (5,), (7,)])\n\n # execute is supposed to update lastrowid but executemany is not\n # supposed to\n self.assertThat(\n cursor.lastrowid,\n Equals(1),\n )\n self.assertThat(\n cursor.rowcount,\n Equals(3),\n )\n cursor.execute('SELECT * FROM \"foo\"', ())\n self.assertThat(\n cursor.fetchall(),\n Equals([(1,), (3,), (5,), (7,)]),\n )\n\n cursor.execute('SELECT * FROM \"foo\"', ())\n for expected in [1, 3, 5, 7]:\n self.assertThat(\n cursor.fetchone(),\n Equals((expected,)),\n )\n self.assertThat(\n cursor.fetchone(),\n Equals(None),\n )\n\n def test_fetchmany(self) -> None:\n \"\"\"\n The connection's cursor objects have a ``fetchmany`` method that operates\n in the usual way.\n \"\"\"\n conn = with_postponed_replication(connect(\":memory:\"))\n cursor = conn.cursor()\n cursor.execute(\"BEGIN\", ())\n cursor.execute('CREATE TABLE \"foo\" (\"a\" INT)', ())\n cursor.executemany('INSERT INTO \"foo\" VALUES (?)', [(3,), (5,), (7,)])\n\n cursor.execute('SELECT \"a\" FROM \"foo\"', ())\n self.assertThat(\n cursor.fetchmany(2),\n Equals([(3,), (5,)]),\n )\n self.assertThat(\n cursor.fetchmany(2),\n Equals([(7,)]),\n )\n self.assertThat(\n cursor.fetchmany(2),\n Equals([]),\n )\n\n\ndef match_upload(\n name_matcher: Matcher[str],\n stream_matcher: Matcher[EventStream],\n) -> Matcher[tuple[str, EventStream]]:\n \"\"\"\n Match faked Tahoe-LAFS EventStream uploads with matching name and\n EventStream.\n \"\"\"\n return _MatchUpload(name_matcher, stream_matcher)\n\n\n@frozen\nclass _MatchUpload(Matcher[tuple[str, DataProvider]]):\n \"\"\"\n Match a two-tuple where the first element is the name of an upload and\n the second element is a function that returns an ``IO[bytes]`` that has\n contents that can be parsed as an ``EventStream``.\n\n :ivar name_matcher: A matcher for the upload name.\n\n :ivar stream_matcher: A matcher for the ``EventStream`` that can be\n deserialized from the bytes of the upload.\n \"\"\"\n\n name_matcher: Matcher[str]\n stream_matcher: Matcher[EventStream]\n\n def match(self, matchee: tuple[str, DataProvider]) -> Optional[Mismatch]:\n \"\"\"\n Do the matching.\n \"\"\"\n name, get_data = matchee\n\n maybe_mismatches: list[Optional[Mismatch]] = []\n maybe_mismatches.append(self.name_matcher.match(name))\n try:\n stream = EventStream.from_bytes(get_data())\n except Exception as e:\n maybe_mismatches.append(Mismatch(f\"Parsing the stream failed: {e}\"))\n else:\n maybe_mismatches.append(self.stream_matcher.match(stream))\n\n mismatches = [m for m in maybe_mismatches if m is not None]\n if len(mismatches) > 0:\n return MismatchesAll(mismatches)\n return None\n\n\nasync def noop_upload(name: str, get_bytes: Callable[[], IO[bytes]]) -> None:\n pass\n\n\nasync def noop_prune(predicate: object) -> None:\n pass\n\n\nasync def noop_list_entries() -> dict[str, DirectoryEntry]:\n return {}\n\n\n# A replica that actually does nothing. 
Used by tests that don't interact\n# with this part of the replication system but still need a value of the right\n# type.\nnoop_replica = Replica(noop_upload, noop_prune, noop_list_entries)\n\n\ndef has_files(grid: MemoryGrid, dir_cap: DirectoryReadCapability, count: int) -> bool:\n \"\"\"\n A predicate that returns True only when the directory indicated has at\n least the given number of children in it.\n \"\"\"\n return len(grid.list_directory(dir_cap)) >= count\n\n\ndef repeat_until(condition: Callable[[], bool], action: Callable[[], object]) -> None:\n \"\"\"\n Run an action repeatedly until a condition is true.\n \"\"\"\n while True:\n action()\n if condition():\n break\n\n\ndef is_event_stream(\n grid: MemoryGrid, **kwargs: Matcher[object]\n) -> Matcher[tuple[str, dict[str, object]]]:\n \"\"\"\n Match a Tahoe-LAFS directory entry representing a file which can be\n retrieved from the given grid and which contains an ``EventStream`` with a\n structure matched by the given keyword arguments.\n \"\"\"\n\n def is_filenode() -> Matcher[FileNode]:\n return IsInstance(FileNode) # type: ignore[no-any-return]\n\n def download_event_stream(cap: ReadCapability) -> EventStream:\n return EventStream.from_bytes(BytesIO(grid.download(cap)))\n\n return MatchesAll( # type: ignore[no-any-return]\n is_filenode(),\n AfterPreprocessing(\n compose(download_event_stream, attrgetter(\"ro_uri\")),\n MatchesStructure(**kwargs),\n ),\n )\n\n\n@log_call(action_type=\"zkapauthorizer:tests:add-tokens\")\ndef add_tokens(store: VoucherStore) -> None:\n \"\"\"\n Add a token to the given store.\n \"\"\"\n tokens = [RandomToken(b64encode(urandom(96)))]\n voucher = urlsafe_b64encode(urandom(32))\n store.add(voucher, len(tokens), 1, lambda: tokens)\n\n\nclass ReplicationServiceTests(TestCase):\n \"\"\"\n Tests for ``_ReplicationService``.\n \"\"\"\n\n def test_enable_replication_on_connection(self) -> None:\n \"\"\"\n When the service starts it enables replication on its database connection.\n \"\"\"\n tvs = self.useFixture(TemporaryVoucherStore(aware_now))\n store = tvs.store\n\n # We'll spy on \"events\" which are only captured when the connection is\n # in replication mode. To start, make sure that database changes are\n # not already being captured. They should not be since nothing has\n # placed the connection into replication mode yet.\n store.start_lease_maintenance().finish()\n self.assertThat(get_events(store._connection).changes, HasLength(0))\n\n service = replication_service(store._connection, noop_replica, naive_policy)\n service.startService()\n self.addCleanup(service.stopService)\n\n # Now that replication has been enabled. 
Some events should now be\n # captured.\n store.start_lease_maintenance().finish()\n self.assertThat(get_events(store._connection).changes, Not(HasLength(0)))\n\n def test_first_snapshot(self) -> None:\n \"\"\"\n A snapshot is uploaded if there is no snapshot in the replica directory\n already.\n \"\"\"\n tvs = self.useFixture(TemporaryVoucherStore(aware_now))\n store = tvs.store\n\n grid = MemoryGrid()\n replica_obj = grid.make_directory()\n client = grid.client()\n\n replica = get_tahoe_lafs_direntry_replica(client, replica_obj)\n service = replication_service(store._connection, replica, naive_policy)\n service.startService()\n self.addCleanup(service.stopService)\n\n self.assertThat(\n grid.list_directory(replica_obj.reader),\n Contains(\"snapshot\"),\n )\n\n def test_lingering_event_stream(self) -> None:\n \"\"\"\n If there are changes recorded in the local event stream that should be\n uploaded then they are uploaded soon after the replication service\n starts even if no further local changes are made.\n \"\"\"\n # The starting state that we want is:\n # (1) Replication is enabled\n # (2) A snapshot has been uploaded\n # (3) There is no replication service\n # (4) There are extra changes in the event-stream\n #\n # Then we can create the replication service and watch it react to the\n # extra event-stream changes.\n #\n # To get to this state, we'll make a store and let it upload a\n # snapshot. Then we'll stop its service, make some changes, and make\n # and start a new replication service for the new store.\n tvs = self.useFixture(TemporaryVoucherStore(aware_now))\n store = tvs.store\n\n grid = MemoryGrid()\n replica_obj = grid.make_directory()\n client = grid.client()\n\n replica = get_tahoe_lafs_direntry_replica(client, replica_obj)\n # This accomplishes (1).\n service = replication_service(store._connection, replica, naive_policy)\n service.startService()\n\n # Demonstrate (2).\n self.assertThat(\n set(grid.list_directory(replica_obj.reader)),\n Equals({\"snapshot\"}),\n )\n\n # Accomplish (3).\n self.assertThat(service.stopService(), succeeded(Always()))\n\n # Introduce some \"important\" changes to accomplish (4).\n add_tokens(store)\n\n # Verify the important changes are still in the local database and\n # have not been uploaded to the replica.\n self.assertThat(\n get_events(store._connection).changes,\n Not(HasLength(0)),\n )\n self.assertThat(\n set(grid.list_directory(replica_obj.reader)),\n Equals({\"snapshot\"}),\n )\n\n # Now create and start the new replication service, expecting it will\n # upload the changes in the local event stream.\n service = replication_service(store._connection, replica, naive_policy)\n service.startService()\n\n self.assertThat(\n get_events(store._connection).changes,\n HasLength(0),\n )\n self.assertThat(\n set(grid.list_directory(replica_obj.reader)),\n Equals({\"snapshot\", \"event-stream-2\"}),\n )\n\n def test_replicate(self) -> None:\n \"\"\"\n Making changes to the voucher store while replication is turned on\n causes event-stream objects to be uploaded.\n \"\"\"\n tvs = self.useFixture(TemporaryVoucherStore(aware_now))\n\n grid = MemoryGrid()\n replica_obj = grid.make_directory()\n replica_cap = danger_real_capability_string(replica_obj)\n rwcap_file = FilePath(tvs.config.get_private_path(REPLICA_RWCAP_BASENAME))\n rwcap_file.parent().makedirs()\n rwcap_file.setContent(replica_cap.encode(\"ascii\"))\n\n # Predicate to check if the replica directory has at least some number\n # of files in it.\n has_files_bound = partial(has_files, 
grid, replica_obj.reader)\n\n        # we use this to control when the first upload happens, so that we\n        # actually use the queue\n        delay_controller, delay_client = delayedProxy(\n            ITahoeClient,\n            grid.client(tvs.config._basedir),\n        )\n\n        srv = replication_service(\n            tvs.store._connection,\n            get_tahoe_lafs_direntry_replica(delay_client, replica_obj),\n            naive_policy,\n        )\n\n        # run the service and produce some fake voucher etc changes\n        # that cause \"events\" to be issued into the database\n        srv.startService()\n        self.addCleanup(srv.stopService)\n\n        with start_action(action_type=\"zkapauthorizer:tests:wait-for-snapshot\"):\n            repeat_until(partial(has_files_bound, 1), delay_controller.run)\n\n        # then it does a list_directory for pruning purposes. If we don't let\n        # it run then the event-stream upload for the first add_tokens() can't\n        # start and the subsequent add_tokens calls have their data merged\n        # into an upload with the first.\n        with start_action(action_type=\"zkapauthorizer:tests:run-list-directory\"):\n            delay_controller.run()\n\n        # Add some tokens, which are considered important.\n        add_tokens(tvs.store)\n\n        # Still, no uploads can complete until we let them. Verify that's\n        # working as intended by asserting there are no event streams on the\n        # grid.\n        self.assertThat(\n            sorted(grid.list_directory(replica_obj.reader)),\n            Equals([\"snapshot\"]),\n        )\n\n        # Add two more groups of tokens. These are also important. They\n        # should be included in an upload but they cannot be included in the\n        # upload that already started.\n        add_tokens(tvs.store)\n        add_tokens(tvs.store)\n\n        # Finish the first event-stream upload.\n        with start_action(action_type=\"zkapauthorizer:tests:wait-for-event-stream\"):\n            repeat_until(partial(has_files_bound, 2), delay_controller.run)\n\n        self.assertThat(\n            sorted(grid.list_directory(replica_obj.reader)),\n            Equals(sorted([\"snapshot\", \"event-stream-2\"])),\n        )\n\n        # Allow subsequent uploads.\n        with start_action(action_type=\"zkapauthorizer:tests:wait-for-event-stream\"):\n            repeat_until(partial(has_files_bound, 3), delay_controller.run)\n\n        # Now both the first upload and a second upload should have completed.\n        # There is no third upload because the data for the 2nd and 3rd\n        # add_tokens calls should have been combined into a single upload.\n        self.assertThat(\n            grid.list_directory(replica_obj.reader),\n            MatchesDict(\n                {\n                    \"snapshot\": Always(),\n                    \"event-stream-2\": is_event_stream(\n                        grid,\n                        changes=HasLength(2),\n                        highest_sequence=returns(Equals(2)),\n                    ),\n                    \"event-stream-6\": is_event_stream(\n                        grid,\n                        changes=HasLength(4),\n                        highest_sequence=returns(Equals(6)),\n                    ),\n                }\n            ),\n        )\n\n        # since we've uploaded everything, there should be no\n        # events in the store\n        self.assertEqual([], get_events(tvs.store._connection).changes)\n\n    def test_snapshot_prune(self) -> None:\n        \"\"\"\n        Uploading a snapshot prunes irrelevant event-stream instances from\n        the replica\n        \"\"\"\n        tvs = self.useFixture(TemporaryVoucherStore(aware_now))\n\n        grid = MemoryGrid()\n        replica_obj = grid.make_directory()\n        replica_cap = danger_real_capability_string(replica_obj)\n        rwcap_file = FilePath(tvs.config.get_private_path(REPLICA_RWCAP_BASENAME))\n        rwcap_file.parent().makedirs()\n        rwcap_file.setContent(replica_cap.encode(\"ascii\"))\n\n        # Predicate to check if the replica directory has at least some number\n        # of files in it.\n        has_files_bound = partial(has_files, grid, replica_obj.reader)\n\n        # we use this to control when the first upload happens, so that we\n        # actually use the queue\n        
delay_controller, delay_client = delayedProxy(\n ITahoeClient,\n grid.client(tvs.config._basedir),\n )\n\n srv = replication_service(\n tvs.store._connection,\n get_tahoe_lafs_direntry_replica(delay_client, replica_obj),\n naive_policy,\n )\n\n # run the service and produce some fake voucher etc changes\n # that cause \"events\" to be issued into the database\n srv.startService()\n self.addCleanup(srv.stopService)\n\n with start_action(action_type=\"zkapauthorizer:tests:wait-for-snapshot\"):\n repeat_until(partial(has_files_bound, 1), delay_controller.run)\n\n # then it does a list_directory for pruning purposes. if we don't let\n # it run then the event-stream upload for the first add_tokens() can't\n # start and the subsequent add_tokens calls have their data merged\n # into an upload with the first.\n with start_action(action_type=\"zkapauthorizer:tests:run-list-directory\"):\n delay_controller.run()\n\n # Add some tokens, which are considered important.\n voucher0 = urlsafe_b64encode(urandom(32))\n self.assertThat(\n Deferred.fromCoroutine(tvs.redeem(voucher0, 20)),\n succeeded(Always()),\n )\n\n # Allow the resulting event-stream upload to complete.\n with start_action(action_type=\"zkapauthorizer:tests:wait-for-event-stream\"):\n repeat_until(partial(has_files_bound, 2), delay_controller.run)\n\n # ..so we should have uploaded here\n self.assertThat(\n grid.list_directory(replica_obj.reader),\n MatchesDict(\n {\n \"snapshot\": Always(),\n \"event-stream-21\": is_event_stream(\n grid,\n changes=HasLength(21),\n highest_sequence=returns(Equals(21)),\n ),\n }\n ),\n )\n\n # do some work that isn't deemed \"important\"\n pass_factory = SpendingController.for_store(\n tokens_to_passes=tvs.redeemer.tokens_to_passes,\n store=tvs.store,\n )\n pass_factory.get(b\"message0\", 10)\n\n self.assertNotEqual(tuple(), get_events(tvs.store._connection).changes)\n\n # we should _not_ have uploaded the above changes yet (because\n # they aren't \"important\") and so they should still exist in\n # the store\n self.assertNotEqual(tuple(), get_events(tvs.store._connection).changes)\n\n # trigger a snapshot upload\n srv.queue_snapshot_upload() # type: ignore\n\n # Let the snapshot upload and pruning processes run.\n delay_controller.run()\n delay_controller.run()\n delay_controller.run()\n delay_controller.run()\n delay_controller.run()\n\n # now there should be no local changes\n self.assertEqual([], get_events(tvs.store._connection).changes)\n # ...and we should have pruned the prior event-stream .. 
so we\n # interrogate the predicate we _were_ given to ensure it would\n # have said \"yes\" to the event-stream we did upload\n\n self.assertThat(\n grid.list_directory(replica_obj.reader),\n MatchesDict(\n {\n \"snapshot\": Always(),\n }\n ),\n )\n\n def test_snapshot_again(self) -> None:\n \"\"\"\n A new snapshot is uploaded and existing event streams are pruned if the\n cost to maintain the current replica snapshot and event streams is\n more than X times the cost to store a new snapshot of the database.\n \"\"\"\n # The starting state that we want is:\n # (1) Replication is enabled\n # (2) A snapshot has been uploaded\n # (3) An event stream has been uploaded\n #\n # Then we can have the snapshot policy decide it is time to upload a\n # snapshot and observe the consequences.\n tvs = self.useFixture(TemporaryVoucherStore(aware_now))\n store = tvs.store\n\n grid = MemoryGrid()\n replica_dircap = grid.make_directory()\n client = grid.client()\n\n # This policy will decide it is time to upload after 1 snapshot + 2\n # event streams are uploaded.\n snapshot_policy = CountBasedPolicy(replica_file_limit=3)\n\n replica = get_tahoe_lafs_direntry_replica(client, replica_dircap)\n # This accomplishes (1).\n service = replication_service(store._connection, replica, snapshot_policy)\n service.startService()\n\n # Demonstrate (2).\n self.assertThat(\n set(grid.list_directory(replica_dircap.reader)),\n Equals({\"snapshot\"}),\n )\n\n # Make an important change to get to (3).\n add_tokens(store)\n self.assertThat(\n set(grid.list_directory(replica_dircap.reader)),\n Equals({\"snapshot\", \"event-stream-2\"}),\n )\n\n # Make another important change to push us over the limit.\n add_tokens(store)\n\n # The event streams should have been pruned and the new snapshot\n # uploaded.\n self.assertThat(\n set(grid.list_directory(replica_dircap.reader)),\n Equals({\"snapshot\"}),\n )\n\n\nclass TahoeDirectoryListerTests(TestCase):\n \"\"\"\n Tests for ``get_tahoe_lafs_direntry_lister``.\n \"\"\"\n\n @given(\n directory_names=lists(text(max_size=100), max_size=3, unique=True),\n file_names=lists(text(max_size=100), max_size=3, unique=True),\n )\n def test_list(self, directory_names: list[str], file_names: list[str]) -> None:\n \"\"\"\n ``get_tahoe_lafs_direntry_lister`` returns a callable that can read the\n entries details from a Tahoe-LAFS directory.\n \"\"\"\n filedata = b\"somedata\"\n grid = MemoryGrid()\n dirobj = grid.make_directory()\n for name in directory_names:\n grid.link(dirobj, name, grid.make_directory())\n for name in file_names:\n grid.link(dirobj, name, grid.upload(filedata))\n\n client = grid.client()\n lister = get_tahoe_lafs_direntry_lister(client, dirobj)\n\n expected = {name: DirectoryEntry(\"dirnode\", 0) for name in directory_names}\n expected.update(\n {name: DirectoryEntry(\"filenode\", len(filedata)) for name in file_names}\n )\n\n self.assertThat(\n from_awaitable(lister()),\n succeeded(Equals(expected)),\n )\n\n\nclass TahoeDirectoryPrunerTests(TestCase):\n \"\"\"\n Tests for `get_tahoe_lafs_direntry_pruner`\n \"\"\"\n\n def test_prune(self) -> None:\n \"\"\"\n ``get_tahoe_lafs_direntry_pruner`` returns a function that unlinks entries\n from a Tahoe-LAFS mutable directory.\n \"\"\"\n ignore = [\"one\", \"two\"]\n delete = [\"three\", \"four\"]\n\n grid = MemoryGrid()\n dirobj = grid.make_directory()\n for name in ignore + delete:\n filecap = grid.upload(b\"some data\")\n grid.link(dirobj, name, filecap)\n\n client = grid.client()\n pruner = get_tahoe_lafs_direntry_pruner(client, 
dirobj)\n\n # ask the pruner to delete some of the files\n self.assertThat(\n # prune(..) returns a Coroutine but it declares it as an Awaitable\n # so mypy tells us it won't work with fromCoroutine.\n Deferred.fromCoroutine(pruner(lambda fname: fname in delete)), # type: ignore\n succeeded(Always()),\n )\n\n self.assertThat(\n set(grid.list_directory(dirobj.reader).keys()),\n Equals(set(ignore)),\n )\n","repo_name":"PrivateStorageio/ZKAPAuthorizer","sub_path":"src/_zkapauthorizer/tests/test_replicate.py","file_name":"test_replicate.py","file_ext":"py","file_size_in_byte":29445,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"12433821464","text":"import math\n\nimport numpy as np\n\n\nnp.seterr('raise')\n\ndef DTqxDTqy(xT, yT, XS, YS, M):\n\n\n for q in range(M):\n rih = math.sqrt((xT-XS[q])**2 + (yT-YS[q])**2)\n DTqx = (YS[q] - yT) / (2 * np.pi * rih**2)\n DTqy = (xT - XS[q]) / (2 * np.pi * rih**2)\n\n\n\n return DTqx, DTqy\n\n","repo_name":"DehanYuan/Unsteady_Panel_Method","sub_path":"DTqxDTqy.py","file_name":"DTqxDTqy.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73088594064","text":"import numpy as np\nimport cv2\nimport skimage.segmentation as seg\nimport math\nimport os\n\nfrom numpy.core.multiarray import normalize_axis_index\nfrom skimage.util import img_as_float\n\n# function used to calculate edge weights\ndef discrete_sum(a, axis=-1):\n\n a = np.asanyarray(a)\n nd = a.ndim\n if nd == 0:\n raise ValueError(\"diff requires input that is at least one dimensional\")\n axis = normalize_axis_index(axis, nd)\n\n combined = []\n combined.append(a)\n\n if len(combined) > 1:\n a = np.concatenate(combined, axis)\n\n slice1 = [slice(None)] * nd\n slice2 = [slice(None)] * nd\n slice1[axis] = slice(1, None)\n slice2[axis] = slice(None, -1)\n slice1 = tuple(slice1)\n slice2 = tuple(slice2)\n\n op = not_equal if a.dtype == np.bool_ else np.add\n for _ in range(1):\n a = op(a[slice1], a[slice2])\n\n return a\n\n\ndef _make_graph_edges_3d(n_x, n_y, n_z):\n \"\"\"Returns a list of edges for a 3D image.\n Parameters\n ----------\n n_x : integer\n The size of the grid in the x direction.\n n_y : integer\n The size of the grid in the y direction\n n_z : integer\n The size of the grid in the z direction\n Returns\n -------\n edges : (2, N) ndarray\n with the total number of edges::\n N = n_x * n_y * (nz - 1) +\n n_x * (n_y - 1) * nz +\n (n_x - 1) * n_y * nz\n Graph edges with each column describing a node-id pair.\n \"\"\"\n vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))\n edges_deep = np.vstack((vertices[..., :-1].ravel(),\n vertices[..., 1:].ravel()))\n edges_right = np.vstack((vertices[:, :-1].ravel(),\n vertices[:, 1:].ravel()))\n edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))\n edges = np.hstack((edges_deep, edges_right, edges_down))\n return edges\n\n\ndef _compute_weights_3d(data, **param):\n # Weight calculation is main difference in multispectral version\n # Original gradient**2 replaced with sum of gradients ** 2 (a[...,channel][:-1]+a[...,channel][1:])\n spacing = np.ones(3)\n gradients = np.concatenate(\n [discrete_sum(data[..., 0], axis=ax).ravel() / spacing[ax]\n for ax in [2, 1, 0] if data.shape[ax] > 1], axis=0) ** 2\n for channel in range(1, data.shape[-1]):\n gradients += np.concatenate(\n [discrete_sum(data[..., channel], axis=ax).ravel() / spacing[ax]\n for ax in [2, 1, 0] 
if data.shape[ax] > 1], axis=0) ** 2\n\n # All channels considered together in this standard deviation\n scale_factor = -param['beta'] / (data.std())\n weights = np.exp(scale_factor * gradients)\n weights += param['eps']\n return weights\n\n\n\ndef construct_graph(data,**param):\n\n data = np.atleast_3d(img_as_float(data))[..., np.newaxis]\n l_x, l_y, l_z = data.shape[:3]\n\n edges = _make_graph_edges_3d(l_x, l_y, l_z)\n weights = _compute_weights_3d(data, **param)\n\n return edges, weights","repo_name":"SiddharthSaravanan/UHDImageSegmentation","sub_path":"Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"27370014101","text":"def encrypt(key, message):\n \"\"\"\n 使用给定的密钥对讯息进行加密。\n\n Args:\n key (str): 加密的密钥。必须是一个非空字串。\n message (str): 要加密的讯息。可以是任意长度的字串。\n\n Returns:\n str: 加密后的讯息,以字串形式返回。该字串包含了讯息的加密版本,可以被解密回原始讯息。\n\n Raises:\n ValueError: 当密钥为空时,引发ValueError。\n\n Example:\n >>> encrypt(\"password\", \"Hello World!\")\n '8\\x04\\x1f\\x1f\\x18O%\\x0b\\x02\\r\\x17R'\n\n \"\"\"\n # 将密钥转换为ASCII码序列\n key_bytes = [ord(char) for char in key]\n # 建立空的加密后资讯串列\n encrypted_bytes = []\n # 逐字元对讯息进行加密\n for i in range(len(message)):\n # 取得讯息字元的ASCII码\n message_byte = ord(message[i])\n # 取得密钥字元的ASCII码\n key_byte = key_bytes[i % len(key_bytes)]\n # 将密钥字元与讯息字元做XOR运算\n encrypted_byte = message_byte ^ key_byte\n # 将加密后的字元加入加密后资讯串列\n encrypted_bytes.append(encrypted_byte)\n # 将加密后资讯串列转换为字串\n encrypted_message = ''.join([chr(byte) for byte in encrypted_bytes])\n return encrypted_message","repo_name":"Keycatowo/owo_tools","sub_path":"owo_tools/encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38556701834","text":"\nimport argparse\nimport glob, shutil\nimport sys\nimport traceback\n\nfrom typing import *\n\nimport dpctl.connection as devconn\nimport dpctl.protocol as devproto\nimport dpctl.commands as devcmds\n\n\ndef dpctl_do(args: Any) -> int:\n def get_device_info(conn, args): return devcmds.get_device_info(conn)\n def get_mode_info(conn, args): return devcmds.get_mode_info(conn, args.mode)\n def set_mode(conn, args): return devcmds.set_mode(conn, args.mode)\n def bootloader(conn, args): return devcmds.set_mode(conn, 0)\n\n def uart_hw_flowctl(conn, args):\n if args.get: return devcmds.uart_hw_flowctl_get(conn)\n fcen = args.set\n if isinstance(fcen, list): fcen = fcen[0]\n if fcen is None:\n if args.enable: fcen = True\n elif args.disable: fcen = False\n if fcen is None:\n print(\"Error: none of '--get', '--set', '--enable' or '--disable' specified.\")\n return 1\n return devcmds.uart_hw_flowctl_set(conn, fcen)\n def tempsensor(conn, args):\n if args.get: return devcmds.tempsensor_get(conn)\n tsen = args.set\n if isinstance(tsen, list): tsen = tsen[0]\n if tsen is None:\n if args.disable: tsen = 0xff\n if tsen is None:\n print(\"Error: none of '--get', '--set' or '--disable' specified.\")\n return 1\n return devcmds.tempsensor_set(conn, tsen)\n def jtag_scan(conn, args):\n return devcmds.jtag_scan(conn, args.type, args.start, args.end)\n def sump_ovclk(conn, args):\n if args.get: return devcmds.sump_overclock_get(conn)\n oven = args.set\n if isinstance(oven, list): oven = oven[0]\n if oven is None:\n if args.enable: oven = 1\n elif args.disable: oven = 0\n if oven is None:\n print(\"Error: none of '--get', '--set', '--enable' or 
'--disable' specified.\")\n return 1\n return devcmds.sump_overclock_set(conn, oven)\n\n\n #print(repr(args))\n cmds = {\n 'get-device-info': get_device_info,\n 'get-mode-info': get_mode_info,\n 'set-mode': set_mode,\n 'bootloader': bootloader,\n\n 'uart-cts-rts': uart_hw_flowctl,\n 'tempsensor': tempsensor,\n 'jtag-scan': jtag_scan,\n 'sump-overclock': sump_ovclk,\n }\n\n if args.subcmd is None:\n print(\"No subcommand specified?!\")\n return 1\n\n subfn = cmds.get(args.subcmd, None)\n if subfn is None:\n print(\"Unknown subcommand '%s'\" % args.subcmd)\n return 1\n\n conn = devconn.connect(args.conn)\n if isinstance(conn, str):\n print(\"Could not connect to a device: %s.\" % conn)\n return 1\n\n with devproto.DPDevice(conn) as dev:\n return subfn(dev, args)\n\n\ndef main() -> int:\n parser = argparse.ArgumentParser(prog=\"dpctl\")\n\n def auto_int(x):\n return int(x, 0)\n\n # commands:\n # * get device info\n # * get mode info\n # * set mode\n #\n # * mode 1 (general):\n # * 0x16 0x??: usb hwflowctl on/off, 0x??=0xc3: get current value\n # * 0x15 0x00: get tempsensor active/address\n # * 0x15 0x01 0x??: set tempsensor active/address\n #\n # * mode 2 (isp/jtag/...): probably nothing\n #\n # * mode 3 (jtag pinout scanner):\n # * 0x30: get status\n # * 0x31: get result (5 bytes: pin numbers of tck,tms,tdi,tdo,trst)\n # * 0x32 0xNN 0xMM: start scan (pins 0xNN..0xMM)\n #\n # * mode 4 (sump logic analyzer):\n # * 0x40: get overclock\n # * 0x41: set overclock\n #\n # * mode 5 (ftdi/fx2 emul): probably nothing\n\n parser.add_argument('--conn', type=str, default=None,\n help=\"Connection string. Either a dragonprobe-char \"+\\\n \"device in /dev, a USB bus.device number, or a USB \" + \\\n \"VID:PID pair. Defaults to trying /dev/dragonprobe-* \" + \\\n \"(if there is only one), and cafe:1312 otherwise.\")\n #parser.descripiton = ...\n\n subcmds = parser.add_subparsers(required=True, metavar=\"subcommand\",\n dest=\"subcmd\", help=\"Command to send to \"+\\\n \"the device\",\n description=\"For more info on each \" + \\\n \"subcommand, run the program with \" + \\\n \"'subcommand --help' as arguments.\")\n\n # general subcommands\n getdevinfo = subcmds.add_parser(\"get-device-info\", help=\"Shows device info\")\n\n getmodeinfo = subcmds.add_parser(\"get-mode-info\", help=\"Shows mode info.\"+\\\n \" A mode can optionally be specified, \"+\\\n \"default is the current mode.\")\n getmodeinfo.add_argument('mode', default=None, nargs='?', #type=int,\n help=\"Mode to get info of. 
Defaults to the \" + \\\n \"current mode, 'all' means all modes.\")\n\n setmode = subcmds.add_parser(\"set-mode\", help=\"Set the device mode\")\n setmode.add_argument('mode', type=int, help=\"Mode to switch to, required.\")\n\n bootloader = subcmds.add_parser(\"bootloader\", help=\"Set the device in bootloader mode\")\n\n # mode 1 commands\n usbhwfctl = subcmds.add_parser(\"uart-cts-rts\", help=\"Get, enable/disable\"+\\\n \" UART hardware flow control\")\n uartopts = usbhwfctl.add_mutually_exclusive_group()\n uartopts.add_argument('--get', default=False, action='store_true',\n help=\"Get current hardware flow control setting\")\n uartopts.add_argument('--set', default=None, type=bool, nargs=1,\n help=\"Set hardware flow control\")\n uartopts.add_argument('--enable', default=False, action='store_true',\n help=\"Enable hardware flow control, short for \"+\\\n \"--set true\")\n uartopts.add_argument('--disable', default=False, action='store_true',\n help=\"Disable hardware flow control, short for \"+\\\n \"--set false\")\n\n tempsense = subcmds.add_parser(\"tempsensor\", help=\"Get or set the IRC \" + \\\n \"emulation enable/address of the \" + \\\n \"temperature sensor.\")\n tsopts = tempsense.add_mutually_exclusive_group()\n tsopts.add_argument('--get', default=False, action='store_true',\n help=\"Get current I2C emul state/address\")\n tsopts.add_argument('--set', default=None, type=auto_int, nargs=1,\n help=\"Set emulated I2C address of the temperature \"+\\\n \"sensor. 0 (or another invalid I2C device address) \"+\\\n \"to disable the emulated I2C sensor device.\")\n tsopts.add_argument('--disable', default=False, action='store_true',\n help=\"Disable emulated I2C temperature sensor, \"+\\\n \"short for --set true\")\n\n jtagscan = subcmds.add_parser(\"jtag-scan\", help=\"JTAG pinout scanner\")\n jtagscan.add_argument(\"type\", type=str, help=\"Pinout type to check for.\",\n choices=['jtag', 'swd']) # TODO: SBW etc\n jtagscan.add_argument(\"start\", type=int, help=\"Number of the start \"+\\\n \"of the pin range to scan (inclusive)\")\n jtagscan.add_argument(\"end\", type=int, help=\"Number of the end of \"+\\\n \"the pin range to scan (inclusive)\")\n\n sumpla = subcmds.add_parser(\"sump-overclock\",\n help=\"SUMP logic analyzer overclock\")\n sumpopts = sumpla.add_mutually_exclusive_group()\n sumpopts.add_argument('--get', default=False, action='store_true',\n help=\"Get current overclocking state\")\n sumpopts.add_argument('--set', default=None, type=int, nargs=1,\n help=\"Set current overclocking state\")\n sumpopts.add_argument('--enable', default=False, action='store_true',\n help=\"Enable overclocking, short for --set 1\")\n sumpopts.add_argument('--disable', default=False, action='store_true',\n help=\"Disable overclocking, short for --set 0\")\n\n args = parser.parse_args()\n return dpctl_do(args)\n\n","repo_name":"ca4ti/DragonProbe","sub_path":"host/dpctl/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8326,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31560286360","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: maxschallwig\n\"\"\"\n\nimport requests\n\nurl = \"http://finance.yahoo.com/quote/AAPL?p=AAPL\"\n\nresponse = requests.get(url)\n#transform into dictionary by adding{}\ndata = {}\nIndicators = {\"Previous Close\":[],\n \"Open\":[],\n \"Bid\":[],\n \"Ask\":[],\n \"Day's Range\":[],\n \"52 Week Range\":[],\n \"Volume\":[],\n \"Avg. 
Volume\":[],\n \"Market Cap\":[],\n \"Beta\":[],\n \"PE Ratio (TTM)\":[],\n \"EPS (TTM)\":[],\n \"Earnings Date\":[],\n \"Dividend & Yield\":[],\n \"Ex-Dividend Date\":[],\n \"1y Target Est\":[]}\nprint(response)\nprint(response.status_code)\n\n#htmlText = response.text\n#print(htmlText)\n#splitList = htmlText.split(\"Previous Close\")\n#print(splitList)\n\n#loops through dictionary\nfor indicator in Indicators:\n# break\n print(indicator)\n #split based on the indictator\n splitList = htmlText.split(indicator)\n afterFirstSplit = splitList[1].split(\"-->\")[1]\n afterSecondSplit = afterFirstSplit.split(\" om tal %2 == 0 --> jämnt, annars udda\nif tal%2 == 0:\n print(\"Talet är jämnt\")\nelse:\n print(\"Talet är udda\") \n\n","repo_name":"johnhjernestam/John_Hjernestam_TE19C","sub_path":"introkod_syntax/Annat/if_satser1.py","file_name":"if_satser1.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"sv","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4318414181","text":"import cocotb_test.simulator\n\nimport cocotb\nfrom cocotb.log import SimLog\nfrom cocotb.clock import Clock\nfrom cocotb.triggers import RisingEdge, FallingEdge, Timer, ClockCycles\nimport random\n\nb = 0x0b4e0ef37bc32127\n\ndef twos_comp(val):\n if val < 0:\n val = (1<<64) + val\n else:\n if (val & (1 << 63)) != 0:\n val = val - (1<<64)\n return val\n\nasync def check (signal, clk, value, mask, num):\n await ClockCycles(clk,12)\n await FallingEdge(clk)\n expected = twos_comp((twos_comp(value & mask) * b)) & 0xffffffffffffffff\n assert str(signal.value) == \"{0:{fill}64b}\".format(expected, fill='0'), \"error on cycle\"+str(num)\n # print (hex(expected))\n\n@cocotb.test()\nasync def run_test_mul_hash(dut):\n\n clk = cocotb.fork(Clock(dut.clk, 2).start())\n mask = (0xffffffffffffffff << (8*int(dut.MSK_BYTES.value))) & 0xffffffffffffffff\n print (\"Mask:\", hex(mask))\n \n await RisingEdge(dut.clk)\n\n val = 0x8822FBF8A5FAFFFF\n dut.a <= val\n cocotb.fork(check(dut.p, dut.clk, val, mask, -3))\n await RisingEdge(dut.clk)\n val = 2**64 - 1; \n dut.a <= val\n cocotb.fork(check(dut.p, dut.clk, val, mask, -2))\n await RisingEdge(dut.clk)\n val = 0; \n dut.a <= val\n cocotb.fork(check(dut.p, dut.clk, val, mask, -1))\n\n for i in range (0,10000,2):\n await RisingEdge(dut.clk)\n val = random.randrange(2**63) \n dut.a <= val\n cocotb.fork(check(dut.p, dut.clk, val, mask, i))\n await RisingEdge(dut.clk)\n val = val | (1<<63)\n dut.a <= val\n cocotb.fork(check(dut.p, dut.clk, val, mask, i+1))\n\n await Timer(25)\n await RisingEdge(dut.clk)\n","repo_name":"ucsdsysnet/Rosebud","sub_path":"fpga_src/accel/pigasus_sme/tb/test_mul_hash.py","file_name":"test_mul_hash.py","file_ext":"py","file_size_in_byte":1637,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"48"} +{"seq_id":"42699850595","text":"import torch\n\nfrom catalyst.utils import metrics\n\n\ndef test_hitrate():\n \"\"\"\n Tests for catalyst.utils.metrics.hitrate metric.\n \"\"\"\n y_pred = [0.5, 0.2]\n y_true = [1.0, 0.0]\n\n hitrate = metrics.hitrate(torch.Tensor([y_pred]), torch.Tensor([y_true]))\n assert hitrate == 0.5\n\n # check 1 simple case\n y_pred = [0.5, 0.2]\n y_true = [0.0, 0.0]\n\n hitrate = metrics.hitrate(torch.Tensor([y_pred]), torch.Tensor([y_true]))\n assert hitrate == 
0.0\n","repo_name":"hakanaku1234/catalyst-1","sub_path":"catalyst/metrics/tests/test_hitrate.py","file_name":"test_hitrate.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"17581252130","text":"import sys\nimport time\nimport os\nfrom contextlib import contextmanager\n\nsys.path.append('..')\n\nfrom coax import open_serial_interface, Poll, PollAck\n\nDEFAULT_SERIAL_PORT = '/dev/ttyACM0'\n\n@contextmanager\ndef open_example_serial_interface(reset=True, poll_flush=True):\n serial_port = os.environ.get('COAX_PORT', DEFAULT_SERIAL_PORT)\n\n print(f'Opening {serial_port}...')\n\n with open_serial_interface(serial_port, reset=False) as interface:\n if reset:\n print('Resetting interface...')\n\n interface.reset()\n\n if interface.legacy_firmware_detected:\n print(f'Firmware version is {interface.legacy_firmware_version}')\n\n if poll_flush:\n print('POLLing...')\n\n count = 0\n\n poll_response = interface.execute(Poll(), timeout=1)\n\n while poll_response:\n interface.execute(PollAck())\n\n count += 1\n\n poll_response = interface.execute(Poll(), timeout=1)\n\n print(f'ACK\\'d {count} POLL responses')\n\n yield interface\n","repo_name":"lowobservable/coax","sub_path":"pycoax/examples/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"48"} +{"seq_id":"10432213835","text":"def getNumber(prompt):\n\n while(True):\n\n value = input(prompt)\n\n try:\n return int(value)\n except ValueError:\n print(\"Blad!\")\n\ndef factorial(n):\n if n == 0:\n return 1\n else:\n return n * factorial(n-1)\n\n\nn = getNumber(\"Podaj liczbę: \")\n\nprint(str(n) + \"! = \" + str(factorial(n)))","repo_name":"irekkosek/jezyki-skryptowe","sub_path":"python/zadania/pyth-67-100/zad90.py","file_name":"zad90.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74121556306","text":"# https://atcoder.jp/contests/typical90/submissions/23897436\n# 081 - Friendly Group(★5)\nimport sys\n\nsys.setrecursionlimit(10 ** 7)\ninput = sys.stdin.readline\nf_inf = float('inf')\nMOD = 10 ** 9 + 7\n\n\ndef solve():\n n, k = map(int, input().split())\n IN = []\n MAX = 0\n for _ in range(n):\n a, b = map(int, input().split())\n MAX = max(MAX, a, b)\n IN.append((a, b))\n MAX += 1\n AB = [[0] * MAX for _ in range(MAX)]\n for a, b in IN:\n AB[a][b] += 1\n\n R = [[0] * (MAX + 1) for _ in range(MAX + 1)]\n for i in range(MAX):\n for j in range(MAX):\n R[i + 1][j + 1] = R[i][j + 1] + R[i + 1][j] - R[i][j] + AB[i][j]\n\n res = 0\n for x1 in range(MAX):\n for y1 in range(MAX):\n x2 = min(MAX, x1 + k + 1)\n y2 = min(MAX, y1 + k + 1)\n tot = R[x2][y2] - R[x1][y2] - R[x2][y1] + R[x1][y1]\n res = max(res, tot)\n print(res)\n\n\nif __name__ == '__main__':\n solve()\n","repo_name":"happa64/AtCoder_Beginner_Contest","sub_path":"Unrated/Typical90/Typical90_081.py","file_name":"Typical90_081.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"5728272713","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport healpy as hp\nfrom scipy.stats import norm\nfrom astropy.utils.data import download_file\nfrom astropy.io import ascii\nfrom astropy.table import Table\nfrom astropy.table import Column\nfrom astroquery.vizier 
import Vizier\nfrom scipy.special import gammaincinv\nfrom astropy.cosmology import WMAP9 as cosmo\nimport astropy.units as u\nimport astropy.constants as c\nfrom ligo.skymap.distance import conditional_pdf\nimport pdb\nfrom get_LST import *\nfrom make_phaseii import *\n\n\n\ndef cdf(pdf):\n #Calculate contour in probability\n sortedpix = np.argsort(pdf)[::-1]\n cumsum = np.cumsum(pdf[sortedpix])\n cls = np.empty_like(pdf)\n cls[sortedpix] = cumsum*100\n return cls\n\n#def get_probability_index(cat, probb, distmu, distsigma, distnorm, pixarea, nside, probability):\ndef get_probability_index(cat, probb, distmu, distsigma, distnorm, pixarea, nside):\n \n '''\n This will take a pandas-read in csv file, and will return an ordered list of galaxies within that catalog, ordered by the probability map\n '''\n \n print(\"cat1: \"+str(cat))\n theta = 0.5*np.pi - cat['DEJ2000']*np.pi/180\n theta = np.asarray([float(i) for i in theta])\n print(\"theta: \"+str(theta))\n \n phi = cat['RAJ2000']*np.pi/180\n phi = np.asarray([float(i) for i in phi])\n cls = cdf(probb)\n\n print(\"cls: \"+str(cls))\n\n ipix = hp.ang2pix(nside, theta, phi)\n print(\"ipix: \"+str(ipix))\n cls = cls[ipix]\n\n dist = cat['d']\n \n logdp_dV = np.log(conditional_pdf(dist,distmu[ipix],distsigma[ipix],distnorm[ipix]).tolist()) - np.log(pixarea)\n \n #logdp_dV = np.log(probability[ipix]) + np.log(conditional_pdf(dist,distmu[ipix],distsigma[ipix],distnorm[ipix]).tolist()) - np.log(pixarea)\n\n #cutting to select only 90 % confidence in position\n cattop = cat[cls<90]\n logdp_dV = logdp_dV[cls<90]\n s_lumK = 10**(-0.4*cat['B'][cls<90])\n s_lumK = s_lumK/s_lumK.sum()\n #s_lumB = 10**(-0.4*cat1['B_Abs'][cls>90])\n #s_lumB = s_lumB/s_lumB.sum()\n cls = cls[cls<90]\n #only using K for now\n logdp_dV = np.log(s_lumK) + logdp_dV\n\n #Now working only with event with overall probability 99% lower than the most probable\n top99i = logdp_dV-np.max(logdp_dV) > np.log(1/100)\n\n cattop = cattop[top99i]\n logdp_dV = logdp_dV[top99i]\n cls = cls[top99i]\n\n #sorting by probability\n isort = np.argsort(logdp_dV)[::-1]\n \n cattop = Table.from_pandas(cattop.iloc[isort])\n logptop = logdp_dV.iloc[isort]\n cls = cls[isort]\n \n \n return cattop, logptop, cls\n\n\n\ndef get_galaxy_list(Skymap_fits_file = \"60029.41050925926/flattened_multiorder_fits_MS230326j.fits\", HET_visible_galaxy_file = \"Glade_HET_Visible_Galaxies.csv\", All_galaxy_file = \"Glade_HET_Visible_Galaxies.csv\"):\n\n if Skymap_fits_file is not None:\n locinfo, header = hp.read_map(Skymap_fits_file, field=range(4), h=True)\n probb, distmu, distsigma, distnorm = locinfo\n # Getting healpix resolution and pixel area in deg^2\n npix = len(probb)\n nside = hp.npix2nside(npix)\n # Area per pixel in steradians\n pixarea = hp.nside2pixarea(nside)\n\n\n #read in galaxy catalog from the csv file passed in as HET_visible_galaxy_file\n\n cat1 = pd.read_csv(HET_visible_galaxy_file, sep=',',usecols = [1,2,3,4,5],names=['RAJ2000','DEJ2000','B','K','d'],header=0,dtype=np.float64)\n\n #cattop, logptop, cls = get_probability_index(cat1, probb, distmu, distsigma, distnorm, pixarea, nside, probability)\n cattop, logptop, cls = get_probability_index(cat1, probb, distmu, distsigma, distnorm, pixarea, nside)\n #edit catalog\n \n index = Column(name='index',data=np.arange(len(cattop)))\n logprob = Column(name='LogProb',data=logptop)\n exptime = Column(name='exptime',data=60*20*np.ones(len(cattop)))\n contour = Column(name='contour',data = cls)\n Nvis = 
Column(name='Nvis',data=np.ones(len(cattop)))\n cattop.add_columns([index,logprob,exptime,Nvis,contour])\n ascii.write(cattop['index','RAJ2000','DEJ2000','exptime','Nvis','LogProb','contour'], 'Test_Found_Galaxies_list.dat', overwrite=True)\n \n print(\"read in galaxies: \"+str(cat1))\n print(\"ordered galaxies by probability: \"+str(cattop))\n plt.scatter(cattop['RAJ2000'][:50], cattop['DEJ2000'][:50])\n plt.savefig(\"top 50 galaxies in list.pdf\")\n \n return cattop,logptop\n\n\ncattop, logptop = get_galaxy_list()\nmincontour = get_LST(savedir = '',targf = 'Test_Found_Galaxies_list.dat')\nmake_phaseii(lstfile = 'LSTs_Found.out', savedir = '')\n","repo_name":"sky5265/LIGHETR_Alert_System","sub_path":"Final Directory/read_galaxy_list.py","file_name":"read_galaxy_list.py","file_ext":"py","file_size_in_byte":4591,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12580542054","text":"# -*- coding: utf-8 -*-\nfrom django.test import TestCase\n\nfrom yosim.rules.models import Rule, RuleLevel\nfrom yosim.utilities.collector import XMLCollector\nfrom yosim.utilities.utilities import (\n get_default_ossec_params, get_absolute_path\n)\n\nRULES_COUNT = 0\nRULE_LEVELS = [\n {\n 'number': 0,\n 'common_name': 'Ignored',\n 'description': 'No action taken. Used to avoid false positives. These \\\n rules are scanned before all the others. They include events with no \\\n security relevance.',\n },\n {\n 'number': 1,\n 'common_name': 'None',\n 'description': 'None',\n },\n {\n 'number': 2,\n 'common_name': 'System low priority notification',\n 'description': 'System notification or status messages. They have \\\n no security relevance.',\n },\n {\n 'number': 3,\n 'common_name': 'Successful/Authorized events',\n 'description': 'They include successful login attempts, firewall \\\n allow events, etc.',\n },\n {\n 'number': 4,\n 'common_name': 'System low priority error',\n 'description': 'Errors related to bad configurations or unused \\\n devices/applications. They have no security relevance and are \\\n usually caused by default installations or software testing.',\n },\n {\n 'number': 5,\n 'common_name': 'User generated error',\n 'description': 'They include missed passwords, denied actions, \\\n etc. By itself they have no security relevance.',\n },\n {\n 'number': 6,\n 'common_name': 'Low relevance attack',\n 'description': 'They indicate a worm or a virus that have no \\\n affect to the system (like code red for apache servers, etc). \\\n They also include frequently IDS events and frequently errors.',\n },\n {\n 'number': 7,\n 'common_name': '\"Bad word\" matching',\n 'description': 'They include words like “bad”, “error”, etc. \\\n These events are most of the time unclassified and may have \\\n some security relevance.',\n },\n {\n 'number': 8,\n 'common_name': 'First time seen',\n 'description': 'Include first time seen events. First time an \\\n IDS event is fired or the first time an user logged in. If you \\\n just started using OSSEC HIDS these messages will probably be \\\n frequently. After a while they should go away, It also includes \\\n security relevant actions (like the starting of a sniffer or \\\n something like that).',\n },\n {\n 'number': 9,\n 'common_name': 'Error from invalid source',\n 'description': 'Include attempts to login as an unknown user \\\n or from an invalid source. May have security relevance (specially \\\n if repeated). 
They also include errors regarding the “admin” \\\n (root) account.',\n },\n {\n 'number': 10,\n 'common_name': 'Multiple user generated errors',\n 'description': 'They include multiple bad passwords, multiple \\\n failed logins, etc. They may indicate an attack or may just be \\\n that a user just forgot his credencials.',\n },\n {\n 'number': 11,\n 'common_name': 'Integrity checking warning',\n 'description': 'They include messages regarding the modification \\\n of binaries or the presence of rootkits (by rootcheck). If you \\\n just modified your system configuration you should be fine \\\n regarding the “syscheck” messages. They may indicate a successful \\\n attack. Also included IDS events that will be ignored (high number \\\n of repetitions).',\n },\n {\n 'number': 12,\n 'common_name': 'High importancy event',\n 'description': 'They include error or warning messages from \\\n the system, kernel, etc. They may indicate an attack against \\\n a specific application.',\n },\n {\n 'number': 13,\n 'common_name': 'Unusual error (high importance)',\n 'description': 'Most of the times it matches a common attack pattern.',\n },\n {\n 'number': 14,\n 'common_name': 'High importance security event',\n 'description': 'Most of the times done with correlation and it \\\n indicates an attack.',\n },\n {\n 'number': 15,\n 'common_name': 'Severe attack',\n 'description': 'No chances of false positives. Immediate \\\n attention is necessary.',\n },\n]\n\n\nclass TestRules(TestCase):\n def filter_rules(self, xmlparse):\n global RULES_COUNT\n # get var tag and value\n defined_vars = {}\n for var in xmlparse.iter('var'):\n defined_vars[var.attrib['name']] = var.text\n\n # loop each group\n for group in xmlparse.iter('group'):\n # if check group has attr\n if group.attrib:\n categories = group.attrib['name'].split(',')\n categories = [category.strip()\n for category in categories if category]\n # loop each rule\n for rule in group.iter('rule'):\n arule = {}\n arule['category'] = \", \".join(categories)\n\n # get rule attributes\n for key, value in rule.attrib.items():\n arule[key] = value\n\n # change id key\n arule['rule_id'] = arule.pop('id')\n\n if 'ignore' in arule:\n arule['ignore_attr'] = arule.pop('ignore')\n\n for child in rule:\n if child.tag in arule:\n # concatenate repeated child tags with a '|' separator\n arule[child.tag] = arule[child.tag] + \"| \" + child.text\n else:\n arule[child.tag] = child.text\n\n # format group\n if 'group' in arule:\n groups = arule['group'].split(',')\n groups = [group.strip() for group in groups if group]\n arule['group'] = \", \".join(groups)\n\n # convert None type to \"Yes\"\n for key, value in arule.items():\n if value is None:\n arule[key] = \"Yes\"\n\n # change list key\n if 'list' in arule:\n arule['rule_list'] = arule.pop('list')\n\n # change nested id key\n if 'id' in arule:\n arule['regex_id'] = arule.pop('id')\n\n # get all fields in Rule\n fields = [f.name for f in Rule._meta.get_fields()]\n\n # avoid dictionary changed size during iteration\n arule['unknown'] = ''\n # push all unknown tag to unknown field\n for key, value in arule.items():\n if key not in fields:\n unknown_data = arule['unknown'] + key + \": \" + value + \"| \"\n arule['unknown'] = unknown_data.rstrip(\"| \")\n\n if defined_vars:\n for key, value in arule.items():\n for name, text in defined_vars.items():\n var_name = \"$\" + name\n if value == var_name:\n arule[key] = text\n\n RULES_COUNT += 1\n # get rule level object\n arule['level'] = RuleLevel.objects.get(\n number=arule['level'])\n # create rule object\n rule, created = 
Rule.objects.update_or_create(**arule)\n print(\"Collect rule id: {}\".format(rule.rule_id))\n self.assertTrue(rule)\n\n def test_01_get_rules(self):\n print('Collecting rule levels....')\n for RULE_LEVEL in RULE_LEVELS:\n rule_level, created = RuleLevel.objects.update_or_create(\n **RULE_LEVEL)\n print(\"Collect rule level id: {}\".format(rule_level.number))\n self.assertTrue(rule_level)\n\n print(\"Collecting rules....\")\n params = get_default_ossec_params()\n rule_dir = get_absolute_path(params['DIRECTORY'], 'rules')\n xmlcollector = XMLCollector(rule_dir)\n xmlparses = xmlcollector.get_parsed_xml_files()\n\n for xmlparse in xmlparses:\n self.filter_rules(xmlparse)\n\n print(\"Total collected rules: {}\".format(RULES_COUNT))\n","repo_name":"thoongnv/yosim","sub_path":"yosim/rules/tests/test_get_rules.py","file_name":"test_get_rules.py","file_ext":"py","file_size_in_byte":8653,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"27281429796","text":"from random import randint\r\nfrom tkinter import *\r\nfrom PIL import Image, ImageTk\r\n\r\nroot = Tk()\r\nroot.title(\"Rock Scissor Paper\")\r\nroot.configure(background=\"green\")\r\n\r\nrock_img = ImageTk.PhotoImage(Image.open(\"C:\\\\Users\\\\Khushali\\\\Downloads\\\\com_stone.jpeg\"))\r\nsci_img = ImageTk.PhotoImage(Image.open(\"C:\\\\Users\\\\Khushali\\\\Downloads\\\\com_sci.jpeg\"))\r\npaper_img = ImageTk.PhotoImage(Image.open(\"C:\\\\Users\\\\Khushali\\\\Downloads\\\\com_paper.jpeg\"))\r\nrocku = ImageTk.PhotoImage(Image.open(\"C:\\\\Users\\\\Khushali\\\\Downloads\\\\user_stone.jpeg\"))\r\nsciu = ImageTk.PhotoImage(Image.open(\"C:\\\\Users\\\\Khushali\\\\Downloads\\\\user_sci.jpeg\"))\r\npaperu = ImageTk.PhotoImage(Image.open(\"C:\\\\Users\\\\Khushali\\\\Downloads\\\\user_paper.jpeg\"))\r\n\r\n\r\ncom_label = Label(root, image=sci_img,bg=\"green\")\r\nuser_label = Label(root, image=sciu,bg=\"green\")\r\n\r\ncom_label.grid(row=1, column=0)\r\nuser_label.grid(row=1, column=12)\r\n\r\n#scores..\r\nplayerscore = Label(root, text=0, font=100, bg=\"green\", fg=\"white\")\r\ncomputerscoe= Label(root, text=0, font=100, bg=\"green\", fg=\"white\")\r\ncomputerscoe.grid(row=1 ,column=1)\r\nplayerscore.grid(row=1 ,column=3)\r\n\r\n#indicator\r\nuser_indicator = Label(root, font=50, text=\"USER\", bg=\"green\", fg=\"white\")\r\ncomp_indicator = Label(root,font=50,text=\"COMPUTER\", bg=\"green\", fg=\"white\")\r\nuser_indicator.grid(row=0, column=3)\r\ncomp_indicator.grid(row=0, column=1)\r\n\r\n#messages\r\nmsg = Label(root, font=50, bg=\"green\", fg=\"white\",text=\"You Loose\")\r\nmsg.grid(row=3, column=2)\r\n\r\n#update messages\r\ndef updatemessage(x):\r\n msg['text']=x\r\n\r\n#update user score\r\ndef updateuserscore():\r\n score = int(playerscore[\"text\"])\r\n score += 1\r\n playerscore[\"text\"] = str(score)\r\n\r\n#update computer score\r\ndef updatecomscore():\r\n score = int(computerscoe[\"text\"])\r\n score += 1\r\n computerscoe[\"text\"] = str(score)\r\n\r\n#check winner\r\ndef checkwin(player, computer):\r\n if player == computer:\r\n updatemessage(\"It is tie!!!\")\r\n elif player == \"rock\":\r\n if computer == \"paper\":\r\n updatemessage(\"You loose\")\r\n updatecomscore()\r\n else:\r\n updatemessage(\"You win\")\r\n updateuserscore()\r\n elif player == \"paper\":\r\n if computer == \"scissor\":\r\n updatemessage(\"You loose\")\r\n updatecomscore()\r\n else:\r\n updatemessage(\"You win\")\r\n updateuserscore()\r\n elif player == \"scissor\":\r\n if computer == 
\"rock\":\r\n updatemessage(\"You loose\")\r\n updatecomscore()\r\n else:\r\n updatemessage(\"You win\")\r\n updateuserscore()\r\n else:\r\n pass\r\n\r\n\r\n#update choices\r\n\r\nchoices = [\"rock\", \"paper\", \"scissor\"]\r\ndef updatechoices(x):\r\n\r\n #computer int random\r\n compchoices = choices[randint(0, 2)]\r\n if compchoices == \"rock\":\r\n com_label.configure(image = rock_img)\r\n elif compchoices == \"scissor\":\r\n com_label.configure(image = sci_img)\r\n else:\r\n com_label.configure(image = paper_img)\r\n\r\n\r\n#user\r\n if x==\"rock\":\r\n user_label.configure(image = rocku)\r\n elif x==\"paper\":\r\n user_label.configure(image = paperu)\r\n else:\r\n user_label.configure(image = sciu)\r\n\r\n checkwin(x,compchoices)\r\n\r\n#button\r\n\r\nrock = Button(root ,width=20, height=2 ,text=\"ROCK\" , bg=\"purple\", fg=\"white\", command=lambda : updatechoices(\"rock\")).grid(row=2, column=1)\r\npaper = Button(root ,width=20, height=2 ,text=\"PAPER\" , bg=\"blue\", fg=\"white\", command=lambda : updatechoices(\"paper\")).grid(row=2, column=2)\r\nscissor = Button(root ,width=20, height=2 ,text=\"SCISSOR\" , bg=\"red\", fg=\"white\", command=lambda : updatechoices(\"scissor\")).grid(row=2, column=3)\r\n\r\nroot.mainloop()","repo_name":"khushalishah21/rock-paper-scissor-game","sub_path":"game1.py","file_name":"game1.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24879695494","text":"x=float(input(\"escribe un numero\"))\ny=float(input(\"escribe otro numero\"))\nz=float(input(\"escribe un ultimo numero\"))\ndef media_aritmetica(x,y,z):\n return(x+y+z)/3\nprint(f'la media aritmetica de los numeros dados es {media_aritmetica(x,y,z)}')\n\n\ndef media(lista):\n suma = 0\n for i in range(len(lista)):\n suma += lista[i]\n return suma / len(lista)\n\ndef crearlista():\n n = int(input('Tamaño de nuestra lista'))\n lista = []\n print(f'Introduce {n} números a la lista:')\n for i in range(n):\n lista.append(float(input(f'{i+1} - ')))\n return lista\nlista = crearlista()\nprint(lista)\nprint(f'La media es {media(lista)}')\n\ndef mediaponderada(lista, ponderadas):\n lista_final = []\n suma1=0\n suma2=0\n for i in range(len(lista)):\n lista_final.append(lista[i]*ponderadas[i])\n for i in range(len(lista_final)):\n suma1 += lista_final[i]\n for i in range(len(ponderadas)):\n suma2 += ponderadas[i]\n return suma1/suma2\n\nopcion = input('Si desea crear una nueva lista pulse s')\nif opcion == 's':\n crearlista()\nponderadas = []\nprint('Ahora vamos a introducir los coeficientes de ponderación de cada valor')\nfor i in range(len(lista)):\n ponderadas.append(float(input(f'{i+1} - El coeficiente de ponderación de {lista[i]} es'))) \n\nprint(f'Nuestra media ponderada es {mediaponderada(lista, ponderadas)}')","repo_name":"Javifdz12/intro_algoritmos","sub_path":"clases/ejercicio9.py","file_name":"ejercicio9.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"12631810702","text":"import argparse\nimport os\nimport pytorch_lightning as pl\nfrom models.trainer import MultimodalVAE\nfrom models.config_cls import Config\nfrom pytorch_lightning.loggers import CSVLogger\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom models.dataloader import DataModule\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.profiler import SimpleProfiler\nfrom pickle import 
dumps\n\ndef identity(string):\n return string\n\nparser = argparse.ArgumentParser()\nparser.register('type', None, identity)\n_ = dumps(parser)\nparser.add_argument(\"-c\", \"--cfg\", help=\"Specify config file\", metavar=\"FILE\")\nparser.add_argument('-p', '--precision', type=str, default=32,\n help='Value for mixed precision training. Allowed values: 64, 32, 16, bf16')\nparser.add_argument('--viz_freq', type=int, default=None,\n help='frequency of visualization savings (number of iterations)')\nparser.add_argument('--batch_size', type=int, default=None,\n help='Size of the training batch')\nparser.add_argument('--obj', type=str, metavar='O', default=None,\n help='objective to use (moe_elbo/poe_elbo_semi)')\nparser.add_argument('--loss', type=str, metavar='O', default=None,\n help='loss to use (lprob/bce)')\nparser.add_argument('--n_latents', type=int, default=None,\n help='latent vector dimensionality')\nparser.add_argument('--pre_trained', type=str, default=None,\n help='path to pre-trained model (train from scratch if empty)')\nparser.add_argument('--seed', type=int, metavar='S', default=None,\n help='seed number')\nparser.add_argument('--exp_name', type=str, default=None,\n help='name of folder')\nparser.add_argument('--optimizer', type=str, default=None,\n help='optimizer')\n\ndef main(config):\n pl.seed_everything(config.seed)\n data_module = DataModule(config)\n model_wrapped = MultimodalVAE(config, data_module.get_dataset_class().feature_dims)\n profiler = SimpleProfiler(dirpath=os.path.join(config.mPath, \"model\"), filename=\"profiler_output\")\n checkpoint_callback = ModelCheckpoint(dirpath=os.path.join(config.mPath, \"model\"), save_last=True, save_top_k=1, mode=\"min\")\n logger2 = CSVLogger(save_dir=config.mPath, name=\"metrics\", flush_logs_every_n_steps=1, version=\"csv\")\n logger1 = TensorBoardLogger(config.mPath, name=\"metrics\", log_graph=True, version=\"tensorboard\")\n trainer_kwargs = {\"profiler\": profiler, \"accelerator\":\"gpu\",\n \"default_root_dir\": config.mPath, \"max_epochs\": config.epochs, \"check_val_every_n_epoch\": 1,\n \"callbacks\": [checkpoint_callback], \"logger\":[logger1, logger2], \"precision\":args.precision}\n pl_trainer = pl.Trainer(**trainer_kwargs)\n pl_trainer.fit(model_wrapped, datamodule=data_module)\n pl_trainer.test(ckpt_path=\"best\", datamodule=data_module)\n\nif __name__ == '__main__':\n args = parser.parse_args()\n config = Config(parser)\n if config.iterseeds > 1: # iterate over number of seeds defined in iterseeds\n for seed in range(config.iterseeds):\n if seed > 0: # after first training we need to make new path for the new model\n config = Config(parser)\n config.change_seed(config.seed+seed)\n config.dump_config()\n main(config)\n else:\n main(config)\n","repo_name":"gabinsane/multimodal-vae-comparison","sub_path":"multimodal_compare/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"48"} +{"seq_id":"6935267704","text":"import pygame\nfrom pygame.sprite import Sprite\n\nclass Ship(Sprite):\n def __init__(self, settings, screen):\n self.screen = screen\n self.settings = settings\n super(Ship, self).__init__()\n\n #Load the ship image\n self.image = pygame.image.load(\"Untitled1-1.bmp\")\n self.rect = self.image.get_rect()\n self.screen_rect = self.screen.get_rect()\n\n #make ship at the bottom center of screen\n self.rect.centerx = self.screen_rect.centerx\n self.rect.bottom = self.screen_rect.bottom\n\n # 
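The rect's centerx attribute can only hold integers, while\n # ship_speed_factor may be fractional, so we\n # 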
Store a decimal value for the ship's center\n self.center = float(self.rect.centerx)\n\n #Movement flag\n self.moving_right = False\n self.moving_left = False\n\n def update(self):\n \"\"\"to update ship's postition based n movement flags\"\"\"\n #Update ship's center value, not the rect\n if self.moving_right and self.rect.right < self.screen_rect.right:\n # self.rect.centerx += 1\n self.center += self.settings.ship_speed_factor\n if self.moving_left and self.rect.left > 0:\n # self.rect.centerx -= 1\n self.center -= self.settings.ship_speed_factor\n #update rect object from self.center\n self.rect.centerx = self.center\n\n def blitme(self):\n self.screen.blit(self.image, self.rect)\n\n def center_ship(self):\n self.center = self.screen_rect.centerx\n","repo_name":"cindyclarestabasrie/Alien-Invasion","sub_path":"Pythonpygame/ship.py","file_name":"ship.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42597501624","text":"from snapthat.database.services.service_provider import database_service, DatabaseServiceKeys\nfrom snapthat.database.models.brand_cloth import BrandClothModel\n\nservice = database_service.get(DatabaseServiceKeys.BrandCloth)\nmodel = BrandClothModel()\n\nres = service.count({})\nprint(res)\n\n\nbrand_service = database_service.get(DatabaseServiceKeys.Brand)\nbrands = brand_service.find({\"brand_name\": \"hello\"})\nbrand_id = str(brands[0][\"_id\"])\nbrand_name = brands[0][\"brand_name\"]\n\n\nmodel.brand_id = brand_id\nmodel.brand_name = brand_name\nmodel.thumbnail = \"nelly/images/0/0_0.jpg\"\nmodel.gender = \"female\"\nmodel.title = \"some random clothing item\"\nmodel.price = 5000\n\n\nmodel.prodId=\"8911\"\nmodel.price = 3000\n\nbrand_cloth_id = service.add(model)\nprint(f\"Brand Cloth id {brand_cloth_id}\")\n\n\nres = service.find({\"_id\": brand_cloth_id})\nprint(res)\n\n\nres = service.find({},skip=0, limit=1000, sort=[('price' , 1)])\nprint(len(res))\nprint([i[\"price\"] for i in res])\n","repo_name":"mobinalhassan/zalando_fyp","sub_path":"commonutils/snapthat/examples/database/brand_cloth.py","file_name":"brand_cloth.py","file_ext":"py","file_size_in_byte":952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70615792145","text":"import json\nimport copy\nimport re\nfrom global_config import *\n\n\nclass KG(object):\n def __init__(self):\n self.graph = {}\n self.entities = {}\n self.list_0 = ('主题曲', '父亲', '母亲', '注册资本', '气候', '朝代', '面积', '总部地点',\n '修业年限', '所在城市', '改编自', '成立日期', '海拔', '国籍', '祖籍', '专业代码', '邮政编码',\n '首都')\n self.list_0_meanless = ('票房', '人口数量', '占地面积') # 涉及到数量train data准确率不高\n self.special_list = {'妻子'} # '丈夫'\n self.conflict = 0\n\n def generate_kg(self, file):\n train_data = json.load(open(file, 'r', encoding='utf-8'))\n for dd in train_data:\n for sspo in dd['spo_list']:\n self.add_node(sspo)\n\n def add_node(self, spo):\n if spo['subject_type'] in self.entities.keys():\n self.entities[spo['subject_type']].append(spo['subject'])\n else:\n self.entities[spo['subject_type']] = [spo['subject']]\n for key in spo['object'].keys():\n if spo['object_type'][key] in self.entities.keys():\n self.entities[spo['object_type'][key]].append(spo['object'][key])\n else:\n self.entities[spo['object_type'][key]] = [spo['object'][key]]\n\n if spo['predicate'] in self.list_0:\n if spo['subject'] in self.graph.keys():\n if spo['predicate'] in self.graph[spo['subject']].keys():\n if 
spo['object']['@value'] != self.graph[spo['subject']][spo['predicate']]:\n self.conflict += 1\n print('conflict!!new:{}, exist:{}'.format(spo['object']['@value'],\n self.graph[spo['subject']][spo['predicate']]))\n else:\n self.graph[spo['subject']][spo['predicate']] = spo['object']['@value']\n else:\n self.graph[spo['subject']] = {spo['predicate']: spo['object']['@value']}\n else:\n if spo['subject'] in self.graph.keys():\n if spo['predicate'] in self.graph[spo['subject']].keys():\n if spo['object']['@value'] not in self.graph[spo['subject']][spo['predicate']]:\n self.graph[spo['subject']][spo['predicate']].append(spo['object']['@value'])\n else:\n self.graph[spo['subject']][spo['predicate']] = [spo['object']['@value']]\n else:\n self.graph[spo['subject']] = {spo['predicate']: [spo['object']['@value']]}\n\n def check_spo(self, text, spo):\n def is_emperor(string):\n a = ['宗', '帝', '曹操', '祖', '皇']\n for aa in a:\n if string.find(aa) >= 0:\n return True\n else:\n return False\n\n error_flag = False\n predicate = spo['predicate']\n new_spos = [spo]\n if predicate in self.list_0 and spo['subject'] in self.graph.keys() and predicate in self.graph[\n spo['subject']].keys():\n spo_value = spo['object']['@value']\n kg_value = self.graph[spo['subject']][spo['predicate']]\n if spo_value != kg_value:\n if text.find(kg_value) >= 0:\n if spo['predicate'] in ('成立日期', '上映时间'):\n spo['object']['@value'] = spo_value if len(spo_value) >= len(kg_value) else kg_value\n else:\n # print(spo, self.graph[spo['subject']][spo['predicate']])\n spo['object']['@value'] = self.graph[spo['subject']][spo['predicate']]\n # error_flag = True\n elif predicate in self.special_list and spo['subject'] in self.graph.keys() and \\\n predicate in self.graph[spo['subject']].keys():\n spo_value = spo['object']['@value']\n kg_value = self.graph[spo['subject']][spo['predicate']]\n if is_emperor(spo['subject']):\n for kg_obj in kg_value:\n if spo_value.find(kg_obj) >= 0:\n spo['object']['@value'] = kg_obj\n elif text.find(kg_obj) >= 0:\n new_spo = copy.deepcopy(spo)\n new_spo['object']['@value'] = kg_obj\n # print('add', new_spo)\n new_spos.append(new_spo)\n error_flag = True\n elif len(kg_value) == 1 and text.find(kg_value[0]) >= 0:\n spo['object']['@value'] = kg_value[0]\n return error_flag, new_spos\n\n def load_kg(self, file):\n fp = open(file, 'r', encoding='utf-8')\n self.graph, self.entities = json.load(fp)\n\n def dump_kg(self, outfile):\n fp = open(outfile, 'w+', encoding='utf-8')\n json.dump([self.graph, self.entities], fp, ensure_ascii=False)\n\n\nclass Node(object):\n def __init__(self, predicate, node_name, is_sbj):\n self.name = node_name\n self.in_edges = {}\n self.out_edges = {}\n self.gender = 'unknown'\n self.professions = []\n self.fictional = False\n self.add_properity(predicate, is_sbj)\n\n def __str__(self):\n node_info = {'name': self.name,\n 'gender': self.gender,\n 'professions': self.professions,\n 'fictional': self.fictional,\n 'in': dict((rlt, [node.name for node in nodes]) for rlt, nodes in self.in_edges.items()),\n 'out': dict((rlt, [node.name for node in nodes]) for rlt, nodes in self.out_edges.items()),\n }\n return str(node_info)\n\n def add_properity(self, predicate, is_sbj=True):\n def set_gender_(gender):\n if self.gender == 'unknown' or self.gender == gender:\n self.gender = gender\n else:\n self.gender = 'error'\n\n if is_sbj:\n if predicate == '妻子':\n set_gender_('male')\n if predicate in ('配音', '饰演'):\n self.professions.append(predicate)\n else:\n if predicate == '父亲':\n set_gender_('male')\n if 
predicate in ('妻子', '母亲'):\n set_gender_('female')\n if predicate in ('作者', '编剧', '歌手', '制片人', '作词', '导演', '作曲', '主演', '主持人'):\n self.professions.append(predicate)\n if predicate in ('主角', '配音', '角色', '饰演'):\n self.fictional = True\n\n def add_out_edge(self, edge_type, obj_node):\n if edge_type not in self.out_edges.keys():\n self.out_edges[edge_type] = [obj_node]\n elif obj_node not in self.out_edges[edge_type]:\n self.out_edges[edge_type].append(obj_node)\n else:\n return\n obj_node.add_in_edge(edge_type, self)\n\n def add_in_edge(self, edge_type, sbj_node):\n if edge_type not in self.in_edges.keys():\n self.in_edges[edge_type] = [sbj_node]\n elif sbj_node not in self.in_edges[edge_type]:\n self.in_edges[edge_type].append(sbj_node)\n\n def del_out_edge(self, edge_type, obj_node):\n if edge_type not in self.out_edges.keys():\n print('kg error')\n elif obj_node not in self.out_edges[edge_type]:\n print('kg error')\n else:\n del (self.out_edges[edge_type][self.out_edges[edge_type].index(obj_node)])\n if len(self.out_edges[edge_type]) == 0:\n del (self.out_edges[edge_type])\n obj_node.del_in_edge(edge_type, self)\n\n def del_in_edge(self, edge_type, sbj_node):\n if edge_type not in self.in_edges.keys():\n print('error')\n elif sbj_node not in self.in_edges[edge_type]:\n print('error')\n else:\n del (self.in_edges[edge_type][self.in_edges[edge_type].index(sbj_node)])\n if len(self.in_edges[edge_type]) == 0:\n del (self.in_edges[edge_type])\n\n def check_out_edge(self, edge_type, obj_node):\n if edge_type in self.out_edges.keys():\n if obj_node in self.out_edges[edge_type]:\n return 'CORRECT'\n return 'UNKNOWN'\n\n\nclass KnowledgeGraph(object):\n def __init__(self):\n self.graph = {}\n self.uniques = ('父亲', '母亲', '改编自', '国籍', '祖籍', '专业代码', '邮政编码', '妻子')\n self.conflicts = (('妻子', '母亲', '父亲'), ('嘉宾', '主持人'), ('主角', '主演'), ('角色', '主演'), ('作者', '主角'))\n self.conflicts_1 = ('歌手', '作词', '作曲')\n def add_spo(self, spo):\n def get_node(predicate, entity, is_sbj=True):\n if entity not in self.graph.keys():\n new_node = Node(predicate, entity, is_sbj)\n self.graph[entity] = new_node\n else:\n self.graph[entity].add_properity(predicate, is_sbj)\n return self.graph[entity]\n\n sbj_node = get_node(spo['predicate'], spo['subject'], is_sbj=True)\n obj_node = get_node(spo['predicate'], spo['object']['@value'], is_sbj=False)\n sbj_node.add_out_edge(spo['predicate'], obj_node)\n\n def load_graph_from_raw_data(self, files):\n for filename in files:\n fp = open(filename, 'r', encoding='utf-8')\n data = json.load(fp)\n for d in data:\n for spo in d['spo_list']:\n self.add_spo(spo)\n fp.close()\n\n def load_graph_from_spos(self, spos):\n for spo in spos:\n self.add_spo(spo)\n\n def check_spo(self, text, spos):\n def correct_rlt(spo, sbj_node, obj_node):\n for conflict_pair in self.conflicts:\n if spo['predicate'] in conflict_pair:\n for c_rlt in conflict_pair:\n if c_rlt != spo['predicate']:\n if c_rlt in sbj_node.out_edges.keys() and obj_node in sbj_node.out_edges[c_rlt]:\n # print(sbj, rlt, obj, '-->', sbj, c_rlt, obj)\n if c_rlt != '嘉宾':\n spo['predicate'] = c_rlt\n spo = correct_type(spo)\n if spo['predicate'] in self.conflicts_1:\n if spo['predicate'] not in obj_node.professions:\n for p in self.conflicts_1:\n if obj_node.professions.count(p) > 30:\n spo['predicate'] = p\n break\n return spo\n\n def correct_entity(spo):\n new_temp_spos = []\n sbj, rlt, obj = spo['subject'], spo['predicate'], spo['object']['@value']\n if spo['predicate'] in self.uniques:\n if rlt in sbj_node.out_edges.keys() and 
len(sbj_node.out_edges[rlt]) >= 1:\n for oobj in sbj_node.out_edges[rlt]:\n if len(oobj.name) > 1 and text.find(oobj.name) != -1:\n if rlt != '妻子':\n spo['object']['@value'] = oobj.name if oobj.name not in obj else obj\n # print(sbj, rlt, obj, '-->', spo['object']['@value'])\n break\n else:\n new_spo = copy.deepcopy(spo)\n new_spo['object']['@value'] = oobj.name if oobj.name not in obj else obj\n if new_spo not in new_temp_spos:\n new_temp_spos.append(new_spo)\n if spo not in new_temp_spos and obj_node.in_edges.get('妻子') is None and len(\n sbj_node.out_edges[rlt]) > 1:\n new_temp_spos.append(spo)\n if not new_temp_spos:\n new_temp_spos.append(spo)\n return new_temp_spos\n\n new_spos = []\n for spo in spos:\n sbj, rlt, obj = spo['subject'], spo['predicate'], spo['object']['@value']\n sbj_node = self.graph.get(sbj)\n obj_node = self.graph.get(obj)\n if sbj_node and obj_node and sbj_node.check_out_edge(rlt, obj_node) == 'UNKNOWN':\n spo = correct_rlt(spo, sbj_node, obj_node)\n new_spos.extend(correct_entity(spo))\n else:\n new_spos.append(spo)\n spos = []\n for spo in new_spos:\n if spo not in spos:\n spos.append(spo)\n return spos\n\n def find_all_possible_spos(self, text, entities):\n spos = []\n for e in entities:\n if e in self.graph.keys():\n for edge_type in self.graph[e].in_edges.keys():\n for node in self.graph[e].in_edges[edge_type]:\n if node.name in entities:\n spos.append([node.name, edge_type, e])\n for edge_type in self.graph[e].out_edges.keys():\n for node in self.graph[e].out_edges[edge_type]:\n if node.name in entities:\n spos.append([e, edge_type, node.name])\n return spos\n\n def fix_spo(self, text, spo, force_flag=None):\n sbj = spo['subject']\n rlt = spo['predicate']\n obj = spo['object']['@value']\n sbj_node = self.graph.get(sbj)\n obj_node = self.graph.get(obj)\n spos = []\n if sbj_node is not None and obj_node is None and (\n force_flag == 'OBJ' or spo['object_type']['@value'] in ('人物', '历史人物', '娱乐人物')):\n kg_obj_nodes = sbj_node.out_edges.get(rlt)\n if kg_obj_nodes:\n for oobj in kg_obj_nodes:\n if oobj.name in obj and len(oobj.name) > 1 and (\n force_flag == 'OBJ' or (len(obj) > 4 and obj.find('·') == -1 and have_chinese(obj))):\n new_spo = copy.deepcopy(spo)\n new_spo['object']['@value'] = oobj.name\n if new_spo not in spos:\n # print(sbj, rlt, obj, ' --> ', oobj.name)\n spos.append(new_spo)\n\n if sbj_node is None and obj_node is not None and (\n force_flag == 'SBJ' or spo['subject_type'] in ('人物', '历史人物', '娱乐人物')):\n kg_sbj_nodes = obj_node.in_edges.get(rlt)\n if kg_sbj_nodes:\n for ssbj in kg_sbj_nodes:\n if ssbj.name in sbj and len(ssbj.name) > 1 and (\n force_flag == 'SBJ' or (len(sbj) > 4 and sbj.find('·') == -1 and have_chinese(sbj))):\n new_spo = copy.deepcopy(spo)\n new_spo['subject'] = ssbj.name\n if new_spo not in spos:\n # print(sbj, rlt, obj, ' --> ', ssbj.name)\n spos.append(new_spo)\n if len(spos) == 0:\n spos = [spo]\n return spos\n\n def self_check(self):\n global std_kg\n\n def correct_rlt_(sbj_node, obj_node):\n rlt_list = []\n for rlt in sbj_node.out_edges.keys():\n if obj_node in sbj_node.out_edges[rlt]:\n rlt_list.append(rlt)\n for conflict_pair in self.conflicts:\n conflict_list = []\n for rlt in conflict_pair:\n if rlt in rlt_list:\n conflict_list.append(rlt)\n if len(conflict_list) == 2:\n # print(conflict_pair)\n # print(sbj_node)\n # print(obj_node)\n conflict_list = set(conflict_list)\n if conflict_list == set(['母亲', '父亲']):\n if obj_node.name in std_kg.graph.keys() and std_kg.graph[obj_node.name].gender == 'male':\n 
sbj_node.del_out_edge('母亲', obj_node)\n elif obj_node.name in std_kg.graph.keys() and std_kg.graph[obj_node.name].gender == 'female':\n sbj_node.del_out_edge('父亲', obj_node)\n elif sbj_node.name[0] == obj_node.name[0]:\n sbj_node.del_out_edge('母亲', obj_node)\n else:\n sbj_node.del_out_edge('父亲', obj_node)\n sbj_node.del_out_edge('母亲', obj_node)\n elif conflict_list == set(['主角', '主演']):\n if obj_node.name in std_kg.graph.keys() and '主演' in std_kg.graph[obj_node.name].professions:\n sbj_node.del_out_edge('主角', obj_node)\n elif obj_node.name in std_kg.graph.keys() and std_kg.graph[obj_node.name].fictional:\n sbj_node.del_out_edge('主演', obj_node)\n else:\n sbj_node.del_out_edge('主角', obj_node)\n sbj_node.del_out_edge('主演', obj_node)\n elif conflict_list == set(['角色', '主演']):\n if obj_node.name in std_kg.graph.keys() and '主演' in std_kg.graph[obj_node.name].professions:\n sbj_node.del_out_edge('角色', obj_node)\n elif obj_node.name in std_kg.graph.keys() and std_kg.graph[obj_node.name].fictional:\n sbj_node.del_out_edge('主演', obj_node)\n else:\n sbj_node.del_out_edge('角色', obj_node)\n sbj_node.del_out_edge('主演', obj_node)\n elif conflict_list == set(['作者', '主角']):\n if obj_node.name in std_kg.graph.keys() and '作者' in std_kg.graph[obj_node.name].professions:\n sbj_node.del_out_edge('主角', obj_node)\n elif obj_node.name in std_kg.graph.keys() and std_kg.graph[obj_node.name].fictional:\n sbj_node.del_out_edge('作者', obj_node)\n else:\n sbj_node.del_out_edge('作者', obj_node)\n sbj_node.del_out_edge('主角', obj_node)\n elif conflict_list == set(['嘉宾', '主持人']):\n if obj_node.name in std_kg.graph.keys() and '主持人' in std_kg.graph[obj_node.name].professions:\n sbj_node.del_out_edge('嘉宾', obj_node)\n else:\n sbj_node.del_out_edge('嘉宾', obj_node)\n sbj_node.del_out_edge('主持人', obj_node)\n # print(sbj_node)\n # print(obj_node)\n # print('\\n')\n elif len(conflict_list) > 2:\n print(111)\n\n for sbj in self.graph.keys():\n sbj_node = self.graph[sbj]\n obj_nodes = []\n for rlt in sbj_node.out_edges.keys():\n for obj_node in sbj_node.out_edges[rlt]:\n if obj_node not in obj_nodes:\n obj_nodes.append(obj_node)\n for obj_node in obj_nodes:\n correct_rlt_(sbj_node, obj_node)\n\n def generate_spos(self):\n spos = []\n for sbj_node in self.graph.values():\n for rlt in sbj_node.out_edges.keys():\n for obj_node in sbj_node.out_edges[rlt]:\n spos.append({\n 'subject_type': schemas_dict[rlt]['subject_type'],\n 'subject': sbj_node.name,\n 'predicate': rlt,\n 'object_type': {'@value': schemas_dict[rlt]['object_type']['@value']},\n 'object': {'@value': obj_node.name}\n })\n return spos\n\n def check_spo_1(self, spo):\n sbj, rlt, obj = spo['subject'], spo['predicate'], spo['object']['@value']\n sbj_node = self.graph.get(sbj)\n obj_node = self.graph.get(obj)\n if sbj_node and obj_node and sbj_node.check_out_edge(rlt, obj_node) == 'CORRECT':\n return True\n else:\n return False\n\n\n# class NewKnowledgeGraph(object):\n# def __init__(self):\n# self.graph = {}\n# self.uniques = ('父亲', '母亲', '改编自', '国籍', '祖籍', '专业代码', '邮政编码', '妻子')\n# self.conflicts = (('妻子', '母亲', '父亲'), ('嘉宾', '主持人'), ('主角', '主演'), ('角色', '主演'), ('作者', '主角'))\n# \n# def add_spo(self, spo):\n# def get_node(predicate, entity, is_sbj=True):\n# if entity not in self.graph.keys():\n# new_node = Node(predicate, entity, is_sbj)\n# self.graph[entity] = new_node\n# else:\n# self.graph[entity].add_properity(predicate, is_sbj)\n# return self.graph[entity]\n# \n# sbj_node = get_node(spo['predicate'], spo['subject'], is_sbj=True)\n# obj_node = get_node(spo['predicate'], 
spo['object']['@value'], is_sbj=False)\n# sbj_node.add_out_edge(spo['predicate'], obj_node)\n# \n# def copy_node(self, node, new_name):\n# new_node = copy.deepcopy(node)\n# new_node.name = new_name\n# new_node.in_edges = {}\n# new_node.out_edges = {}\n# return new_node\n# \n# def combine_node(self, node_0, node_1):\n# for rlt in node_1.out_edges.keys():\n# for obj_node in node_1.out_edges[rlt]:\n# node_0.add_out_edge(rlt, obj_node)\n# node_1.del_out_edge(rlt, obj_node)\n# for rlt in node_0.in_edges.keys():\n# for sbj_node in node_0.in_edges[rlt]:\n# sbj_node.add_out_edge(rlt, node_0)\n# sbj_node.del_out_edge(rlt, node_1)\n# return node_0\n# \n# def insert_node(self, node_0, node_1):\n# for rlt in node_0.out_edges.keys():\n# for obj_node in node_0.out_edges[rlt]:\n# obj_node.add_in_edge(rlt, node_1)\n# for rlt in node_0.in_edges.keys():\n# for sbj_node in node_0.in_edges[rlt]:\n# sbj_node.add_out_edge(rlt, node_1)\n# \n# def self_check(self, std_graph):\n# def check_(sbj, obj):\n# std_sbj_node, std_obj_node = std_graph.get_node(node.name), std_graph.get_node(obj_node.name)\n# if std_sbj_node and std_obj_node and std_sbj_node.check_out_edge(rlt, std_obj_node) == 'correct':\n# continue\n# else:\n# if std_sbj_node and std_obj_node:\n# pass\n# elif std_sbj_node and not std_obj_node:\n# pass\n# elif std_obj_node and not std_sbj_node:\n# pass\n# else:\n# pass\n# for node in self.graph.values():\n# for rlt in node.out_edges.keys():\n# for obj_node in node.out_edges[rlt]:\n# check_(node.name, obj_node.name)\n# for rlt in node.in_edges.keys():\n# for sbj_node in node.in_edges[rlt]:\n# check_(sbj_node.name, node.name)\n# \n# def split_nodes(self):\n# for node in self.graph.values()[:]:\n# nodes_name = []\n# for node_type in node.types:\n# if node_type in ('文学作品', '作品', '影视作品', '图书作品', '歌曲'):\n# nodes_name = re.split(\"》《|》、《|》,《|\\d\\d |》和《\", node.name)\n# break\n# elif node.name.find(',') == -1:\n# if node_type in ('企业', '企业/品牌', \"机构\"):\n# nodes_name = re.split(\"、|/|、\", node.name)\n# break\n# elif node_type in ('人物', '历史人物', '娱乐人物') and len(node.name) <= 40:\n# nodes_name = re.split(\"、|/\", node.name)\n# if len(nodes_name) > 1:\n# new_nodes_name = []\n# for n in nodes_name[:]:\n# if len(n) < 15:\n# new_nodes_name.append(n)\n# nodes_name = new_nodes_name\n# break\n# \n# if nodes_name:\n# for new_node_name in nodes_name:\n# new_node = self.copy_node(node, new_node_name)\n# self.insert_node(node, new_node)\n# if new_node_name in self.graph.keys():\n# self.combine_node(self.graph[new_node_name], new_node)\n# else:\n# self.graph[new_node_name] = new_node\n# \n# def correct_unknown_node(self, std_graph):\n# def is_illegal_name(node_name):\n# return True\n# \n# def correct_(sbj, rlt, obj):\n# sbj_nodes_name = []\n# obj_nodes_name = []\n# std_sbj_node, std_obj_node = std_graph.get_node(sbj), std_graph.get_node(obj)\n# if std_sbj_node and not std_obj_node:\n# sbj2obj_nodes = std_sbj_node.out_edges.get(rlt)\n# if sbj2obj_nodes:\n# for oobj in sbj2obj_nodes:\n# if oobj.name in obj and len(oobj.name) > 1 and (is_illegal_name(obj) or False):\n# obj_nodes_name.append(oobj.name)\n# elif not std_sbj_node and std_obj_node:\n# obj2sbj_nodes = std_obj_node.in_edges.get(rlt)\n# if obj2sbj_nodes:\n# for ssbj in obj2sbj_nodes:\n# if ssbj.name in sbj and len(sbj.name) > 1 and (is_illegal_name(sbj) or False):\n# sbj_nodes_name.append(ssbj.name)\n# return sbj_nodes_name, obj_nodes_name\n# \n# \n# \n# for sbj_node in self.graph.values():\n# for rlt in sbj_node.out_edges.keys():\n# for obj_node in 
sbj_node.out_edges[rlt]:\n# sbj, obj = sbj_node.name, obj_node.name\n# correct_(sbj, obj)\n# \n# \n# def generate_spos(self):\n# spos = []\n# for sbj_node in self.graph.values():\n# for rlt in sbj_node.out_edges.keys():\n# for obj_node in sbj_node.out_edges[rlt]:\n# spos.append({\n# 'subject_type': schemas_dict['rlt']['subject_type'],\n# 'subject': sbj_node.name,\n# 'predicate': rlt,\n# 'object_type': {'@value': schemas_dict['rlt']['object_type']['@value']},\n# 'object': {'@value': obj_node.name}\n# })\n# return spos\n\nstd_kg = KnowledgeGraph()\nstd_kg.load_graph_from_raw_data([ROOT_DATA + 'lic_2020/decomposed_dev_data.json',\n ROOT_DATA + 'lic_2020/decomposed_train_data.json'])\n\nfile_name = ROOT_DATA + 'lic_2020/my_schema.json'\nfp = open(file_name, mode='r', encoding='utf-8')\nschemas = [json.loads(d) for d in fp.readlines()]\nschemas_dict = dict([(d['predicate'], d) for d in schemas])\n\n\ndef correct_type(spo):\n spo['subject_type'] = schemas_dict[spo['predicate']]['subject_type']\n spo['object_type']['@value'] = schemas_dict[spo['predicate']]['object_type']['@value']\n return spo\n\n\ndef is_chinese(uchar):\n \"\"\"判断一个unicode是否是汉字\"\"\"\n if uchar >= u'\\u4e00' and uchar <= u'\\u9fa5':\n return True\n else:\n return False\n\n\ndef have_chinese(string):\n for s in string:\n if is_chinese(s):\n return True\n return False\n\n\nif __name__ == '__main__':\n from global_config import *\n\n train_file = ROOT_DATA + 'lic_2020/decomposed_train_data.json'\n dev_file = ROOT_DATA + 'lic_2020/decomposed_dev_data.json'\n test_file = ROOT_RESULT + 'ave3_test_joint.json'\n test_file_out = ROOT_RESULT + 'kg_test_joint.json'\n\n kg = KnowledgeGraph()\n kg.load_graph_from_raw_data([train_file, dev_file])\n # kg.self_check()\n exit()\n # kg.generate_kg(train_file)\n # kg.dump_kg(kg_file)\n # exit()\n # kg.load_kg(kg_file)\n\n count = 0\n new_data = []\n exist_num = 0\n unknown_num = 0\n unknown_num_0 = 0\n unknown_num_1 = 0\n all_num = 0\n with open(test_file, 'r', encoding='utf-8') as fp:\n raw_data = fp.readlines()\n for d in raw_data:\n d = json.loads(d)\n spos = kg.check_spo(d['text'], d['spo_list'])\n if spos != d['spo_list']:\n # print(spos)\n pass\n print(exist_num, unknown_num, unknown_num_0, all_num)\n","repo_name":"BaberMuyu/relation-extraction","sub_path":"utils/kg.py","file_name":"kg.py","file_ext":"py","file_size_in_byte":29014,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"48"} +{"seq_id":"74913916945","text":"import base64\nfrom io import BytesIO\n\nfrom aiohttp import web, ClientSession\nfrom PIL import Image\n\nfrom image_processing.handlers import ImageRotator\n\n\nrouters = web.RouteTableDef()\n\nSTORAGE_API_URL = 'http://storage_api:8080'\n\n\ndef base64_encode(data):\n return base64.b64encode(data).decode('utf-8')\n\n\ndef base64_decode(data):\n return base64.b64decode(data)\n\n\ndef check_for_madatories(req_data, params):\n message = None\n for param in params:\n if param not in req_data:\n message = \"Param '{}' is mandatory\".format(param)\n return message\n\n\ndef standard_response(success=None, message=None, data=None):\n \"\"\"Creates a standard response dict from keyword arguments.\"\"\"\n if success is None:\n success = True\n if message is None:\n message = ''\n if data is None:\n data = []\n _response = {'success': success, 'message': message, 'data': data}\n return web.json_response(_response)\n\n\n@routers.post('/rotate')\nasync def rotate_image(request):\n \"\"\"\n Rotates an image\n\n Parameters\n ----------\n 
image_id: UUID of image\n angle: int – In degrees counter clockwise.\n\n Returns\n -------\n success : bool\n True if successful, false if not\n message : str\n A human-readable error message\n data : dict\n The data response having the form\n ::\n {\n 'image': rotated image in base64-encoded representation\n }\n \"\"\"\n data = await request.json()\n error = check_for_madatories(data, ('image_id', 'angle'))\n if error:\n return standard_response(success=False, message=error)\n\n images_url = '{url}/images/{image_id}'.format(url=STORAGE_API_URL, image_id=data['image_id'])\n async with ClientSession() as session:\n async with session.get(images_url) as resp:\n resp_data = await resp.json()\n\n if not resp_data['success']:\n return standard_response(success=False, message=resp_data['message'])\n\n image_data = resp_data['data']['data']\n rotator = ImageRotator(data=base64_decode(image_data), format=resp_data['data']['format'])\n image = rotator.rotate(angle=data['angle'])\n if not image:\n return standard_response(success=False, message='Rotation error occurred')\n\n return standard_response(data={'image': base64_encode(image)})\n\n\nif __name__ == '__main__':\n app = web.Application()\n app.add_routes(routers)\n web.run_app(app, port=8081)\n","repo_name":"discort/ProgImage","sub_path":"services/rotation/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1738496989","text":"\nfrom django.contrib.auth import views as auth_views\nfrom django.urls import path, re_path\nfrom django_app import views\n\nfrom django_app.views import Mylist\n\nfrom rest_framework_simplejwt.views import (\n TokenObtainPairView,\n TokenRefreshView,\n)\n\nurlpatterns = [\n path('', views.index, name=\"index\"),\n\n path('token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),\n path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),\n\n \n\n re_path(route=r'^users/$', view=views.users, name=\"users\"),\n\n re_path(route=r'^chat/(?P\\d+)/$', view=views.chat, name=\"chat_id\"),\n re_path(route=r'^chat/$', view=views.chat, name=\"chat\"),\n\n re_path(route=r'^weather/$', view=views.weather, name=\"weather\"),\n re_path(route=r'^weather/(?P\\d+)/$', view=views.weather, name=\"weather_id\"),\n\n re_path(route=r'^icecream/(?P\\d+)/$', view=views.icecream, ),\n re_path(route=r'^icecream/$', view=views.icecream, ),\n \n\n re_path(route=r'^commenticecream/(?P\\d+)/$', view=views.comment_icecream),\n\n re_path(route=r'^jsonplaceholder/$', view=views.jsonplaceholder),\n\n path('registration/', views.registration),\n\n path('mylistdj//', Mylist.as_view()),\n path('mylistdj/', Mylist.as_view()),\n\n re_path(route=r'^sendemail/$', view=views.sendingemail, name=\"sendingemail\"),\n\n path('download_img/', view=views.download_img, name='download_img'),\n\n path('seleryredis/', view=views.seleryredis, name='seleryredis'),\n\n\n \n\n # path('frontpage/', views.frontpage, name='frontpage'),\n # path('signup/', views.signup, name='signup'),\n # path('login/', auth_views.LoginView.as_view(template_name = 'django_app/login.html'), name='login' ),\n # path('logout/', auth_views.LogoutView.as_view(), name='logout'),\n\n # path('rooms/', views.GetAllUsers.as_view(), name='rooms'),\n # path('rooms//', views.ChatRoom.as_view(), name='room'),\n \n\n 
\n]\n\n\n","repo_name":"lantemir/home-work","sub_path":"django_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15536798250","text":"from kivymd.app import MDApp\nfrom kivy.uix.screenmanager import Screen, ScreenManager\nfrom kivymd.uix.textfield import *\nfrom kivymd.uix.slider import MDSlider\n\n\nclass main(Screen):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n### ======= App goes here ======= ###\n\n\n self.text = MDTextField(hint_text = 'Sound', helper_text = 'Please set volume/sound level', helper_text_mode = 'on_focus',\n on_text_validate = lambda x: self.check_text())\n self.text.pos_hint = {'center_x' : 0.5, 'center_y' : 0.5}\n self.text.size_hint =(0.4, None)\n self.add_widget(self.text)\n\n self.sl = MDSlider(pos_hint = {'center_y' : 0.3})\n self.add_widget(self.sl)\n\n def check_text(self):\n self.sl.value = float(self.text.text)\n\n\n\n### ======= App goes here ======= ###\n \nclass wt_app(MDApp):\n def build(self):\n\n SC = ScreenManager()\n SC.add_widget(main(name = 'Main_Page'))\n self.theme_cls.theme_style = 'Dark'\n self.theme_cls.primary_palette = 'DeepPurple'\n\n return(SC)\n\nwt_app().run()","repo_name":"Vladzmano/Python-Kivy","sub_path":"text_field.py","file_name":"text_field.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1368374510","text":"#\n# @lc app=leetcode id=318 lang=python3\n#\n# [318] Maximum Product of Word Lengths\n#\n\n# @lc code=start\nclass Solution:\n def maxProduct(self, words: List[str]) -> int:\n return self.sol1(words)\n\n def sol1(self, words) :\n if not words : return 0\n bits = [0 for word in words]\n sorted(words, key = lambda word : len(word), reverse = True)\n for i in range(len(words)) :\n c = collections.Counter(words[i])\n for key in c.keys():\n bits[i] += 1 << (ord(key) - ord('a'))\n ans = []\n for i in range(len(words)) :\n for j in range(i + 1, len(words)) :\n if bits[i] & bits[j] == 0 : \n ans.append(len(words[i]) * len(words[j]))\n return max(ans) if ans else 0\n\n \n \n# @lc code=end\n\n","repo_name":"quixoteji/Leetcode","sub_path":"solutions/318.maximum-product-of-word-lengths.py","file_name":"318.maximum-product-of-word-lengths.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"15204026503","text":"\"\"\"MetaGenScope custom renderers that wraps responses in envelope.\"\"\"\n\nfrom flask_api.renderers import JSONRenderer\n\n\nclass EnvelopeJSONRenderer(JSONRenderer): # pylint: disable=too-few-public-methods\n \"\"\"JSON Renderer that wraps response in enveloper {status, message, and data}.\"\"\"\n\n media_type = 'application/json'\n\n def render(self, data, media_type, **options):\n \"\"\"Wrap response in envelope.\"\"\"\n response = {'status': 'error'}\n status_code = options['status_code']\n if status_code < 200 or status_code >= 300:\n detail = data['message']\n response['message'] = detail\n else:\n response['status'] = 'success'\n response['data'] = data\n return super(EnvelopeJSONRenderer, self).render(response, media_type, 
**options)\n","repo_name":"MetaGenScope/metagenscope-server","sub_path":"app/api/renderers.py","file_name":"renderers.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32050942282","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 21 21:47:32 2021\n\n@author: leo\n\"\"\"\n\t\n\n\"\"\"\nImports\n\"\"\"\n\nfrom pandas import read_csv, datetime, DataFrame, concat, Series, Grouper, period_range, Timestamp\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential\nfrom keras.layers import Dense, LSTM\nfrom keras.utils.vis_utils import plot_model\nfrom math import sqrt\nimport matplotlib.pyplot as plt\nimport numpy\nimport sys\n\n\n\"\"\"\nFunctions\n\"\"\"\n\n# load dataset \ndef parser(x):\n format = '%Y-%m-%d'\n return datetime.strptime('T'.join(x.split('T', 2)[:1]), format)\n\n# frame a sequence as a supervised learning problem\ndef timeseries_to_supervised(data, lag=1):\n\tdf = DataFrame(data)\n\tcolumns = [df.shift(i) for i in range(1, lag+1)]\n\tcolumns.append(df)\n\tdf = concat(columns, axis=1)\n\treturn df\n\n# create a differenced series\ndef difference(dataset, interval=1):\n\tdiff = list()\n\tfor i in range(interval, len(dataset)):\n\t\tvalue = dataset[i] - dataset[i - interval]\n\t\tdiff.append(value)\n\treturn Series(diff)\n \n# invert differenced value\ndef inverse_difference(history, yhat, interval=1):\n\treturn yhat + history[-interval]\n\n# scale train and test data to [-1, 1]\ndef scale(train, test):\n\t# fit scaler\n\tscaler = MinMaxScaler(feature_range=(-1, 1))\n\tscaler = scaler.fit(train)\n\t# transform train\n\ttrain = train.reshape(train.shape[0], train.shape[1])\n\ttrain_scaled = scaler.transform(train)\n\t# transform test\n\ttest = test.reshape(test.shape[0], test.shape[1])\n\ttest_scaled = scaler.transform(test)\n\treturn scaler, train_scaled, test_scaled\n\n# inverse scaling for a forecasted value\ndef invert_scale(scaler, X, value):\n\tnew_row = [x for x in X] + [value]\n\tarray = numpy.array(new_row)\n\tarray = array.reshape(1, len(array))\n\tinverted = scaler.inverse_transform(array)\n\treturn inverted[0, -1]\n\n# define network and trains to fit data\ndef fit_lstm(train, batch_size, nb_epoch, neurons, timesteps):\n X, y = train[:, 0:-1], train[:, -1]\n X = X.reshape(X.shape[0], timesteps, 1)\n model = Sequential()\n model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))\n model.add(Dense(1))\n model.compile(loss='mean_squared_error', optimizer='adam')\n for i in range(nb_epoch):\n model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)\n model.reset_states()\n plot_model(model, to_file='LSTM_model.png', show_shapes=True, show_layer_names=True)\n return model\n\n# make a prediction\ndef forecast_lstm(model, batch_size, X):\n\tX = X.reshape(1, len(X), 1)\n\tyhat = model.predict(X, batch_size=batch_size)\n\treturn yhat[0,0]\n\n\n\"\"\"\nApplication on magnitude forcasting\n\"\"\"\n\nseries = read_csv('earthquake_data.csv', header=0, usecols=['time','mag'], parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)\n\n# line plot\nseries.plot()\nplt.xlabel('Time')\nplt.ylabel('Magnitude')\nplt.show()\nplt.close()\n\n# plot number of earthquakes per year\neq_year = series.groupby(series.index.year).count()\nplt.bar(eq_year.index.values, eq_year.values)\nplt.xlabel('Year')\nplt.ylabel('Number of 
earthquakes')\nplt.show()\n\n# remove data after 2011\nseries = series[:'2010-12-31']\n\n# plot number of earthquakes per year\neq_year = series.groupby(series.index.year).count()\nplt.bar(eq_year.index.values, eq_year.values)\nplt.xlabel('Year')\nplt.ylabel('Number of earthquakes')\nplt.show()\n\n# remove data before 1973\nseries = series['1973-01-01':]\n\n# plot number of earthquakes per year\neq_year = series.groupby(series.index.year).count()\nplt.bar(eq_year.index.values, eq_year.values)\nplt.xlabel('Year')\nplt.ylabel('Number of earthquakes')\nplt.show()\n\neq_month = series.groupby(Grouper(freq=\"M\"))\n\neq_month_mean = eq_month.mean()\neq_month_mean.fillna(0, inplace=True)\n\neq_month_count = eq_month.count()\n\ntimesteps = 1\n\n# get raw data\nraw_values = eq_month_count.values\ndiff_values = difference(raw_values, 1)\n\n# create supervised set\nsupervised = timeseries_to_supervised(diff_values, timesteps)\nsupervised_values = supervised.values[timesteps:,:]\n\n# split data into train and test-sets\ncounter = 0\nfor name, group in eq_month:\n counter +=1\n if '2006-12-31' in str(name):\n break\n\nsplit_index = len(eq_month) - counter\ntrain, test = supervised_values[0:-split_index], supervised_values[-split_index:]\n\n# transform scale \nscaler, train_scaled, test_scaled = scale(train, test)\n\n# fit the model\nlstm_model = fit_lstm(train_scaled, 1, 1000, 1, timesteps)\n\n# forecast the entire training dataset to build up state for forecasting\ntrain_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)\n\nlstm_model.predict(train_reshaped, batch_size=1)\n\n# walk-forward validation on the test data\npredictions = list()\nfor i in range(len(test_scaled)):\n\t# make one-step forecast\n X, y = test_scaled[i, 0:-1], test_scaled[i, -1]\n yhat = forecast_lstm(lstm_model, 1, X)\n\t# invert scaling\n yhat = invert_scale(scaler, X, yhat)\n\t# invert differencing\n yhat = inverse_difference(raw_values, yhat, len(test_scaled)+1-i)\n\t# store forecast\n predictions.append(yhat)\n expected = raw_values[len(train) + i + 1]\n print('Month=%d, Predicted=%f, Expected=%f' % (i+1, yhat, expected))\n\n# report performance\nrmse = sqrt(mean_squared_error(raw_values[-split_index:], predictions))\nprint('Test RMSE: %.3f' % rmse)\n# line plot of observed vs predicted\nplt.plot(raw_values[-split_index:])\nplt.plot(predictions)\nplt.xlabel('Month number')\nplt.ylabel('Number of earthquakes')\nplt.legend(['Raw values', 'Predictions'], loc = 'upper right')\nplt.show()","repo_name":"LeoChazl/PFE","sub_path":"Simple LSTM/mag_lstm.py","file_name":"mag_lstm.py","file_ext":"py","file_size_in_byte":5586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16863767264","text":"#!/usr/bin/python\n\n# Coded by Siwei Liu\n# Date 2016-09-06\n# Version 1.0.0\n\nimport myapp\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n htmlstr = myapp.index(request.environ)\n return(htmlstr)\n\n@app.route('/photos')\ndef photos():\n htmlstr = \"Photo Gallery\"\n htmlstr += myapp.listdir(request.environ)\n return(htmlstr)\n\n@app.route('/visitors')\ndef visitors():\n htmlstr = \"Visited client database information\"\n htmlstr += myapp.visitor_db_query()\n return(htmlstr)\n\n@app.route('/imgdisplay')\ndef imgdisplay():\n htmlstr = \"Photo Gallery\"\n htmlstr += myapp.imgdisplay(request.args.get('dir'), request.environ)\n return(htmlstr)\n\n@app.route('/headers')\ndef headers():\n htmlstr = myapp.get_headers(request)\n 
return(htmlstr)\n\n@app.route('/memo')\ndef memo():\n htmlstr = \"Online memo\"\n htmlstr += myapp.memoindex(request.environ)\n return(htmlstr)\n\n@app.route('/writememo', methods=['POST'])\ndef writememo():\n title = request.form['title']\n text = request.form['text']\n return(myapp.memodb_write([title, text], request.environ))\n\n@app.route('/memodb')\ndef memodb():\n htmlstr = \"Memo Database\"\n htmlstr += myapp.memodb_read(request.environ)\n return(htmlstr)\n\n@app.route('/memoid')\ndef memoid():\n htmlstr = myapp.memodb_byID(request.args.get('id'), request.environ)\n return(htmlstr)\n\n@app.route('/memopage')\ndef memopage():\n htmlstr = \"Memo Database\"\n htmlstr += myapp.memodb_read_page(request.args.get('id'), request.environ)\n return(htmlstr)\n\nif __name__ == '__main__':\n app.run()\n\n","repo_name":"swliu-2016/wsgi","sub_path":"swliu.py","file_name":"swliu.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32666478847","text":"from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('main', '0007_round3_3'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='requestrequirement',\n name='date_from',\n field=models.DateField(blank=True, null=True, verbose_name='дата с'),\n ),\n migrations.AddField(\n model_name='requestrequirement',\n name='date_to',\n field=models.DateField(blank=True, null=True, verbose_name='дата по'),\n ),\n migrations.AlterField(\n model_name='timesheetrow',\n name='request',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='time_sheet_rows',\n to='main.request', verbose_name='проектный запрос'),\n ),\n migrations.AlterField(\n model_name='requestrequirementcv',\n name='status',\n field=models.CharField(\n choices=[('pre-candidate', 'Pre Candidate'), ('candidate', 'Candidate'), ('canceled', 'Canceled'),\n ('worker', 'Worker')], default='candidate', max_length=50, verbose_name='статус'),\n ),\n ]\n","repo_name":"skills-cloud/b2b-cloud","sub_path":"apps/main/migrations/0008_round3_4.py","file_name":"0008_round3_4.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"74940791824","text":"import pandas as pd\r\nimport babel.numbers, decimal \r\nimport matplotlib.pyplot as plt\r\n\r\ndf = pd.read_csv(\"london_house_prices.csv\")\r\n\r\ndef distance(miles):\r\n return miles * 1609.34\r\n\r\ndf[\"nearest_station_meters\"] = df[\"nearest_station_meters\"].apply(distance)\r\n\r\ndf[\"postcode_outer\"].fillna(-1, inplace=True)\r\ndf[\"size_sqft\"].fillna(-1, inplace=True)\r\n\r\n\r\n\r\n# less_500 = df[df[\"nearest_station_meters\"]<500][\"price_pounds\"].mean()\r\n# more_500 = df[df[\"nearest_station_meters\"]>500][\"price_pounds\"].mean()\r\n\r\n# less_500 = round(less_500, 2)\r\n# more_500 = round(more_500, 2)\r\n\r\n# print(\"Цена домов ближе, чем 500 метров от публичного транспорта: \", babel.numbers.format_currency(decimal.Decimal(less_500), \"GBP\" ))\r\n# print(\"Цена домов дальше, чем 500 метров от публичного транспорта:\", babel.numbers.format_currency(decimal.Decimal(more_500), \"GBP\" ))\r\n\r\n\r\nmaxless = round(df[df[\"nearest_station_meters\"]<500][\"price_pounds\"].max(), 2)\r\nmaxmore = round(df[df[\"nearest_station_meters\"]>500][\"price_pounds\"].max(), 2)\r\n\r\nminless = 
round(df[df[\"nearest_station_meters\"]<500][\"price_pounds\"].min(), 2)\r\nminmore = round(df[df[\"nearest_station_meters\"]>500][\"price_pounds\"].min(), 2)\r\n\r\nmeanless = round(df[df[\"nearest_station_meters\"]<500][\"price_pounds\"].mean(), 2)\r\nmeanmore = round(df[df[\"nearest_station_meters\"]>500][\"price_pounds\"].mean(), 2)\r\n\r\nmidless = round(df[df[\"nearest_station_meters\"]<500][\"price_pounds\"].median(), 2)\r\nmidmore = round(df[df[\"nearest_station_meters\"]>500][\"price_pounds\"].median(), 2)\r\n\r\nless = round(midless / 1000000, 2)\r\nmore = round(midmore / 1000000, 2)\r\n\r\n# s0 = pd.Series(data = [maxless, maxmore], index = [\"ближе\", \"дальше\"])\r\n# s0.plot(kind=\"bar\")\r\n# plt.show()\r\n\r\n\r\n# fig, ax = plt.subplots()\r\n# name = [\"ближе, чем 500 метров\" + \"\\n\" + \"от общественного транспорта\", \"дальше, чем 500 метров\" + \"\\n\" + \"от общественного транспорта\"]\r\n# counts = [less, more]\r\n# bar_colours = [\"tab:red\", \"tab:blue\"]\r\n\r\n# ax.bar(name, counts, color=bar_colours)\r\n\r\n# ax.set_ylabel(\"Фунты (в миллионах)\")\r\n# ax.set_title(\"Среднее значения цен квартир в Лондоне\")\r\n# plt.show()\r\n\r\n\r\ncounter = 0\r\ncentral = 0\r\nsuburban = 0\r\n\r\ndef central_apply(row):\r\n global central, counter\r\n if row[\"nearest_station_name\"].strip() == \"Oxford Circus Station\":\r\n counter += 1\r\n central += row[\"price_pounds\"]\r\n\r\ndef central_apply(row):\r\n global suburban, counter\r\n if row[\"nearest_station_name\"].strip() == \"Epping Station\":\r\n counter += 1\r\n suburban += row[\"price_pounds\"]\r\n\r\ndf.apply(central_apply, axis=1)\r\ncentral /= counter\r\nsuburban /= counter\r\ncentral /= 1000000\r\nsuburban /= 1000000\r\n\r\nfig, ax = plt.subplots()\r\nname = [\"Рядом со станцией в центральном Лондоне\", \"Рядом со станцией на оркаине Лондона\"]\r\ncounts = [central, suburban]\r\nbar_colours = [\"tab:red\", \"tab:blue\"]\r\n\r\nax.bar(name, counts, color=bar_colours)\r\n\r\nax.set_ylabel(\"Фунты (в миллионах)\")\r\nax.set_title(\"Среднее цена квартир в Лондоне\")\r\nplt.show()\r\n\r\n# s0 = pd.Series(data = [minless, minmore], index = [\"ближе\", \"дальше\"])\r\n# s0.plot(kind=\"bar\")\r\n# plt.show()\r\n# s0 = pd.Series(data = [meanless, meanmore], index = [\"ближе\", \"дальше\"])\r\n# s0.plot(kind=\"bar\")\r\n# plt.show()\r\n# s0 = pd.Series(data = [midless, midmore], index = [\"ближе\", \"дальше\"])\r\n# s0.plot(kind=\"bar\")\r\n# plt.show()\r\n\r\n","repo_name":"omipotence/presentation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"18444634456","text":"from flask import Flask, render_template, url_for, request, redirect\nfrom in_memory_data import user_stories, get_new_id, get_story, add_story, update_story\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef index():\n table_headers = [\"Id\", \"Story Title\", \"User Story\", \"Acceptance Criteria\",\n \"Business Value\", \"Estimation\", \"Status\"]\n user_story_keys = [\"id\", \"title\", \"story\", \"criteria\", \"business\", \"estimation\", \"status\"]\n\n return render_template(\"index.html\", table_headers=table_headers,\n user_stories=user_stories, user_story_keys=user_story_keys)\n@app.route(\"/add\")\ndef add_story_get():\n new_user_story =\\\n {\n \"id\": None,\n \"title\": \"\",\n \"story\": \"\",\n \"criteria\": \"\",\n \"business\": None,\n \"estimation\": None\n }\n return 
render_template(\"updatestory.html\", user_story=new_user_story, statuses=None )\n\n@app.route('/update/')\ndef update_story_get(user_story_id):\n user_story = get_story(user_story_id)\n\n if user_story is None:\n return redirect(url_for(\"index\"))\n else:\n statuses = [\"Planning\", \"TODO\", \"WIP\", \"QA\", \"DONE\"]\n return render_template(\"updatestory.html\", user_story=user_story, statuses=statuses)\n\n\n@app.route('/update/post', methods=['POST'])\ndef add_story_post():\n new_user_story = dict(request.form)\n new_id = get_new_id()\n new_user_story['id'] = new_id\n new_user_story['status'] = \"Planning\"\n\n add_story(new_user_story)\n\n return redirect(url_for(\"index\"))\n\n@app.route('/update/post', methods=['POST'])\ndef update_story_post():\n updated_user_story = dict(request.form)\n updated_user_story[\"id\"] = int(updated_user_story[\"id\"])\n\n update_story(updated_user_story)\n\n return redirect(url_for(\"index\"))\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"TomaszStraszewski/SuperSprinter3000","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28992294001","text":"import mitsuba\nimport pytest\nimport enoki as ek\n\n\ndef create_camera(o, d, fov=34, fov_axis='x', s_open=1.5, s_close=5):\n from mitsuba.core.xml import load_string\n return load_string(\"\"\"\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \"\"\".format(ox=o[0], oy=o[1], oz=o[2],\n tx=o[0]+d[0], ty=o[1]+d[1], tz=o[2]+d[2],\n fov=fov, fov_axis=fov_axis, so=s_open, sc=s_close))\n\n\norigins = [[1.0, 0.0, 1.5], [1.0, 4.0, 1.5]]\ndirections = [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]]\n\n\n@pytest.mark.parametrize(\"origin\", origins)\n@pytest.mark.parametrize(\"direction\", directions)\n@pytest.mark.parametrize(\"s_open\", [0.0, 1.5])\n@pytest.mark.parametrize(\"s_time\", [0.0, 3.0])\ndef test01_create(variant_scalar_rgb, origin, direction, s_open, s_time):\n from mitsuba.core import BoundingBox3f, Vector3f, Transform4f\n\n camera = create_camera(origin, direction, s_open=s_open, s_close=s_open + s_time)\n\n assert ek.allclose(camera.near_clip(), 1)\n assert ek.allclose(camera.far_clip(), 35)\n assert ek.allclose(camera.focus_distance(), 15)\n assert ek.allclose(camera.shutter_open(), s_open)\n assert ek.allclose(camera.shutter_open_time(), s_time)\n assert not camera.needs_aperture_sample()\n assert camera.bbox() == BoundingBox3f(origin, origin)\n assert ek.allclose(camera.world_transform().eval(0).matrix,\n Transform4f.look_at(origin, Vector3f(origin) + direction, [0, 1, 0]).matrix)\n\n\n@pytest.mark.parametrize(\"origin\", origins)\n@pytest.mark.parametrize(\"direction\", directions)\ndef test02_sample_ray(variant_packet_spectral, origin, direction):\n # Check the correctness of the sample_ray() method\n from mitsuba.core import sample_shifted, sample_rgb_spectrum\n\n camera = create_camera(origin, direction)\n\n time = 0.5\n wav_sample = [0.5, 0.33, 0.1]\n pos_sample = [[0.2, 0.1, 0.2], [0.6, 0.9, 0.2]]\n aperture_sample = 0 # Not being used\n\n ray, spec_weight = camera.sample_ray(time, wav_sample, pos_sample, aperture_sample)\n\n # Importance sample wavelength and weight\n wav, spec = sample_rgb_spectrum(sample_shifted(wav_sample))\n\n assert ek.allclose(ray.wavelengths, wav)\n assert ek.allclose(spec_weight, spec)\n assert ek.allclose(ray.time, time)\n assert ek.allclose(ray.o, origin)\n\n # Check that a [0.5, 0.5] position_sample generates a ray\n # that points 
in the camera direction\n ray, _ = camera.sample_ray(0, 0, [0.5, 0.5], 0)\n assert ek.allclose(ray.d, direction, atol=1e-7)\n\n\n\n@pytest.mark.parametrize(\"origin\", origins)\n@pytest.mark.parametrize(\"direction\", directions)\ndef test03_sample_ray_differential(variant_packet_spectral, origin, direction):\n # Check the correctness of the sample_ray_differential() method\n from mitsuba.core import sample_shifted, sample_rgb_spectrum\n\n camera = create_camera(origin, direction)\n\n time = 0.5\n wav_sample = [0.5, 0.33, 0.1]\n pos_sample = [[0.2, 0.1, 0.2], [0.6, 0.9, 0.2]]\n\n ray, spec_weight = camera.sample_ray_differential(time, wav_sample, pos_sample, 0)\n\n # Importance sample wavelength and weight\n wav, spec = sample_rgb_spectrum(sample_shifted(wav_sample))\n\n assert ek.allclose(ray.wavelengths, wav)\n assert ek.allclose(spec_weight, spec)\n assert ek.allclose(ray.time, time)\n assert ek.allclose(ray.o, origin)\n\n # Check that the derivatives are orthogonal\n assert ek.allclose(ek.dot(ray.d_x - ray.d, ray.d_y - ray.d), 0, atol=1e-7)\n\n # Check that a [0.5, 0.5] position_sample generates a ray\n # that points in the camera direction\n ray_center, _ = camera.sample_ray_differential(0, 0, [0.5, 0.5], 0)\n assert ek.allclose(ray_center.d, direction, atol=1e-7)\n\n # Check correctness of the ray derivatives\n\n # Deltas in screen space\n dx = 1.0 / camera.film().crop_size().x\n dy = 1.0 / camera.film().crop_size().y\n\n # Sample the rays by offsetting the position_sample with the deltas\n ray_dx, _ = camera.sample_ray_differential(0, 0, [0.5 + dx, 0.5], 0)\n ray_dy, _ = camera.sample_ray_differential(0, 0, [0.5, 0.5 + dy], 0)\n\n assert ek.allclose(ray_dx.d, ray_center.d_x)\n assert ek.allclose(ray_dy.d, ray_center.d_y)\n\n\n@pytest.mark.parametrize(\"origin\", [[1.0, 0.0, 1.5]])\n@pytest.mark.parametrize(\"direction\", [[0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])\n@pytest.mark.parametrize(\"fov\", [34, 80])\ndef test04_fov_axis(variant_packet_spectral, origin, direction, fov):\n # Check that sampling position_sample at the extrimities of the unit square\n # along the fov_axis should generate a ray direction that make angle of fov/2\n # with the camera direction.\n\n from mitsuba.core import sample_shifted, sample_rgb_spectrum\n\n def check_fov(camera, sample):\n ray, _ = camera.sample_ray(0, 0, sample, 0)\n assert ek.allclose(ek.acos(ek.dot(ray.d, direction)) * 180 / ek.pi, fov / 2)\n\n # In the configuration, aspect==1.5, so 'larger' should give the 'x'-axis\n for fov_axis in ['x', 'larger']:\n camera = create_camera(origin, direction, fov=fov, fov_axis=fov_axis)\n for sample in [[0.0, 0.5], [1.0, 0.5]]:\n check_fov(camera, sample)\n\n # In the configuration, aspect==1.5, so 'smaller' should give the 'y'-axis\n for fov_axis in ['y', 'smaller']:\n camera = create_camera(origin, direction, fov=fov, fov_axis=fov_axis)\n for sample in [[0.5, 0.0], [0.5, 1.0]]:\n check_fov(camera, sample)\n\n # Check the 4 corners for the `diagonal` case\n camera = create_camera(origin, direction, fov=fov, fov_axis='diagonal')\n for sample in [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]:\n check_fov(camera, sample)\n\n\n","repo_name":"tizian/specular-manifold-sampling","sub_path":"src/sensors/tests/test_perspective.py","file_name":"test_perspective.py","file_ext":"py","file_size_in_byte":6757,"program_lang":"python","lang":"en","doc_type":"code","stars":204,"dataset":"github-code","pt":"48"} +{"seq_id":"7008852651","text":"#!/usr/bin/python3\n\nimport sys\nimport json\nimport networkx as nx\nimport numpy 
as np\nimport networkit as nk\nfrom collections import defaultdict\nimport heapq\n\ndef make_graph_from_json(json_data):\n G = nk.Graph()\n\n for node in json_data:\n for neighbor in json_data[node]:\n G.addEdge(int(node), int(neighbor), addMissing=True)\n\n return G\n\ndef get_seed_nodes(graph_data, n_players, n_seeds, n_rounds):\n G = make_graph_from_json(graph_data)\n N = G.numberOfNodes()\n\n print(\"[INFO]: |V|={}, |E|={}, seeds={}, iters={}\".format(\n N, G.numberOfEdges(), n_seeds, n_rounds))\n\n K = 0.1 * N # Top 10% of nodes\n node_scores = defaultdict(float)\n\n dc = nk.centrality.DegreeCentrality(G, normalized=True)\n dc.run()\n \n for node, score in dc.ranking():\n node_scores[node] += 0.5 * score\n\n tc = nk.centrality.TopHarmonicCloseness(G, k=K)\n tc.run()\n\n nodes = tc.topkNodesList(includeTrail=True)\n scores = np.array(tc.topkScoresList(includeTrail=True))\n\n for node, score in zip(nodes, scores):\n node_scores[node] += 0.5 * score\n\n top = heapq.nlargest(n_seeds, node_scores, key=lambda x: node_scores[x])\n seeds = []\n for _ in range(n_rounds):\n seeds.append(top)\n\n return [[str(node) for node in round] for round in seeds]\n","repo_name":"jyyeo/Pandamaniac","sub_path":"combined_centrality.py","file_name":"combined_centrality.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"12435069476","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 12 15:46:16 2020\n\n@author: sergio\n\"\"\"\nimport subprocess\nimport numpy as np\nimport random\nimport time\nimport math\nimport funcionesBasicas\nimport sys\nfrom sys import argv\nfrom decimal import Decimal, ROUND_CEILING, ROUND_FLOOR, ROUND_HALF_UP\nfrom numpy.polynomial import Polynomial as P\n\n#######################################################################################\n# INICIO del código común para todas las fichas\n#######################################################################################\nstart = time.time()\nsys.path.insert(0, './')\ndirectorioFichas = \"./fichas/\"\n# Creamos el código albético para encriptar.\ncodigoAlfabetico = funcionesBasicas.creaCodigoAlfabetico()\n# Leemos el archivo de texto txt que contiene los elementos y el tema.\ntema,elementos = funcionesBasicas.leeElementos(argv)\n# Leemos los datos sobre la ficha para crear el archivo LaTeX y cada ficha.\ndatos = funcionesBasicas.leeDatosCabecera(argv)\n# Creamos el archivo LaTeX.\nrutaArchivoLaTeX,fLaTeX = funcionesBasicas.creaArchivoLaTeX(datos,argv[2],directorioFichas)\n# Escribimos el preámbulo del archivo LaTeX.\nfuncionesBasicas.escribePreambuloLaTeX(datos,fLaTeX)\n#######################################################################################\n# FIN del código común para todas las fichas\n#######################################################################################\n\n#######################################################################################\n# Parámetros\n#######################################################################################\nmaximoPositivo = int(input(\"Introduce el máximo positivo: \"))\nminimoNegativo = -maximoPositivo\n\n#######################################################################################\n# INICIO del código específico para esta ficha\n#######################################################################################\ndef generaOperacionesTipo1(solucion, maximoPositivo, minimoNegativo):\n # p + (p+1) + (p+2)\n 
listaExpresiones = []\n textoOperacion = \"$p + (p+1) + (p+2)$\"\n listaExpresiones.append(textoOperacion)\n listaExpresiones.append(\"$p=\"+str(round((solucion-3)/3,2))+\"$\")\n return listaExpresiones\n\ndef generaOperacionesTipo2(solucion, maximoPositivo, minimoNegativo):\n # Ap +B\n seguir = 1\n while seguir == 1:\n A = random.randrange(minimoNegativo, maximoPositivo)\n B = random.randrange(minimoNegativo, maximoPositivo)\n if A != 0:\n seguir = 0\n listaExpresiones = []\n textoOperacion = \"$\" + str(A) + \"b\"\n if B < 0:\n textoOperacion += str(B) + \"$\"\n else:\n textoOperacion += \" + \" + str(B)+\"$\"\n listaExpresiones.append(textoOperacion)\n listaExpresiones.append(\"$b=\"+str(round((solucion-B)/A,2))+\"$\")\n return listaExpresiones\n\ndef generaOperacionesTipo3(solucion, maximoPositivo, minimoNegativo):\n # (Ap +B)(Cp+D)\n seguir = 1\n while seguir == 1:\n A = random.randrange(minimoNegativo, maximoPositivo)\n B = random.randrange(minimoNegativo, maximoPositivo)\n C = random.randrange(minimoNegativo, maximoPositivo)\n D = random.randrange(minimoNegativo, maximoPositivo)\n coefA = A*C\n coefB = A*D+B*C\n coefC = B*D-solucion\n discriminante = coefB*coefB-4*coefA*coefC\n if coefA != 0 and discriminante > 0:\n x1 = (-coefB+math.sqrt(discriminante))/(2*coefA)\n x2 = (-coefB-math.sqrt(discriminante))/(2*coefA)\n if x1 == solucion or x2 == solucion:\n seguir = 0\n listaExpresiones = []\n textoOperacion = \"$(\" + str(A) + \"x\"\n if B < 0:\n textoOperacion += str(B) + \")\"\n else:\n textoOperacion += \" + \" + str(B) + \")\"\n textoOperacion += \"(\" + str(C) + \"x\"\n if D < 0:\n textoOperacion += str(D) + \")$\"\n else:\n textoOperacion += \" + \" + str(D) + \")$\"\n listaExpresiones.append(textoOperacion)\n listaExpresiones.append(\"$x=\"+str(round(x1,2))+\"$\")\n return listaExpresiones\n\ndef generaOperacionesTipo4(solucion, maximoPositivo, minimoNegativo):\n # Ap+Bq\n seguir = 1\n while seguir == 1:\n A = random.randrange(minimoNegativo, maximoPositivo)\n B = random.randrange(minimoNegativo, maximoPositivo)\n p = round(random.uniform(minimoNegativo, maximoPositivo),2)\n if B != 0:\n seguir = 0\n q = round((solucion-A*p)/B,2)\n listaExpresiones = []\n textoOperacion = \"$\"+str(A)+\"p\"\n if B < 0:\n textoOperacion += str(B)+\"q$\"\n else:\n textoOperacion += \" + \"+str(B)+\"q$\"\n listaExpresiones.append(textoOperacion)\n listaExpresiones.append(\"$p=\" + str(p) + \", q=\" + str(q) + \"$\")\n return listaExpresiones\n\ndef generaOperacionesTipo5(solucion, maximoPositivo, minimoNegativo):\n # At+Bu^2+C\n seguir = 1\n while seguir == 1:\n A = random.randrange(minimoNegativo, maximoPositivo)\n B = random.randrange(minimoNegativo, maximoPositivo)\n C = random.randrange(minimoNegativo, maximoPositivo)\n u = round(random.uniform(minimoNegativo, maximoPositivo),2)\n if A != 0:\n seguir = 0\n t = round((solucion-C-B*u*u)/A,2)\n listaExpresiones = []\n textoOperacion = \"$\"+str(A)+\"t\"\n if B < 0:\n textoOperacion += str(B)+\"u^2\"\n else:\n textoOperacion += \" + \"+str(B)+\"u^2\"\n if C < 0:\n textoOperacion += str(C) + r\"$\"\n else:\n textoOperacion += \" + \" + str(C) + r\"$\"\n listaExpresiones.append(textoOperacion)\n listaExpresiones.append(\"$t=\" + str(t) + \", u=\" + str(u) + \"$\")\n return listaExpresiones\n\ndef generaOperacionesTipo6(solucion, maximoPositivo, minimoNegativo):\n # A(t+B)\n seguir = 1\n while seguir == 1:\n A = random.randrange(minimoNegativo, maximoPositivo)\n B = random.randrange(minimoNegativo, maximoPositivo)\n if A != 0:\n seguir = 0\n t = 
round((solucion-A*B)/A,2)\n listaExpresiones = []\n textoOperacion = \"$\"+str(A)+\"(m\"\n if B < 0:\n textoOperacion += str(B)+\")$\"\n else:\n textoOperacion += \" + \"+str(B)+\")$\"\n listaExpresiones.append(textoOperacion)\n listaExpresiones.append(\"$m=\" + str(t) + \"$\")\n return listaExpresiones\n\ndef generaOperacionesTipo7(solucion, maximoPositivo, minimoNegativo):\n # p(p+A)/B\n seguir = 1\n while seguir == 1:\n A = random.randrange(minimoNegativo, maximoPositivo)\n B = random.randrange(minimoNegativo, maximoPositivo)\n coefA = 1\n coefB = A\n coefC = -solucion*B\n discriminante = coefB*coefB-4*coefA*coefC\n if B != 0 and coefA != 0 and discriminante > 0:\n x1 = (-coefB+math.sqrt(discriminante))/(2*coefA)\n x2 = (-coefB-math.sqrt(discriminante))/(2*coefA)\n if x1 == solucion or x2 == solucion:\n seguir = 0\n listaExpresiones = []\n textoOperacion = r\"$\\dfrac{p(p \"\n if A < 0:\n textoOperacion += str(A)+\")}{\" + str(B) + \"}$\"\n else:\n textoOperacion += \" + \" + str(A)+\")}{\" + str(B) + \"}$\"\n listaExpresiones.append(textoOperacion)\n listaExpresiones.append(\"$p=\" + str(x2) + \"$\")\n return listaExpresiones\n\ndef generaOperacionesTipo8(solucion, maximoPositivo, minimoNegativo):\n # Ap(p+B)\n seguir = 1\n while seguir == 1:\n A = random.randrange(minimoNegativo, maximoPositivo)\n B = random.randrange(minimoNegativo, maximoPositivo)\n coefA = A\n coefB = A*B\n coefC = -solucion\n discriminante = coefB*coefB-4*coefA*coefC\n if coefA != 0 and discriminante > 0:\n x1 = (-coefB+math.sqrt(discriminante))/(2*coefA)\n x2 = (-coefB-math.sqrt(discriminante))/(2*coefA)\n if x1 == solucion or x2 == solucion:\n seguir = 0\n listaExpresiones = []\n textoOperacion = r\"$\" + str(A) + \"p(p\"\n if B < 0:\n textoOperacion += str(B)+\")$\"\n else:\n textoOperacion += \" + \" + str(B) + \")$\"\n listaExpresiones.append(textoOperacion)\n listaExpresiones.append(\"$p=\" + str(x1) + \"$\")\n return listaExpresiones\n\ndef generaOperacionesTipo9(solucion, maximoPositivo, minimoNegativo):\n # A+xyz+By^2\n seguir = 1\n while seguir == 1:\n A = random.randrange(minimoNegativo, maximoPositivo)\n B = random.randrange(minimoNegativo, maximoPositivo)\n x = round(random.uniform(minimoNegativo, maximoPositivo),2)\n y = round(random.uniform(minimoNegativo, maximoPositivo),2)\n if x != 0 and y != 0:\n z = round((solucion-B*y*y-A)/(x*y),2)\n if abs((A+x*y*z+B*y*y)-solucion) < 0.5:\n seguir = 0\n listaExpresiones = []\n textoOperacion = r\"$\" + str(A) + \" + xyz\"\n if B < 0:\n textoOperacion += str(B)+\"y^2$\"\n else:\n textoOperacion += \" + \" + str(B) + \"y^2$\"\n listaExpresiones.append(textoOperacion)\n listaExpresiones.append(\"$x=\" + str(x) + \", y=\" + str(y) + \", z=\" + str(z) + \"$\")\n return listaExpresiones\n\ndef generaOperacionesTipo10(solucion, maximoPositivo, minimoNegativo):\n # (A+p)/(p-2)\n seguir = 1\n while seguir == 1:\n A = random.randrange(minimoNegativo, maximoPositivo)\n if (1-solucion) == 0:\n p = (-A-2*solucion)/(1-solucion+random.uniform(0.1,0.2))\n else:\n p = (-A-2*solucion)/(1-solucion)\n pRedondeado = round(p,2)\n if pRedondeado-2 > 0 and abs(round((A+pRedondeado)/(pRedondeado-2),2)-solucion) < 0.5:\n seguir = 0\n listaExpresiones = []\n textoOperacion = r\"$\\dfrac{\" + str(A) + \" + p}{p-2}$\"\n listaExpresiones.append(textoOperacion)\n listaExpresiones.append(\"$p=\" + str(pRedondeado) + \"$\")\n return listaExpresiones\n\ndef generaOperacionesTipo11(solucion, maximoPositivo, minimoNegativo):\n # (Ap^2-Bp)/(Cp^2-Dp)\n seguir = 1\n while seguir == 1:\n A = 
random.randrange(minimoNegativo, maximoPositivo)\n B = random.randrange(minimoNegativo, maximoPositivo)\n C = random.randrange(minimoNegativo, maximoPositivo)\n D = random.randrange(minimoNegativo, maximoPositivo)\n if (A-solucion*C) == 0:\n p = (B-solucion*D)/(A-solucion*C+random.uniform(0.05,0.1))\n else:\n p = (B-solucion*D)/(A-solucion*C)\n pRedondeado = round(p,2)\n if C*pRedondeado*pRedondeado-D*pRedondeado > 0 and abs(round((A*pRedondeado*pRedondeado-B*pRedondeado)/(C*pRedondeado*pRedondeado-D*pRedondeado),2)-solucion) < 0.5:\n seguir = 0\n listaExpresiones = []\n if B>0:\n textoOperacion = r\"$\\dfrac{\" + str(A) + r\" p^2\" + \"-\" + str(abs(B)) + r\"p}{\" + str(C) + r\"p^2\"\n else:\n textoOperacion = r\"$\\dfrac{\" + str(A) + r\" p^2\" + \"+\" + str(abs(B)) + r\"p}{\" + str(C) + r\"p^2\"\n if D<0:\n textoOperacion += r\"+\" + str(abs(D)) + r\"p}$\"\n else:\n textoOperacion += r\"-\" + str(abs(D)) + r\"p}$\"\n listaExpresiones.append(textoOperacion)\n listaExpresiones.append(\"$p=\" + str(pRedondeado) + \"$\")\n return listaExpresiones\n\nnumeroTiposOperaciones = 11\n#######################################################################################\n# INICIO del código LaTeX específico para esta ficha\n#######################################################################################\nfor koko in range(len(elementos)):\n funcionesBasicas.escribeInicioFichaLaTeX(datos,tema,fLaTeX)\n fLaTeX.write(r\"Para desencriptarlo, tendrás que calcular el valor numérico de las siguientes expresiones algebraicas para los valores dados. Cuando lo hayas calculado, \\textbf{redondéalo a las unidades}, búscalo en la tabla y anota la letra correspondiente.\"+\"\\n\")\n fLaTeX.write(r\"\"+\"\\n\")\n fLaTeX.write(r\"\\vspace{0.5\\baselineskip}\"+\"\\n\")\n fLaTeX.write(r\"\\renewcommand{\\arraystretch}{1.5}\"+\"\\n\")\n fLaTeX.write(r\"\\begin{footnotesize}\"+\"\\n\")\n fLaTeX.write(r\"\\noindent\\begin{tabularx}{\\textwidth}{|X|c|c|c|}\"+\"\\n\")\n fLaTeX.write(r\"\t\\hline\"+\"\\n\")\n fLaTeX.write(r\"\t\\textbf{Expresión algebraica} & \\textbf{Valor de las variables} & \\textbf{Resultado} & \\textbf{Letra} \\\\\"+\"\\n\")\n fLaTeX.write(r\"\t\\hline\"+\"\\n\")\n # Añadimos al archivo fuente LaTeX las operaciones con enteros para el elemento que toque.\n print(str(koko+1), \"de\", str(len(elementos)),\":\", elementos[koko])\n operacionesDistintas = funcionesBasicas.generaOperacionesDistintas(numeroTiposOperaciones,len(elementos[koko]))\n for papa in range(len(elementos[koko])):\n # Obtenemos el número correspondiente a cada letra del primer elemento,\n # y generamos una operación que da ese número como resultado.\n if codigoAlfabetico.get(elementos[koko][papa]) is not None:\n #----------------------------------------------\n exec(\"cadenas = generaOperacionesTipo\" + str(operacionesDistintas[papa]+1) + \"(codigoAlfabetico.get(elementos[koko][papa]), maximoPositivo, minimoNegativo)\")\n pot = 2*random.randrange(0,int(len(cadenas)/2))\n fLaTeX.write(cadenas[pot]+r\" &\" + cadenas[pot+1] +r\"& & \\\\\\hline\"+\"\\n\") \n #---------------------------------------------- \n fLaTeX.write(r\"\\end{tabularx}\"+\"\\n\")\n fLaTeX.write(r\"\\end{footnotesize}\"+\"\\n\")\n funcionesBasicas.escribeFinalFichaLaTeX(fLaTeX)\nfLaTeX.write(r\"\\end{document}\"+\"\\n\")\nfLaTeX.close()\nsubprocess.run([\"pdflatex\",\"--interaction=batchmode\",\"-output-directory=fichas\", rutaArchivoLaTeX])\nend = time.time()\nprint(len(elementos), \"elementos procesados en\", int(end - start), \" 
segundos.\")\n","repo_name":"toreroeconomico/criptofichas","sub_path":"criptografia_Q_PolinomiosValorNumerico.py","file_name":"criptografia_Q_PolinomiosValorNumerico.py","file_ext":"py","file_size_in_byte":13465,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"25947200476","text":"from collections import Counter\n\ndef frequency_sort(nums):\n counts = Counter(nums)\n\n new_list = sorted(nums, key = lambda num: (counts[num], -num))\n print(counts)\n return new_list\n\nprint(frequency_sort([2,3,1,3,2]))","repo_name":"J22Pregbaha/python-tutorials","sub_path":"challenges/sort-by-increasing-frequency.py","file_name":"sort-by-increasing-frequency.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"11450733920","text":"import pandas as pd\r\nimport numpy as np \r\nimport os\r\nfrom pprint import pprint\r\nimport matplotlib.pyplot as plt\r\nimport copy\r\n\r\n\r\npd.set_option('display.max_rows', 500)\r\npd.set_option('display.max_columns', 500)\r\npd.set_option('display.width', 1000)\r\n\r\nos.chdir(r'C:\\Users\\mkommaraju\\OneDrive - PayPal\\MyDocuments_from_Laptop\\Work\\ML\\NLP_Showcase')\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\",category=DeprecationWarning)\r\n\r\n\r\ndf_train_validate = pd.read_csv('train.csv')\r\ndf_test = pd.read_csv('test.csv')\r\n\r\ndf_train_validate = df_train_validate.loc[:1000]\r\ndf_test = df_test.loc[:1000]\r\n\r\ndf_train_validate.columns\r\ndf_train_validate['ABSTRACT'].head()\r\n\r\nfrom sklearn.model_selection import train_test_split\r\ndf_train, df_validate = train_test_split(df_train_validate, test_size = 0.2)\r\n\r\ndf_train.reset_index(inplace=True)\r\ndf_validate.reset_index(inplace=True)\r\ndf_test.reset_index(inplace=True)\r\n\r\nX_train = df_train['ABSTRACT']\r\nY_train = df_train[['Computer Science', 'Physics', 'Mathematics', 'Statistics', 'Quantitative Biology', 'Quantitative Finance']]\r\n\r\nX_validate = df_validate['ABSTRACT']\r\nY_validate = df_validate[['Computer Science', 'Physics', 'Mathematics', 'Statistics', 'Quantitative Biology', 'Quantitative Finance']]\r\n\r\nX_test = df_test['ABSTRACT']\r\n\r\n\r\n############################################################\r\n# Pre processing\r\n############################################################ \r\n\r\nimport custom_preprocessing as cp\r\ndef my_custom_preprocessing(texts):\r\n texts= cp.my_simple_preprocessing(texts)\r\n texts = [w for w in cp.sent_to_words(texts)] \r\n texts = cp.lemmatization(texts)\r\n texts = cp.remove_stopwords(texts)\r\n return texts\r\n\r\ndocs_train = my_custom_preprocessing(X_train)\r\ndocs_validate = my_custom_preprocessing(X_validate)\r\ndocs_test = my_custom_preprocessing(X_test)\r\n\r\n\r\n\r\nimport gensim.corpora as corpora \r\nimport gensim\r\nid2word = corpora.Dictionary(docs_train)\r\nid2word.filter_extremes(no_below=10, keep_n=10000)\r\nprint('id2word length:' + str(len(id2word)))\r\n\r\n# build the TFIDF model\r\ntfidf_model = gensim.models.TfidfModel([id2word.doc2bow(text) for text in docs_train], id2word=id2word)\r\n\r\n# build the corpus for train, validate and test data\r\ncorpus_train = [id2word.doc2bow(text) for text in docs_train]\r\ncorpus_validate = [id2word.doc2bow(text) for text in docs_validate]\r\ncorpus_test = [id2word.doc2bow(text) for text in docs_test]\r\n\r\n# Build TF-IDF corpus for the train, validate and test data\r\ncorpus_tfidf_train = 
tfidf_model[corpus_train]\r\ncorpus_tfidf_validate = tfidf_model[corpus_validate]\r\ncorpus_tfidf_test = tfidf_model[corpus_test]\r\n\r\n# build the X vectors for train, validate and test data\r\nX_train_corpus_df = pd.DataFrame(data = gensim.matutils.corpus2dense(corpus_train , len(id2word.keys()) ).T, columns = list(id2word.values()))\r\nX_validate_corpus_df = pd.DataFrame(data = gensim.matutils.corpus2dense(corpus_validate , len(id2word.keys()) ).T, columns = list(id2word.values()))\r\nX_test_corpus_df = pd.DataFrame(data = gensim.matutils.corpus2dense(corpus_test , len(id2word.keys()) ).T, columns = list(id2word.values()))\r\n\r\n# build the TFIDF X vectors for train, validate and test data\r\nX_train_corpus_tfidf_df = pd.DataFrame(data = gensim.matutils.corpus2dense(corpus_tfidf_train , len(id2word.keys()) ).T, columns = list(id2word.values()))\r\nX_validate_corpus_tfidf_df = pd.DataFrame(data = gensim.matutils.corpus2dense(corpus_tfidf_validate, len(id2word.keys()) ).T, columns = list(id2word.values()))\r\nX_test_corpus_tfidf_df = pd.DataFrame(data = gensim.matutils.corpus2dense(corpus_tfidf_test , len(id2word.keys()) ).T, columns = list(id2word.values()))\r\n\r\n\r\n#######################\r\n# check data\r\n####################### \r\n\r\ni=0\r\nprint(X_train[i])\r\nprint(docs_train[i])\r\n[(id2word[id], freq) for id, freq in corpus_train[i]]\r\n[(id2word[id], round(freq,2)) for id, freq in corpus_tfidf_train[i]]\r\nY_train.loc[i]\r\n \r\n# columns/terms in the dictionary, top 20 words in the corpus\r\nX_train_corpus_df.columns\r\nX_train_corpus_df.mean(axis=0).sort_values(ascending =False).head(20)\r\n\r\n\r\n\r\n\r\n#####################################\r\n# save data so far\r\n####################################\r\n\r\nimport dill\r\nfilename = 'step_1_data_prep.pkl'\r\ndill.dump_session(filename)\r\n# and to load the session again:\r\n#dill.load_session(filename)\r\n\r\n\r\n\r\n","repo_name":"mallikkommaraju/ml","sub_path":"learn_nlp_project_1/step_1_data_prep.py","file_name":"step_1_data_prep.py","file_ext":"py","file_size_in_byte":4413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33782745530","text":"# -*- encoding: utf-8 -*-\n# @Time : 2018/12/6 18:02\n# @Author : mike.liu\n# @File : VarConfig.py\n\n# 用于定义整个框架中所需要的一些全局常量值,方便维护\n\n# 获取当前文件夹所在目录的父目录的绝对路径\nimport os\n\nieDriverFilePath = \"E:\\Python36\\IEDriverServer.exe\"\nchromeDriverFilePath = \"E:\\Python36\\chromedriver.exe\"\nfirefoxDriverFilePath = \"E:\\Python36\\geckodriver.exe\"\n\n\nparentDirPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n# 存放截图文件\nscreenPicturesDir = parentDirPath + \"\\\\exceptionpictures\"\n","repo_name":"liudefang/selenium-UI-Python-","sub_path":"第十三课/KeyWorkFrameWorkDemo/config/VarConfig.py","file_name":"VarConfig.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"24746989662","text":"#!/usr/bin/env python3\nimport os\nimport shutil\nimport configparser\nimport getpass\nos.system(\"clear\") #clear console-screen\n\n#read the config and set variables\nconfig = configparser.ConfigParser()\nconfig.read('settings.conf')\n\npath = config['DEFAULT']['source_dir'] \npath = path.replace(\"username\", getpass.getuser()) #input directory\n\ncount_jpg = 0\ncount_error = 0\nlist_error = [] \n\n#WELCOME-PROMPT and changing of variables\nprint(\"Welcome to my image-filter-tool!\", 
config['EASTFOXES']['version'],\"\\nPlease make sure to read the readme.rtf before you continue\\n\")\nprint(\"In which folder are the files located?\\nCurrent set source path is :\", path)\npath = path + input(\".../\")\npath_ext = path + config['DEFAULT']['working_dir'] #output directory\n\ntry: \n list = os.listdir(path) #Read all files from input directory\nexcept:\n print(\"\\n...source directory could not be found. Exiting the script.\\n\")\n quit()\n\n#Creating the dest. directory, if not present\nprint(\"\\n... creating destination directory\")\ntry:\n os.makedirs(path_ext)\nexcept:\n print(\"... destination directory already exists or not allowed to create directory\") \n\n#Go trough every file from the directory and check if there's a \"jpg\" in the filename\nprint(\"... starting the filtering and copying of the files\")\nfor object in list: \n if \".JPG\" in object:\n count_jpg += 1\n #create destination-filename\n file = object.replace(\"JPG\", \"NEF\")\n #set source-file and destination-file\n source = path + \"/\" + file\n target = path_ext + \"/\" + file\n\n try:\n shutil.copy(source, target)\n \n except IOError as e:\n list_error.append(\"Unable to copy file. %s\" % e)\n count_error += 1\n except:\n list_error.append(\"Unexpected error:\", sys.exc_info())\n count_error += 1\n\n#Giving the user a report about the script-run\nprint(\"\\nAll objects in the list were filtered. \\nThe script found\", count_jpg, \"JPG-files.\")\n#Outputs a message if any file-errors occured\nif count_error > 0:\n print(\"\\nThere were\", count_error, \"NEF-files which COULDNT BE copied. \\nPlease check if all files are avaiable and the directory (\", path_ext, \") is not write-protected.\")\n \n if 'yes' in config['DEFAULT']['file_debug']: #outputs all error-messages if debugging is set to 'yes'\n for error in list_error:\n print(error)\n \nif 'yes' in config['DEFAULT']['extended_debug']: #outputs all important variables with its values\n print(\"\\nsource_path:\", path)\n print(\"working_path:\", path_ext)\n print(\"count_jpg:\", count_jpg)\n print(\"count_error:\", count_error)\n #print(\"all found files:\", list)\n\nprint(\"\")\n\n#fin","repo_name":"EastFoxDE/image-sort","sub_path":"image-sort.py","file_name":"image-sort.py","file_ext":"py","file_size_in_byte":2717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29330225107","text":"import numpy as np\nimport json\nfrom skimage import io\nfrom PIL import Image\n\nimport dash\nfrom dash.exceptions import PreventUpdate\nfrom dash.dependencies import Input, Output, State\nimport dash_html_components as html\nimport dash_core_components as dcc\nimport plotly.graph_objs as go\n\nimport dash_canvas\nfrom dash_canvas.utils import (parse_jsonstring,\n superpixel_color_segmentation,\n image_with_contour, image_string_to_PILImage,\n array_to_data_url)\nfrom dash_canvas.components import image_upload_zone\n\napp = dash.Dash(__name__)\nserver = app.server\napp.config.suppress_callback_exceptions = True\n# Image to segment and shape parameters\n\nfilename = \"/app/apps/remove-background/assets/dress.jpg\"\ntry:\n img_app3 = io.imread(filename)\nexcept FileNotFoundError:\n filename = \"assets/dress.jpg\"\n img_app3 = io.imread(filename)\nheight, width, _ = img_app3.shape\ncanvas_width = 500\ncanvas_height = round(height * canvas_width / width)\nscale = canvas_width / width\n\n\napp.layout = html.Div([\n html.Div([\n html.Div([\n html.H2(children='Remove image background'),\n dcc.Markdown('''\n 
Draw on the object of interest, and press remove background.'''),\n dash_canvas.DashCanvas(\n id='canvas-bg',\n width=canvas_width,\n height=canvas_height,\n scale=scale,\n image_content=array_to_data_url(img_app3),\n lineWidth=4,\n goButtonTitle='Remove background',\n hide_buttons=['line', 'zoom', 'pan'],\n ),\n html.H6(children=['Brush width']),\n dcc.Slider(\n id='bg-width-slider',\n min=2,\n max=40,\n step=1,\n value=[5]\n ),\n image_upload_zone('upload-image-bg'),\n ], className=\"seven columns\"),\n html.Div([\n html.Img(src='https://github.githubassets.com/images/modules/logos_page/GitHub-Mark.png', width='30px'),\n html.A(\n id='gh-link',\n children=[\n 'View on GitHub'],\n href=\"http://github.com/plotly/canvas-portal/\"\n \"blob/master/apps/remove-background/app.py\",\n style={'color': 'black',\n 'border':'solid 1px black',\n 'float':'left'}\n ),\n\n html.H3(children='How to use this app and remove background',\n id='bg-title'),\n html.Img(id='segmentation-bg',\n src='assets/bg.gif',\n width='100%')\n ], className=\"five columns\")],\n className=\"row\")\n ])\n\n# ----------------------- Callbacks -----------------------------\n\n\n@app.callback(Output('bg-title', 'children'),\n [Input('canvas-bg', 'json_data')])\ndef modify_bg_title(json_data):\n if json_data:\n return \"Image without background\"\n else:\n raise PreventUpdate\n\n\n@app.callback(Output('segmentation-bg', 'src'),\n [Input('canvas-bg', 'json_data')],\n [State('canvas-bg', 'image_content')])\ndef update_figure_upload(string, image):\n if string:\n if image is None:\n im = img_app3\n else:\n im = image_string_to_PILImage(image)\n im = np.asarray(im)\n shape = im.shape[:2]\n try:\n mask = parse_jsonstring(string, shape=shape)\n except IndexError:\n raise PreventUpdate\n if mask.sum() > 0:\n seg = superpixel_color_segmentation(im, mask)\n else:\n seg = np.ones(shape)\n fill_value = 255 * np.ones(3, dtype=np.uint8)\n dat = np.copy(im)\n dat[np.logical_not(seg)] = fill_value\n return array_to_data_url(dat)\n else:\n raise PreventUpdate\n\n\n@app.callback(Output('canvas-bg', 'json_data'),\n [Input('canvas-bg', 'image_content')])\ndef clear_data(image_string):\n return ''\n\n\n@app.callback(Output('canvas-bg', 'image_content'),\n [Input('upload-image-bg', 'contents')])\ndef update_canvas_upload(image_string):\n if image_string is None:\n raise ValueError\n if image_string is not None:\n return image_string\n else:\n return None\n\n\n@app.callback(Output('canvas-bg', 'lineWidth'),\n [Input('bg-width-slider', 'value')])\ndef update_canvas_linewidth(value):\n return value\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n\n","repo_name":"plotly/canvas-portal","sub_path":"apps/remove-background/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4595,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"48"} +{"seq_id":"15893067315","text":"import socket\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nsock.bind((\"localhost\",8888))\nsock.listen(5)\n#sock.setblocking(False)\nsock.settimeout(5)\n#sock.settimeout(0) # -> sock.setblocking(False)\n#sock.settimeout(None) # -> sock.setblocking(True)\nwhile True:\n try:\n client, addr = sock.accept()\n except socket.error:\n print(\"no clients\")\n except KeyboardInterrupt:\n break\n else:\n client.setblocking(True)\n result = client.recv(1024)\n client.close()\n print(\"Rain and Pain\", 
result.decode(\"utf-8\"))\n","repo_name":"LilMeel/webPython","sub_path":"blocking/nonblock_srver.py","file_name":"nonblock_srver.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20758580456","text":"\"\"\"\nLemon\n=====\n\nSingle Page Application extension for Flask.\n\n\"\"\"\n\nimport sys\nfrom setuptools import setup\nfrom setuptools.command.test import test as TestCommand\n\n\nclass PyTest(TestCommand):\n def initialize_options(self):\n TestCommand.initialize_options(self)\n self.pytest_args = []\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = []\n self.test_suite = True\n\n def run_tests(self):\n import pytest\n errno = pytest.main('-x tests/ --cov lemon --cov-report term-missing --pdb')\n sys.exit(errno)\n\n\nsetup(\n name='Lemon',\n version='0.0.1',\n url='http://github.com/theorchard/lemon-py/',\n license='MIT',\n author='Michael Ortali',\n author_email='mortali@theorchard.com',\n description='Single Page Application extension for Flask',\n long_description=__doc__,\n packages=['lemon',],\n zip_safe=False,\n include_package_data=True,\n platforms='any',\n install_requires=[\n 'Flask',\n 'requests'],\n tests_require=[\n 'pytest-cov'],\n cmdclass = {\n 'test': PyTest})\n","repo_name":"theorchard/lemon-py","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"9617813717","text":"import time, datetime\n\n\ndef get_week_day(date):\n week_day_dict = {\n 0: '星期一',\n 1: '星期二',\n 2: '星期三',\n 3: '星期四',\n 4: '星期五',\n 5: '星期六',\n 6: '星期天',\n }\n day = date.weekday()\n return week_day_dict[day]\n\n\nprint(get_week_day(datetime.datetime.now()))\n\n\ntimestamp=1541191585\nt=time.localtime(timestamp)\nh=time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(timestamp))\nprint(h)\nprint(str(t.tm_year)+\"-\"+str(t.tm_mon))\nprint(type(t))\n\n","repo_name":"jianjunyue/KmmtML","sub_path":"Utils/datetime_util.py","file_name":"datetime_util.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"27962085730","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 20 16:07:21 2020\n\n@author: USER\n\"\"\"\n\n\n## QCalendarWidget\n# 달력 만들어주는 위젯\n\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget, QLabel, QVBoxLayout, QCalendarWidget\nfrom PyQt5.QtCore import QDate\n\n## QDate 달력의 날짜를 클릭하게 되면 그것을 string 로 갖고오기위한 모듈\n\nclass MyApp(QWidget):\n\n def __init__(self):\n super().__init__()\n self.initUI()\n\n def initUI(self):\n cal = QCalendarWidget(self)\n cal.setGridVisible(True) #글자와 글자 사이에 가로와 세로선이 나타나게 해준다.\n cal.clicked[QDate].connect(self.showDate) #캘린더 객체도 버튼처럼 push시그널이 있다. 
클릭하게되면 QDate객체를 만들어준다.\n\n self.lbl = QLabel(self)\n date = cal.selectedDate() # 현제 선택된 날짜를 얻어내는 함수.\n self.lbl.setText(date.toString()) #select 함수는 QDate객체를 반환하기 때문에 tostring 으로 문자열로 만들어줌\n\n vbox = QVBoxLayout() #달력을 위치시켜주기 위한 레이아웃\n vbox.addWidget(cal) # \n vbox.addWidget(self.lbl) \n\n self.setLayout(vbox)\n\n self.setWindowTitle('QCalendarWidget')\n self.setGeometry(300, 300, 400, 300)\n self.show()\n\n def showDate(self, date): #클릭 시그널이 발행되면 호출하는 함수\n self.lbl.setText(date.toString()) #라벨에 클릭한 날짜를 보여주게 하기\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n ex = MyApp()\n sys.exit(app.exec_())","repo_name":"ojjang1/learnPython","sub_path":"anaconda/gui_15_QCalendarWidget.py","file_name":"gui_15_QCalendarWidget.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"15590208283","text":"import re\nfrom datetime import date\n\n\n__all__ = [\n 'SceneName'\n ]\n\nRELEASE_TYPE_MOVIE = 'Movie'\nRELEASE_TYPE_TV = 'TV'\nRELEASE_TYPE_EBOOK = 'EBook'\nRELEASE_TYPE_UNKNOWN = 'Unknown'\n\nRELEASE_TYPES = [\n RELEASE_TYPE_MOVIE,\n RELEASE_TYPE_TV,\n RELEASE_TYPE_EBOOK,\n RELEASE_TYPE_UNKNOWN,\n ]\n\n\n# list of valid encoding types\nENCODING_TYPES = [\n 'XviD',\n 'x264',\n ]\n\n\n# list of valid movie source types\nMOVIE_SOURCE_NAMES = [\n 'DVDRip',\n 'BDRip',\n 'BluRay',\n ]\n\n\n# list of valid tv source types\nTV_SOURCE_NAMES = [\n 'DSR',\n 'PDTV',\n 'HDTV',\n ]\n\n\n# list of valid ebook source types\nEBOOK_SOURCE_NAMES = [\n 'EBook',\n ]\n\n\n# list of other valid tags\nVALID_TAGS = [\n 'FESTIVAL',\n 'STV',\n 'LIMITED',\n 'TV',\n 'READ.NFO',\n 'WS',\n 'FS',\n 'PROPER',\n 'REPACK',\n 'RERIP',\n 'REAL',\n 'RETAIL',\n 'EXTENDED',\n 'REMASTERED',\n 'RATED',\n 'UNRATED',\n 'CHRONO',\n 'THEATRICAL',\n 'DC',\n 'SE',\n 'UNCUT',\n 'INTERNAL',\n 'DUBBED',\n 'SUBBED',\n 'FINAL',\n 'COLORIZED',\n '1080p',\n '720p',\n 'RETAiL',\n ]\n\n\n# regular expression for extractiong a cd number\nCD_NUMBER_RE = re.compile('^cd\\d+$', re.IGNORECASE)\n\n\n# regular expression for matching the production year\nYEAR_RE = re.compile(\n '^[12]\\d{3}$'\n )\n\n\n# regular expression for matching season and episode numbers\nSEASON_EPISODE_RE = re.compile(\n '^S(\\d{2,})E(\\d{2,})$',\n re.IGNORECASE\n )\n\n\n# For when an episode name screws up the logic\nSEASON_EPISODE_REMAINDER_RE = re.compile(\n '^(.*?)\\.S(\\d{2,})E(\\d{2,})\\.(.*?)$',\n re.IGNORECASE\n )\n\n\n# For when an episode date screws up the logic (With optional episode name)\nEPISODE_DATE_REMAINDER_RE = re.compile(\n '^(.*?)\\.(\\d{4})\\.(\\d{2})\\.(\\d{2})(\\.(.*?))?$'\n )\n\n\n# create a function to check for the tag in a specific list\ndef make_check_tag_function(*tag_lists):\n # find the tag in tag_list or return None\n def check_tag_in_list(tag):\n for tag_list in tag_lists:\n for tag_name in tag_list:\n if tag.lower() == tag_name.lower():\n return tag_name\n\n return None\n\n return check_tag_in_list\n\n\ncheck_tag_encoding_type = make_check_tag_function(ENCODING_TYPES)\n\ncheck_tag_valid_tag = make_check_tag_function(VALID_TAGS)\n\ncheck_tag_source_type = make_check_tag_function(\n MOVIE_SOURCE_NAMES,\n TV_SOURCE_NAMES,\n EBOOK_SOURCE_NAMES\n )\n\n\nclass SceneName(object):\n # pylint: disable-msg=R0902\n def __init__(self):\n self.name = None\n self.group = None\n self.encoding_type = None\n self._source = None\n self.release_type = RELEASE_TYPE_UNKNOWN\n self.tags = []\n self.production_year = None\n self.cd_number = None\n self.season = 
None\n self.episode = None\n self.episode_name = None\n self.episode_date = None\n\n def _set_source(self, source):\n self._source = source\n\n if self._source in TV_SOURCE_NAMES:\n self.release_type = RELEASE_TYPE_TV\n elif self._source in MOVIE_SOURCE_NAMES:\n self.release_type = RELEASE_TYPE_MOVIE\n elif self._source in EBOOK_SOURCE_NAMES:\n self.release_type = RELEASE_TYPE_EBOOK\n else:\n self.release_type = RELEASE_TYPE_UNKNOWN\n\n def _get_source(self):\n return self._source\n\n source = property(fget=_get_source, fset=_set_source)\n\n @classmethod\n def parse(cls, release_name):\n # extract release group\n last_dash = release_name.rfind('-')\n parsed_name = cls()\n\n # look for the last dash in the name\n if last_dash != -1:\n # get the part after that dash\n parsed_name.group = release_name[last_dash + 1:]\n\n # if that has a . in it, disregard it\n if '.' in parsed_name.group:\n parsed_name.group = None\n else:\n # else remove that from the relase name\n release_name = release_name[:last_dash]\n\n # split the name by .\n release_parts = release_name.split('.')\n\n while release_parts:\n name_part = release_parts.pop()\n\n # check for an encoding type\n encoding = check_tag_encoding_type(name_part)\n if encoding:\n parsed_name.encoding_type = encoding\n continue\n\n # check for a source type\n source = check_tag_source_type(name_part)\n if source:\n parsed_name.source = source\n continue\n\n # check for one of the other tags\n tag = check_tag_valid_tag(name_part)\n if tag:\n parsed_name.tags.append(tag)\n continue\n\n # check for a cd number\n if CD_NUMBER_RE.match(name_part):\n parsed_name.cd_number = name_part\n continue\n\n # check for an episode number\n episode_match = SEASON_EPISODE_RE.match(name_part)\n if episode_match:\n parsed_name.season = episode_match.group(1)\n parsed_name.episode = episode_match.group(2)\n continue\n\n # check for a production year\n if YEAR_RE.match(name_part):\n parsed_name.production_year = name_part\n continue\n\n # if none of the above matches,\n # this must be part of the name itself\n # so stop checking the parts\n release_parts.append(name_part)\n break\n\n # Now for special cases\n\n # re-combine the name in the correct order\n parsed_name.name = '.'.join(release_parts)\n\n # Look to see if a season/episode tag is still present in the name\n # If so use the season/episode,\n # and anything after that is an episode name\n has_episode_name_match = SEASON_EPISODE_REMAINDER_RE.match(\n parsed_name.name)\n\n if has_episode_name_match:\n parsed_name.name = has_episode_name_match.group(1)\n parsed_name.season = has_episode_name_match.group(2)\n parsed_name.episode = has_episode_name_match.group(3)\n parsed_name.episode_name = has_episode_name_match.group(4)\n\n # Look to see if a date is embedded in the name\n # since this also screws up the above logic\n has_episode_date_match = EPISODE_DATE_REMAINDER_RE.match(\n parsed_name.name)\n\n if has_episode_date_match:\n parsed_name.name = has_episode_date_match.group(1)\n\n parsed_name.episode_date = date(\n int(has_episode_date_match.group(2)),\n int(has_episode_date_match.group(3)),\n int(has_episode_date_match.group(4)))\n\n parsed_name.episode_name = has_episode_date_match.group(5)\n\n return parsed_name\n","repo_name":"bobbyrward/SceneExtractor","sub_path":"deluge/plugins/scene_extractor/scene_name.py","file_name":"scene_name.py","file_ext":"py","file_size_in_byte":6909,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} 
+{"seq_id":"1119813197","text":"from gi.repository import Gtk\nfrom gi.repository import cairo\nimport sys\nimport math\n\n\nclass MyWindow(Gtk.ApplicationWindow):\n\n def __init__(self, app):\n Gtk.Window.__init__(self, title=\"Choose an angle\", application=app)\n self.set_default_size(400, 400)\n self.set_border_width(10)\n\n # a default angle\n self.angle = 360\n\n grid = Gtk.Grid()\n\n # a spinbutton that takes the value of an angle\n ad = Gtk.Adjustment(360, 0, 360, 1, 0, 0)\n self.spin = Gtk.SpinButton(adjustment=ad, climb_rate=1, digits=0)\n self.spin.connect(\"value-changed\", self.get_angle)\n\n # a drawing area for drawing whatever we want\n self.darea = Gtk.DrawingArea()\n # that we describe in the method draw(), connected to the signal \"draw\"\n self.darea.connect(\"draw\", self.draw)\n # we have to request a minimum size of the drawing area, or it will\n # disappear\n self.darea.set_size_request(300, 300)\n\n grid.attach(self.spin, 0, 0, 1, 1)\n grid.attach(self.darea, 0, 1, 1, 1)\n\n self.add(grid)\n\n # whenever we get a new angle in the spinbutton\n def get_angle(self, event):\n self.angle = self.spin.get_value_as_int()\n # redraw what is in the drawing area\n self.darea.queue_draw()\n\n def draw(self, darea, cr):\n # a 10-pixels-wide line\n cr.set_line_width(10)\n # red\n cr.set_source_rgba(0.5, 0.0, 0.0, 1.0)\n\n # get the width and height of the drawing area\n w = self.darea.get_allocated_width()\n h = self.darea.get_allocated_height()\n\n # move to the center of the drawing area\n # (translate from the top left corner to w/2, h/2)\n cr.translate(w / 2, h / 2)\n # draw a line to (55, 0)\n cr.line_to(55, 0)\n # and get back to (0, 0)\n cr.line_to(0, 0)\n # draw an arc centered in the origin, 50 pixels wide, from the angle 0\n # (in radians) to the angle given by the spinbutton (in degrees)\n cr.arc(0, 0, 50, 0, self.angle * (math.pi / 180))\n # draw a line back to the origin\n cr.line_to(0, 0)\n # drawing the path, and keeping the path for future use\n cr.stroke_preserve()\n\n # set a colour\n cr.set_source_rgba(0.0, 0.5, 0.5, 1.0)\n # and use it to fill the path (that we had kept)\n cr.fill()\n\n\nclass MyApplication(Gtk.Application):\n\n def __init__(self):\n Gtk.Application.__init__(self)\n\n def do_activate(self):\n win = MyWindow(self)\n win.show_all()\n\n def do_startup(self):\n Gtk.Application.do_startup(self)\n\napp = MyApplication()\nexit_status = app.run(sys.argv)\nsys.exit(exit_status)\n","repo_name":"Kr0mAgn0n/gnome-devel-docs","sub_path":"platform-demos/C/samples/widget_drawing.py","file_name":"widget_drawing.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"30500127690","text":"import calendar\nimport config\nimport csv\nimport cv2\nimport glob\nimport json\nimport os\nimport pyexcel\nimport pytesseract\nimport typer\nfrom datetime import datetime\nfrom enum import Enum\nfrom nanoid import generate as id_generate\nfrom pydantic import BaseModel\nfrom pydantic import ValidationError\nfrom typing import List\nfrom typing_extensions import Annotated\n\napp = typer.Typer()\nabsolute_path = os.path.dirname(os.path.abspath(__file__))\n\n\nclass TemplateCoordinatesModel(BaseModel):\n x: int\n y: int\n width: int\n height: int\n key: str\n is_number: bool = False\n\n\nclass TemplateModel(BaseModel):\n main: List[TemplateCoordinatesModel]\n second: List[TemplateCoordinatesModel]\n\n\nclass NumberType(Enum):\n even = 0\n odd = 1\n\n\nclass Collector():\n 
TESSERACT_ARGS_FOR_NUMBER = \"--oem 3 --psm 7 digits -c tessedit_char_whitelist=0123456789\"\n TESSERACT_ARGS_FOR_TEXT = \"--oem 3 --psm 7\"\n\n def __init__(self, template: TemplateModel) -> None:\n self.template = template\n\n tpl_main = self.template.main and len(self.template.main) > 0\n tpl_second = self.template.second and len(self.template.second) > 0\n\n if not tpl_main and not tpl_second:\n print(f\"Error: template is not conformed\")\n return\n\n # Attention: ID should be in second key of template.json\n idx_id_field = [i for i, e in enumerate(\n self.template.second) if e.key == \"id\"]\n is_idx = len(idx_id_field) > 0\n\n if is_idx is True:\n self.template_idx_idkey = idx_id_field[0]\n else:\n print(f\"Error: No id field found in template.second\")\n return\n\n def import_old(self):\n \"\"\" Allows to import old data for \"merging purpose\".\n Example:\n During KvK, you import your first data.\n Then you have somewhere (in google sheet) your governors data.\n You then use this script to update data.\n This will allow to keep the same order from your main data.\n Thus, with copy paste, you can update more easily.\n \"\"\"\n history = dict()\n order_ids = []\n filename = \"old/export.csv\"\n path_filename = os.path.abspath(filename)\n if not os.path.exists(path_filename):\n return None, []\n with open(path_filename, \"r\") as f:\n reader = csv.reader(f)\n for row in reader:\n player_id = row[0]\n if player_id == \"ID\" or player_id == \"\":\n continue\n else:\n try:\n player_id = int(player_id)\n except Exception as e:\n print(row)\n raise e\n order_ids += [player_id]\n history[player_id] = row[1:]\n return history, order_ids\n\n def run(self, folder_name: str):\n # works only for *.jpg or *.png screenshots\n jpg_names = f\"./{folder_name}/*.jpg\"\n png_names = f\"./{folder_name}/*.png\"\n files_paths = glob.glob(jpg_names) + glob.glob(png_names)\n if len(files_paths) == 0:\n print(f\"Error: Folder is empty\")\n return\n if len(files_paths) % 2 != 0:\n print(f\"Error: Folder contains no odd number of files\")\n return\n\n paths_sorted = sorted(files_paths) # sort by file name\n\n # we need to find if the screenshot at index even or odd contain the id info\n self.idx_screen_id_info = None\n screen_0 = paths_sorted[0]\n print(f\"...pre-processing {screen_0}\")\n image_0 = cv2.imread(screen_0)\n image_data = self.get_data(image_0, self.template.second)\n supposed_id = image_data[self.template_idx_idkey]\n if supposed_id != \"ERROR\" and isinstance(supposed_id, int) and len(str(supposed_id)) > 7:\n self.idx_screen_id_info = 0\n else:\n screen_01 = paths_sorted[1]\n print(f\"...pre-processing {screen_01}\")\n image_01 = cv2.imread(screen_01)\n image_data = self.get_data(image_01, self.template.second)\n supposed_id = image_data[self.template_idx_idkey]\n if supposed_id != \"ERROR\" and isinstance(supposed_id, int) and len(str(supposed_id)) > 7:\n self.idx_screen_id_info = 1\n if self.idx_screen_id_info is None:\n print(f\"Error: Could not find where is governor ID screenshot\")\n return\n\n id_screen = NumberType.even if self.idx_screen_id_info == 0 else NumberType.odd\n governors = dict()\n current_gov_id = None\n for i, name_path in enumerate(paths_sorted):\n file_path = os.path.join(absolute_path, name_path)\n path_exits = os.path.exists(file_path)\n result = f\"{'OK' if path_exits else 'FAILED'}\"\n print(f\"...reading {file_path}: {result}\")\n if not path_exits:\n continue\n image = cv2.imread(file_path)\n print(f\"...processing {file_path}: PENDING\")\n\n data = None\n if i % 2 
== 0:\n current_gov_id = id_generate()\n if id_screen == NumberType.odd:\n data = self.get_data(image, self.template.main)\n governors[current_gov_id] = data\n elif id_screen == NumberType.even:\n data = self.get_data(image, self.template.second)\n gov_id = data[self.template_idx_idkey]\n rest_data = [e for i, e in enumerate(\n data) if i != self.template_idx_idkey]\n governors[current_gov_id] = [gov_id] + rest_data\n else:\n if id_screen == NumberType.odd:\n data = self.get_data(image, self.template.second)\n if current_gov_id is not None:\n gov_id = data[self.template_idx_idkey]\n rest_data = [e for i, e in enumerate(\n data) if i != self.template_idx_idkey]\n _old = governors[current_gov_id]\n governors[current_gov_id] = [gov_id] + _old + rest_data\n elif id_screen == NumberType.even:\n data = self.get_data(image, self.template.main)\n if current_gov_id is not None:\n _old = governors[current_gov_id]\n governors[current_gov_id] = [_old[0]] + data + _old[1:]\n\n governors_data = governors.values()\n self.save_data(governors_data)\n return governors_data\n\n def get_data(self, image, locations: List[TemplateCoordinatesModel]) -> List[str]:\n \"\"\" For an image, locations refer to all possible data we want to extract.\n Return a list of values extracted from provided locations.\n \"\"\"\n values = []\n for location in locations:\n # use coordinates above to select the desired area\n cropped_image = image[\n location.y:location.y + location.height,\n location.x:location.x + location.width\n ]\n\n # image in gray scale mode, easier to extract data\n gray_image = cv2.cvtColor(cropped_image, cv2.COLOR_BGR2GRAY)\n\n # if the data we want to extract is a number or a string\n data_type_number = location.is_number is True\n if data_type_number:\n tess_cfg = Collector.TESSERACT_ARGS_FOR_NUMBER\n else:\n tess_cfg = Collector.TESSERACT_ARGS_FOR_TEXT\n\n result = pytesseract.image_to_string(gray_image, config=tess_cfg)\n\n # if the result is an empty value, we skip it\n text_result = result.strip().replace(\"\\n\", \"\")\n if text_result == \"\":\n values += [\"ERROR\"]\n continue\n\n # if data is a number, we cast the value to an integer\n if data_type_number:\n try:\n text_result = int(text_result)\n except Exception as e:\n text_result = \"ERROR\"\n print(e)\n\n values += [text_result]\n return values\n\n def save_data(self, data, prefix_filename=None):\n headers = [\"id\"] + \\\n [e.key for e in self.template.main] + \\\n [e.key for i, e in enumerate(\n self.template.second) if i != self.template_idx_idkey]\n\n export_content = []\n export_content.append(headers)\n export_content += data\n\n d_now = datetime.utcnow()\n date = d_now.strftime('%d_%m_%Y')\n ts = calendar.timegm(d_now.timetuple())\n if prefix_filename is None:\n filename = \"%s-%s\" % (date, ts)\n else:\n filename = \"%s-%s-%s\" % (prefix_filename, date, ts)\n file_destination = os.path.abspath(\"./%s.xlsx\" % (filename))\n\n # Save data into an .xlsx file\n pyexcel.save_as(array=export_content, dest_file_name=file_destination)\n\n\n@app.command()\ndef hello():\n print(f\"=== {config.APP_NAME}: version {config.APP_VERSION} ===\")\n\n\n@app.command()\ndef collect(\n template: Annotated[str, typer.Option(\"--using\")],\n folder: Annotated[str, typer.Option(\"--from-folder\")]\n):\n hello()\n\n file_path = os.path.join(absolute_path, template)\n json_data = None\n\n try:\n with open(file_path) as json_file:\n json_data = json.load(json_file)\n except Exception as e:\n print(e)\n\n if json_data is None:\n print(f\"Could not read the 
template configuration from {template}\")\n return\n\n try:\n data = TemplateModel(**json_data)\n except ValidationError as e:\n print(e)\n return\n\n collector = Collector(data)\n collected = collector.run(folder)\n _, gov_order = collector.import_old()\n\n if collected is None:\n return\n\n collected = list(collected)\n len_gov_properties = 0\n if len(collected) > 0:\n len_gov_properties = len(collected[0])\n\n gov_collected = dict()\n for gov in collected:\n gov_collected[gov[0]] = gov\n\n existing_governors = []\n new_governors = []\n\n for gov_id in gov_order:\n gov = gov_collected.get(gov_id, None)\n if gov is None:\n # we have an old player registered but no new data, we put 0 for each columns\n existing_governors += [[gov_id] + [0] * (len_gov_properties - 1)]\n else:\n existing_governors += [[gov_id] + gov[1:]]\n\n for gov_id in gov_collected.keys():\n if gov_id not in gov_order:\n gov = gov_collected.get(gov_id, None)\n if gov:\n new_governors += [gov]\n\n collector.save_data(existing_governors, \"existing\")\n collector.save_data(new_governors, \"new\")\n\n\nif __name__ == \"__main__\":\n app()\n","repo_name":"sovanna/rok-data-collector","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20349982973","text":"import pygame\nfrom common.global_variable import GV\nfrom entities.sprite import Sprite\nfrom map.scan_map import map\n\n\ndef apply_gravity(entity: Sprite):\n entity.vsp += entity.gravity * GV.TICK\n entity.t_pos.y += entity.vsp * GV.TICK\n return entity\n\n\ndef apply_friction(entity: Sprite):\n if entity.hsp > 0:\n entity.hsp -= entity.friction * GV.TICK\n elif entity.hsp < 0:\n entity.hsp += entity.friction * GV.TICK\n entity.t_pos.x += entity.hsp * GV.TICK\n return entity\n\n\ndef check_ground(player):\n player.rect.centery = int(\n player.pos.y + player.rect.height / 2 + (player.t_pos.y - player.pos.y)\n )\n if pygame.sprite.spritecollideany(player, map):\n player.rect.centery = int(player.pos.y + player.rect.height / 2)\n player.vsp = 0\n player.jump = True\n player.t_pos.y = player.pos.y\n else:\n player.pos.y = player.t_pos.y\n\n player.rect.centerx = int(player.pos.x + player.rect.width + (player.t_pos.x - player.pos.x))\n if pygame.sprite.spritecollideany(player, map):\n player.rect.centerx = int(player.pos.x + player.rect.width)\n player.hsp = 0\n player.t_pos.x = player.pos.x\n else:\n player.pos.x = player.t_pos.x\n return player\n","repo_name":"SparrowTen/SparrowTenGame","sub_path":"client_server/common/physics.py","file_name":"physics.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1368089370","text":"#\n# @lc app=leetcode id=220 lang=python3\n#\n# [220] Contains Duplicate III\n#\n\n# @lc code=start\nclass Solution:\n def containsNearbyAlmostDuplicate(self, nums: List[int], k: int, t: int) -> bool:\n nums = list(zip(nums, range(len(nums))))\n nums.sort()\n for i in range(len(nums)) :\n j = i + 1\n while j < len(nums) and nums[j][0] - nums[i][0] <= t :\n if abs(nums[j][1] - nums[i][1]) <= k: return True\n else : j += 1\n return False\n \n\n#[1,2,3,1]\\n3\\n0\n \n# @lc 
code=end\n\n","repo_name":"quixoteji/Leetcode","sub_path":"solutions/220.contains-duplicate-iii.py","file_name":"220.contains-duplicate-iii.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"72448320145","text":"import glob\nimport os\nimport sys\nimport re\nimport concurrent.futures\n\ndef dealer(names):\n for name in names:\n with open(name) as f:\n head = next(f).strip()\n header = head.split(\",\")\n for i, line in enumerate(f):\n if i%10000 == 0:\n print(\"now iter %d\"%i, file=sys.stderr)\n line = line.strip()\n o = dict(zip(header, line.split(\",\")))\n if o[\"最終リンク先URL\"] != \"--\" and 'suumo.jp' in o[\"最終リンク先URL\"]:\n print(o['日'], o['キーワード'], o[\"最終リンク先URL\"])\nprint(\"start to globbing...\", file=sys.stderr) \nnames = [name for name in glob.glob(\"./yssDataset/*/*\")]\nprint(\"finish to globbing...\", file=sys.stderr) \nbucket = []\nfor i in range(0, len(names), 100):\n bucket.append( names[i:i+100] ) \n\nprint(\"finish to building backet...\", file=sys.stderr) \nwith concurrent.futures.ProcessPoolExecutor(max_workers=16) as executor:\n [_ for _ in executor.map(dealer, bucket)]\n\n\n\n","repo_name":"GINK03/kotlin-treasuredata-driver","sub_path":"examples/ConcurrencialSSHFsReader.py","file_name":"ConcurrencialSSHFsReader.py","file_ext":"py","file_size_in_byte":990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16567929136","text":"from DAO.DAO import DAOCrud\nfrom DAO.Mapeamento import Historico, HistoricoDisciplina\nfrom sqlalchemy.orm.exc import StaleDataError\n\nclass ManipulaBanco():\n # insere ---------------------\n def cadastraHistorico(historico: Historico):\n try:\n sessao = DAOCrud.getSession()\n DAOCrud.insere(sessao, historico)\n sessao.commit()\n sessao.close()\n return True\n except:\n return False\n\n # retorna id SERIAL inserido ------------------\n def retornaByIdHistorico():\n try:\n sessao = DAOCrud.getSession()\n sessao.expire_on_commit = False\n id = DAOCrud.retornaIdInserido(sessao)\n sessao.commit()\n # sessao.close()\n return id\n except:\n return False\n\n # insere varias disciplinas no historico ------------\n def cadastraListaDiscHistorico(listaRelacionamento: HistoricoDisciplina):\n try:\n sessao = DAOCrud.getSession()\n DAOCrud.insereLista(sessao, listaRelacionamento)\n sessao.commit()\n sessao.close()\n return True\n except:\n return False\n\n # deleta historico -----------------------------------------\n def deletaHistorico(matric):\n try:\n sessao = DAOCrud.getSession()\n DAOCrud.deletaHistorico(sessao, matric)\n sessao.commit()\n sessao.close()\n return True\n except:\n return False\n \n # listar historicos -----------------------------------------\n def listaHistoricos():\n try:\n sessao = DAOCrud.getSession()\n sessao.expire_on_commit = False\n historicos = DAOCrud.listaHistorico(sessao)\n sessao.commit()\n # sessao.close()\n return historicos\n except :\n return False\n\n # consulta historico por nro_matric(FK) ---------------------------\n def consultaHistorico(matric):\n try:\n sessao = DAOCrud.getSession()\n sessao.expire_on_commit = False\n historico = DAOCrud.consultaHistorico(sessao, matric)\n sessao.commit()\n #sessao.close()\n return historico\n except:\n return False\n\n # insere uma disciplina na grade ----------------------------\n def inserirDisciplinaNoHistorico(historicoDisciplina: HistoricoDisciplina):\n try:\n sessao = DAOCrud.getSession()\n DAOCrud.insere(sessao, 
historicoDisciplina)\n sessao.commit()\n sessao.close()\n return True\n except:\n return False \n \n # remove uma disciplina na grade ----------------------\n def removerDisciplinaDoHistorico(id_historico, id_disciplina):\n try:\n sessao = DAOCrud.getSession()\n DAOCrud.deletaHistoricoDisciplina(sessao, id_historico, id_disciplina)\n sessao.commit()\n sessao.close()\n return True\n except:\n return False\n\n # consulta GradeDisciplina para verificar disciplina é obrigatória para o Aluno ou não\n def consultaGradeDisc(gradeAno, gradeCurso, idDisc):\n try:\n sessao = DAOCrud.getSession()\n sessao.expire_on_commit = False\n grade = DAOCrud.consultaGradeDisciplina(sessao, gradeAno, gradeCurso, idDisc)\n sessao.commit()\n # sessao.close()\n return grade\n except StaleDataError as error:\n return False","repo_name":"ygor-salles/MVC-BD-Academico","sub_path":"02_MVC-BD-SQL-Academico/Model/HistoricoModel.py","file_name":"HistoricoModel.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"29046933281","text":"# importing the module \n# example from: https://www.geeksforgeeks.org/tweet-using-python/\nimport tweepy \n\n# personal details \nconsumer_key =\"067x70cnWGAltFo8YUFIhrN6X\"\nconsumer_secret =\"7O9NTSm1oB7BH1hLLIe2pHFPuqm2KQmbDSUQo2yOWyn41p4jyh\"\naccess_token =\"1052610096327524355-0nGLxfYObTplXq5lx5gwS7eT8Kf1pQ\"\naccess_token_secret =\"NGmpho8ZVqOPFcYuGmHgA3VZgPCwIWuzCioTRvtS5UpoP\"\n\n# authentication of consumer key and secret \nauth = tweepy.OAuthHandler(consumer_key, consumer_secret) \n\n# authentication of access token and secret \nauth.set_access_token(access_token, access_token_secret) \napi = tweepy.API(auth) \n\n# update the status \napi.update_status(status =\"Hello Everyone !\") \n\n","repo_name":"thedancomplex/the.narrative.machine.twitter","sub_path":"twitter_post.py","file_name":"twitter_post.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6035957594","text":"class laptop:\n def __init__(self,brand_name,model_name,price):\n self.brand_name=brand_name\n self.model_name=model_name\n self.price=price\n \n def apply_discount(self,dis_percentage):\n discount=(dis_percentage/100)*self.price\n return discount\n\np=laptop('lenovo',\"ideapad gaming\",63000)\nprint(p.apply_discount(50))","repo_name":"dox404/All-Python-Code","sub_path":"HARSHIT PYTHON/oop3.py","file_name":"oop3.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73881920784","text":"m = 1\nn = 1\ninicio = True\n\n\nwhile inicio:\n try: \n m = int(input(\"base =\"))\n n = int(input(\"altura =\"))\n if m < 1 or n < 1:\n raise Exception(\"El número debe ser mayor que 0 para generar un rectángulo\")\n except:\n print(\"El valor debe ser mayor que 0\")\n \n else: \n res = \" \"\n\n for l in range(0,m):\n res = res+\" -\"\n for t in range(0,n):\n res = res +\"\\n| \"\n for y in range(0,m):\n res = res + \" \"\n res = res + \"|\"\n res = res + \"\\n \"\n for u in range(0,m):\n res = res+\" -\"\n print(res)\n consulta = int(input(\"Si desea salir presione 1, si quiere seguir presione 2 \"))\n if consulta == 1:\n inicio = False \n elif consulta == 2:\n inicio = True\n\n\n\n \n\n 
","repo_name":"alexbaezah/tryyyyy","sub_path":"cubo.py","file_name":"cubo.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"23484960164","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\nx, y = np.loadtxt(\"ex1data1.txt\", dtype=np.float32, comments=\"#\", delimiter=\",\", unpack=True)\nm = len(y)\n\nplt.plot(x, y, 'rx')\nplt.ylabel('Profit in $10,000s')\nplt.xlabel('Population of City in 10,000s')\n\nX = np.array([[1, i] for i in x])\ntheta = np.zeros(2)\niterations = 1500\nalpha = 0.01\n\ndef cost_function(X, y, theta):\n h = X.dot(theta)\n return np.sum(np.square(h-y))/(2*m)\n\nprint(cost_function(X, y, theta))\n\nj_history = np.array([0 for _ in range(iterations)], dtype=np.float)\nfor i in range(iterations): \n temp0 = theta[0] - (alpha/m) * np.sum((X.dot(theta) - y) * np.array(X[:, 0]))\n temp1 = theta[1] - (alpha/m) * np.sum((X.dot(theta) - y) * np.array(X[:, 1]))\n theta[0] = temp0\n theta[1] = temp1\n j_history[i] = cost_function(X, y, theta)\n\nprint(theta)\n\npredict1 = theta.dot([1, 3.5])\npredict2 = theta.dot([1, 7])\n\nprint(predict1, predict2)\n\nplt.plot(x, X.dot(theta), '-b')\n\ntheta0_vals = np.linspace(-10, 10, 100)\ntheta1_vals = np.linspace(-1, 4, 100)\nj_vals = np.array([[0.0 for _ in range(len(theta1_vals))] for _ in range(len(theta0_vals))])\nfor i in range(len(theta0_vals)):\n for j in range(len(theta1_vals)):\n t = [theta0_vals[i], theta1_vals[j]]\n j_vals[i][j] = cost_function(X, y, t)\n\nj_vals = j_vals.transpose()\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\ntheta0_vals, theta1_vals = np.meshgrid(theta0_vals, theta1_vals)\nax.plot_surface(theta0_vals, theta1_vals, j_vals)\nplt.xlabel('Intercept')\nplt.ylabel('Slope')\n\nfig1, ax1 = plt.subplots()\nax1.contour(theta0_vals, theta1_vals, j_vals, levels=30)\nplt.xlabel('theta_0')\nplt.ylabel('theta_1')\nplt.plot(theta[0], theta[1], 'rx')\n\nplt.show()","repo_name":"Aiganymus/machine-learning-2019","sub_path":"lab-2/lab.py","file_name":"lab.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"33149928266","text":"# open file\n# go through and extract:\n# Cause of death, Deaths, Crude Rate\n# Save in Dictionary as {\"Cause Of Death\":{\"Deaths\": ####, \"Crude Rate\"}}\n# Save as json file\nimport csv\n# import re\nimport json \nAllDATA = []\nwith open(\"Underlying Cause of Death 2018.csv\") as csv_file:\n csv_reader = csv.DictReader(csv_file)\n line_count = 0\n residential_count = 0\n for row in csv_reader:\n # print(row[\"Cause of death\"], row[\"Deaths\"], row[\"Crude Rate\"])\n AllDATA.append({\"name\": row[\"Causes of Death\"] + \" (2018)\", \"Deaths\": row[\"Deaths\"], \"Crude Rate\":row[\"Crude Rate\"]})\n\n# print(AllDATA)\n\nwith open('./covidstats/src/Deaths2018.json', 'w') as fp:\n json.dump(AllDATA, fp)","repo_name":"ocampossoto/Covid","sub_path":"Get2018Deaths.py","file_name":"Get2018Deaths.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21893825014","text":"# -*- coding: utf-8 -*-\nimport tornado\nimport json\nfrom tornado import web, websocket\nimport tornado.testing\nfrom tornado.ioloop import TimeoutError\n\nimport tests.utils as u\n\n\nclass TestDataAPI(tornado.testing.AsyncHTTPTestCase):\n pages_uri = 
r\"/ws-pages-data\"\n jobs_uri = r\"/ws-jobs-data\"\n\n @classmethod\n def setUpClass(cls):\n u.init_db()\n\n @classmethod\n def tearDownClass(cls):\n u.clear_db()\n\n def get_app(self):\n return u.get_app(self.pages_uri, self.jobs_uri)\n\n @tornado.testing.gen_test\n def test_set_message_size(self):\n test_command = self.get_command(\"test_set_0\",'set_max_message_size', {\"max_size\":10000})\n ws_url = \"ws://localhost:\" + str(self.get_http_port()) + self.jobs_uri\n ws_client = yield tornado.websocket.websocket_connect(ws_url)\n ws_client.write_message(json.dumps(test_command))\n response = yield ws_client.read_message()\n json_response = json.loads(response)\n res = json_response.get(\"result\", False)\n self.assertTrue(res)\n\n @tornado.testing.gen_test\n def test_jobs_no_filter(self):\n jobs_command = self.get_command(\"test_jobs_0\",'subscribe_to_jobs', {})\n yield self.execute_jobs_command(jobs_command, wait_result=True)\n\n @tornado.testing.gen_test\n def test_jobs_filter_include(self):\n jobs_command = self.get_command(\"test_jobs_1\",'subscribe_to_jobs', {\"include\":[\"127.0.0.1\"],})\n yield self.execute_jobs_command(jobs_command, wait_result=True)\n\n @tornado.gen.coroutine\n def execute_jobs_command(self, jobs_command, wait_result=True):\n ws_url = \"ws://localhost:\" + str(self.get_http_port()) + self.jobs_uri\n ws_client = yield tornado.websocket.websocket_connect(ws_url)\n ws_client.write_message(json.dumps(jobs_command))\n response = yield ws_client.read_message()\n json_response = json.loads(response)\n subs_id = json_response.get(\"result\").get(\"id\", -1)\n self.assertNotEqual(subs_id, -1)\n if wait_result:\n response = yield ws_client.read_message()\n json_response = json.loads(response)\n if json_response is None:\n self.fail(\"incorrect response\")\n else:\n self.assertTrue('stats' in json_response)\n self.assertTrue(isinstance(json_response[\"stats\"], dict))\n yield self.execute_cancel(ws_client, subs_id, True)\n\n def test_jobs_filter_include_not_exists(self):\n @tornado.gen.coroutine\n def f():\n jobs_command = self.get_command(\"test_jobs_2\",'subscribe_to_jobs', {\"include\":[\"notexists.com\"],})\n yield self.execute_jobs_command(jobs_command, wait_result=True)\n self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=3)\n\n @tornado.testing.gen_test\n def test_pages_filter_url_groups(self):\n url_value = 'http://example.com'\n pages_command = self.get_command(\"test_pages_0\",'subscribe_to_pages', {'url_groups': {1: {url_value: None}}})\n yield self.execute_pages_command(pages_command, wait_result=True, required_url=url_value)\n\n def test_pages_no_result(self):\n @tornado.gen.coroutine\n def f():\n url_value = 'http://mysite.com'\n pages_command = self.get_command(\"test_pages_3\",'subscribe_to_pages', {'url_groups': {1: {url_value: None}}})\n yield self.execute_pages_command(pages_command,\n wait_result=True,\n required_url=url_value,\n max_count=0)\n self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=3)\n\n def test_pages_exact_count(self):\n @tornado.gen.coroutine\n def f():\n url_value = 'http://example.com'\n pages_command = self.get_command(\"test_pages_4\",'subscribe_to_pages', {'url_groups': {1: {url_value: None}}})\n yield self.execute_pages_command(pages_command,\n wait_result=True,\n required_url=url_value,\n max_count=1)\n self.assertRaises(TimeoutError, self.io_loop.run_sync, f, timeout=3)\n\n @tornado.testing.gen_test\n def test_pages_no_filter(self):\n pages_command = 
self.get_command(\"test_pages_1\",'subscribe_to_pages', {})\n yield self.execute_pages_command(pages_command, wait_result=True)\n\n @tornado.testing.gen_test\n def test_pages_filter_urls(self):\n url_value = 'http://example.com'\n pages_command = self.get_command(\"test_pages_2\",'subscribe_to_pages', {'urls': {url_value: None}})\n yield self.execute_pages_command(pages_command, wait_result=True, required_url=url_value)\n\n def get_command(self, id, method, params):\n return {\n 'id': id,\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params\n }\n\n @tornado.gen.coroutine\n def execute_pages_command(self, pages_command, wait_result=False, required_url=None, max_count=None):\n ws_url = \"ws://localhost:\" + str(self.get_http_port()) + self.pages_uri\n ws_client = yield tornado.websocket.websocket_connect(ws_url)\n ws_client.write_message(json.dumps(pages_command))\n response = yield ws_client.read_message()\n json_response = json.loads(response)\n subs_id = json_response.get(\"result\").get(\"single_subscription_id\", -1)\n if not subs_id:\n group_sub_ids = json_response.get(\"result\").get(\"id\", {})\n for group_id in group_sub_ids.keys():\n if group_sub_ids[group_id] != -1:\n subs_id = group_sub_ids[group_id]\n self.assertNotEqual(subs_id, -1)\n if wait_result:\n if max_count is None:\n response = yield ws_client.read_message()\n json_response = json.loads(response)\n if json_response is None:\n self.fail(\"incorrect response\")\n else:\n cnt = 0\n while True:\n response = yield ws_client.read_message()\n json_response = json.loads(response)\n if json_response is None:\n self.fail(\"incorrect response\")\n cnt += 1\n if cnt > max_count:\n self.fail(\"max count of pages exceeded\")\n yield self.execute_cancel(ws_client, subs_id, True)\n\n @tornado.testing.gen_test\n def test_wrong_cancel(self):\n ws_url = \"ws://localhost:\" + str(self.get_http_port()) + self.pages_uri\n ws_client = yield tornado.websocket.websocket_connect(ws_url)\n yield self.execute_cancel(ws_client, -1, False)\n\n @tornado.gen.coroutine\n def execute_cancel(self, ws_client, subscription_id, expected):\n cmd_id = \"test_cancel\"\n cancel_command = self.get_command(cmd_id,'cancel_subscription', {\"subscription_id\": subscription_id})\n ws_client.write_message(json.dumps(cancel_command))\n while True:\n response = yield ws_client.read_message()\n json_response = json.loads(response)\n if json_response.get(\"id\", None) == cmd_id:\n self.assertEqual(json_response.get(\"result\"), expected)\n break\n","repo_name":"TeamHG-Memex/arachnado","sub_path":"tests/test_data.py","file_name":"test_data.py","file_ext":"py","file_size_in_byte":7433,"program_lang":"python","lang":"en","doc_type":"code","stars":156,"dataset":"github-code","pt":"48"} +{"seq_id":"17419311294","text":"cv_data = {\n \"personal\": {\n \"name\": \"Alex Dragan\",\n \"email\": \"alex@alexdragan.com\",\n \"phone\": \"+40729501514\"\n },\n \"experience\": [\n {\n \"position\": \"Python Developer\",\n \"company\": \"NxData\",\n \"year\": \"2017-2018\"\n },\n {\n \"position\": \"Python Developer\",\n \"company\": \"Cegeka\",\n \"year\": \"2018-2019\"\n },\n {\n \"position\": \"Senior Network Developer\",\n \"company\": \"Vodafone\",\n \"year\": \"2019-2021\"\n },\n {\n \"position\": \"Full Stack Developer\",\n \"company\": \"Coherent Solutions\",\n \"year\": \"2021-Present\"\n }\n ],\n \"education\": [\n {\n \"degree\": \"Bachelor of Science in Computer Science\",\n \"university\": \"Bucharest Academy of Economic Studies\",\n \"year\": \"2014-2017\"\n },\n {\n 
\"degree\": \"Master of Science in Computer Science\",\n \"university\": \"Bucharest Academy of Economic Studies\",\n \"year\": \"2017-2019\"\n }\n ]\n}\n","repo_name":"DraganAlexandru/cegeka_alex_dragan","sub_path":"cv_data.py","file_name":"cv_data.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32874332456","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 26 15:49:07 2021\n\n@author: forte\n\"\"\"\nfrom tkinter import *\nimport random\nfrom tkinter import messagebox\n\nclass Actions_game:\n \n def __Init__(self):\n self.question = ()\n \n ## take to the question and answer to the screen \n def load_level(self, widgets, question):\n widgets.question = question\n #update level and score in screen\n widgets.id_level_label.set(\"Nivel \"+str(widgets.level))\n widgets.id_score_label.set(str(widgets.score)+\" Puntos\")\n #load question and answers\n widgets.id_question_label.set(question[2]) #question\n widgets.id_ans1_rb.set(question[4])\n widgets.id_ans2_rb.set(question[5])\n widgets.id_ans3_rb.set(question[6])\n widgets.id_ans4_rb.set(question[7])\n \n # widgets.id_question_label.set(\"¿pregunta \"+str(widgets.level)+\"?\")\n # widgets.id_ans1_rb.set(\"respuesta 1\")\n # widgets.id_ans2_rb.set(\"respuesta 2\")\n # widgets.id_ans3_rb.set(\"respuesta 3\")\n # widgets.id_ans4_rb.set(\"respuesta 4\")\n \n \n #compare if the answer of user is correcto or not \n def check_answer(self, widgets, question, dates_db, ranking_db):\n\n question = self.question\n if widgets.answer.get() > 3 and widgets.answer.get() < 8: #when is diferent to the interval, radiobutton is unselected\n if question[3] == question[widgets.answer.get()]: #if correct_answer == answer: \n self.next_level(widgets, dates_db, ranking_db)\n else:\n widgets.score = 0\n messagebox.showinfo(\"Respuesta incorrecta\", \"La respuesta es incorrecta.\")\n widgets.screen(\"s2\") #show final screen\n else:\n messagebox.showwarning(\"Advertencia\",\"Por favor, seleccione una respuesta\")\n \n \n ## Pass to next level while the level be less or equal to 5 level. 
After winning level 5, finish the game\n def next_level(self, widgets, dates, ranking_db):\n widgets.answer.set(1)\n if widgets.level+1 <= 5:\n widgets.score += 10*widgets.level\n widgets.level += 1\n question = self.load_question(dates, widgets)\n self.load_level(widgets, question)\n \n else:\n widgets.score += 100 #Score final level\n self.finish_game(widgets, ranking_db)\n \n #play the game again\n def play_again(self, widgets, dates_db):\n widgets.score = 0\n widgets.level = 1\n widgets.forget_widgets(widgets.list_widgets_s2)\n #widgets.screen(\"s1\") #show level 1 of the game\n widgets.frame1.pack(side=TOP)\n widgets.frame2.pack(side=TOP)\n question = self.load_question(dates_db, widgets)\n self.load_level(widgets, question) #load a question of the level\n\n\n #load a random question from the database\n def load_question(self, dates_db,widgets):\n random_number = random.randint(1,5)+5*(widgets.level-1)\n question = dates_db.execution(\"SELECT * FROM questions WHERE ID=\"+str(random_number))\n self.question = question[0]\n return question[0]\n \n \n def finish_game(self, widgets, ranking_db): #button \"Finalizar juego\" can be pressed at any moment\n #ranking_db.openConection(\"Ranking.db\")\n values_ranking = \"'\"+widgets.id_entry_init.get()+\"',\"+str(widgets.score)\n ranking_db.execution( \"INSERT INTO ranking ('USERNAME','SCORE') VALUES (\"+values_ranking+\")\" ) #save score in ranking\n #ranking_db.closeConnection()\n widgets.screen(\"s2\") #show final screen\n \n\n \n \n ####show ranking in a new frame with scroll at the right \n def show_ranking(self,widgets,ranking_db, s):\n dates = ranking_db.execution(\"SELECT * FROM ranking\")\n len_dates = len(dates)\n widgets.forget_widgets(widgets.list_widgets_s0)\n widgets.forget_widgets(widgets.list_widgets_s2) \n \n ####configure scrollbar\n #widgets.canvas1.configure(scrollregion = widgets.canvas1.bbox(\"all\")) #update range of scroll\n widgets.canvas1.pack(fill= BOTH, side=TOP, expand=1)\n widgets.yscrollbar.pack(side=RIGHT, fill=Y) #Y: fill along the Y axis\n widgets.canvas1.configure(yscrollcommand= widgets.yscrollbar.set)\n widgets.canvas1.bind('<Configure>', lambda e:widgets.canvas1.configure(scrollregion = widgets.canvas1.bbox(\"all\")))\n widgets.canvas1.create_window((0,0),window=widgets.frame3, anchor=\"nw\")\n \n rowi = 0\n Label(widgets.frame3, text = \"Usuario\", font=widgets.fontTextBold).grid(row=rowi, column=0, padx=3)\n Label(widgets.frame3, text = \"Puntuación\", font=widgets.fontTextBold).grid(row=rowi, column=1, padx=3)\n rowi+=1\n self.list_ranking = []\n for i in range(len_dates):\n labelx = Label(widgets.frame3, text = dates[i][1], font=widgets.fontText)\n labelx.grid(row=rowi, column=0, padx=3)\n self.list_ranking.append(labelx)\n labely = Label(widgets.frame3, text = dates[i][2], font=widgets.fontText)\n labely.grid(row=rowi, column=1, padx=3)\n self.list_ranking.append(labely)\n rowi+=1\n \n self.back_button = Button(widgets.frame3, text=\"Volver\", width=widgets.width_button, font=widgets.fontTextBold, command=lambda:self.back_ranking(s, widgets))\n self.back_button.grid(row=rowi, column=0, columnspan = 2, pady=20)\n \n \n \n \n ####Return to previous screen to continue with the development of the game\n def back_ranking(self, s, widgets): #back to previous screen\n widgets.canvas1.configure(scrollregion = widgets.canvas1.bbox(\"all\")) #update range of scroll\n \n #hide the widgets of screen\n self.back_button.grid_remove()\n widgets.forget_widgets(self.list_ranking)\n widgets.frame3.pack_forget()\n widgets.canvas1.pack_forget()\n \n 
widgets.screen(s) #show previous screen","repo_name":"jhonfg85/GameQuestions","sub_path":"actions_game.py","file_name":"actions_game.py","file_ext":"py","file_size_in_byte":6093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"73354024786","text":"from typing import Optional\n\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, val=0, next_=None):\n self.val = val\n self.next = next_\n\n\nclass Solution:\n def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n def convert_to_list(node):\n lst = []\n current = node\n while node:\n lst.append(current.val)\n if current.next:\n current = current.next\n else:\n break\n else:\n return [0]\n return lst\n\n l1 = convert_to_list(l1)\n l2 = convert_to_list(l2)\n l3 = []\n\n ln1 = len(l1)\n ln2 = len(l2)\n\n car = 0\n for i in range(max([ln1, ln2])):\n x = l1[i] if i < ln1 else 0\n y = l2[i] if i < ln2 else 0\n\n sum_ = x + y + car\n\n if sum_ > 9:\n l3.append(sum_ % 10)\n car = 1\n else:\n l3.append(sum_)\n car = 0\n if car:\n l3.append(1)\n\n head = None\n prev = None\n for l in l3:\n if not prev:\n head = ListNode(l)\n prev = head\n else:\n node = ListNode(l)\n prev.next = node\n prev = node\n\n return head\n","repo_name":"cetinca/study","sub_path":"algos/sum_linked_list.py","file_name":"sum_linked_list.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"39180477930","text":"import tensorflow as tf\nfrom tensorflow.python.framework import ops\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import preprocessing\nfrom matplotlib import pyplot as plt\n\ndef log_gaussian(x, mu, sigma):\n \"\"\"\n Returns the log Gaussian likelihood of an observation x given a mean and a std. deviation\n \n Keyword arguments:\n - x: observation.\n - mu: mean.\n - sigma: std. deviation.\n \"\"\"\n return tf.cast(-0.5 * tf.log(2 * np.pi) - tf.log(sigma) - tf.square(x - mu) / (2 * tf.square(sigma)), tf.float64)\n\n\ndef reparametrise_weights(mu, rho):\n \"\"\"\n Performs the reparameterisation trick: w = mu + log(1+exp(rho))*e\n \n Keyword arguments:\n - mu: variational parameter for the mean.\n - rho: variational parameter for the std. deviation.\n \"\"\"\n epsilon = tf.random_normal(mu.shape, mean=0., stddev=1.)\n return mu + tf.multiply(tf.log(1. + tf.exp(rho)), epsilon)\n\n\n\ndef sample_network(weights_mu, biases_mu, weights_rho, biases_rho, n_hidden_layers):\n \"\"\"\n Samples the weights and biases from the posterior distribution.\n \n Keyword arguments: \n - weights_mu: variational parameters for mu for the weights.\n - biases_mu: variational parameters for mu for the biases.\n - weights_rho: variational parameters for rho for the weights.\n - biases_rho: variational parameters for rho for the biases.\n \"\"\"\n weights = {}\n biases = {}\n for i in range(n_hidden_layers+1):\n weights[i] = tf.random_normal(weights_mu[i].shape, \n mean = weights_mu[i], \n stddev=tf.log(1. + tf.exp(weights_rho[i])))\n biases[i] = tf.random_normal(biases_mu[i].shape, \n mean = biases_mu[i], \n stddev=tf.log(1. 
+ tf.exp(biases_rho[i])))\n return weights, biases\n\n\n\n\n\n\ndef build_network(x, weights, biases, n_hidden_layers):\n \"\"\"\n Builds network recursively.\n \n Keyword arguments:\n - weights: weights of the network.\n - biases: biases of the network.\n \"\"\"\n network = []\n for i in range(n_hidden_layers+1):\n if i == 0:\n network.append(tf.nn.relu(tf.matmul(x, weights[i]) + biases[i]))\n else:\n network.append(tf.nn.relu(tf.matmul(network[i-1], weights[i]) + biases[i]))\n output = tf.matmul(network[n_hidden_layers-1], weights[n_hidden_layers]) + biases[n_hidden_layers]\n return output\n\n\ndef log_prior_gaussian(x, sigma):\n \"\"\"\n Returns the log probability of an observation under a guassian.\n \n Keyword arguments:\n - x: samples.\n - sigma: the std. devaition.\n \"\"\"\n return log_gaussian(x = x, mu = 0., sigma = sigma)\n\n\ndef sample(x, y, n_samples, weights_mu, weights_rho, biases_mu, biases_rho, n_hidden_layers, prior_sigma): \n \"\"\"\n Computes the approximation of the log prior, posterior and likelihood\n \n Keyword arguments:\n - n_samples: number of samples to be used to compute the approximation.\n - weights_mu: variational paramters mu for the weights.\n - weights_rho: variational paramters rho for the weights.\n - biases_mu: variational paramters mu for the biases.\n - biases_rho: variational paramters rho for the biases.\n \"\"\"\n approximate_log_prior = 0.\n approximate_log_posterior = 0.\n approximate_log_likelihood = 0.\n for _ in range(n_samples):\n # Reparametrise weights\n weights = {}\n biases = {}\n for i in range(n_hidden_layers+1):\n weights[i] = reparametrise_weights(mu = weights_mu[i], rho = weights_rho[i])\n biases[i] = reparametrise_weights(mu = biases_mu[i], rho = biases_rho[i])\n # Build network\n output = build_network(x = x, weights = weights, biases = biases, n_hidden_layers = n_hidden_layers)\n # Initialise samples to 0\n sample_log_prior = 0.\n sample_log_posterior = 0.\n sample_log_likelihood = 0.\n # For each layer \n for i in range(n_hidden_layers+1):\n # Sample log prior\n sample_log_prior += tf.reduce_sum(log_prior_gaussian(x = weights[i],\n sigma = prior_sigma))\n sample_log_prior += tf.reduce_sum(log_prior_gaussian(x = biases[i],\n sigma = prior_sigma))\n # Sample log posterior\n sample_log_posterior += tf.reduce_sum(log_gaussian(x = weights[i], \n mu = weights_mu[i], \n sigma = tf.log(1. + tf.exp(weights_rho[i]))))\n sample_log_posterior += tf.reduce_sum(log_gaussian(x = biases[i], \n mu = biases_mu[i], \n sigma = tf.log(1. 
+ tf.exp(biases_rho[i]))))\n # Sample log likelihood\n sample_log_likelihood += tf.reduce_sum(-tf.nn.softmax_cross_entropy_with_logits(labels = y, logits = output))\n\n approximate_log_prior += sample_log_prior\n approximate_log_posterior += sample_log_posterior\n approximate_log_likelihood += sample_log_likelihood\n\n approximate_log_prior = tf.cast(tf.div(approximate_log_prior, n_samples), tf.float64)\n approximate_log_posterior = tf.cast(tf.div(approximate_log_posterior, n_samples), tf.float64)\n approximate_log_likelihood = tf.cast(tf.div(approximate_log_likelihood, n_samples), tf.float64)\n \n return approximate_log_prior, approximate_log_posterior, approximate_log_likelihood\n\n\n\ndef initialise_variational_parametes(n_hidden_layers, hidden_layers_dim, n_input, n_output, init_params):\n \"\"\"\n Initialises the variational parameters mu and rho.\n \n Keyword arguments:\n - n_hidden_layers: number of hidden layers\n - hidden_layers_dim: the number of neurons per in each hidden layer\n - n_input: dimension of the input \n - n_output: dimention of the output\n \"\"\"\n neurons = [n_input] + hidden_layers_dim + [n_output]\n weights_mu={}\n biases_mu={}\n weights_rho={}\n biases_rho={}\n \n init_sigma_weights_mu = init_params[0]\n init_sigma_biases_mu = init_params[1]\n init_sigma_weights_rho = init_params[2]\n init_sigma_biases_rho = init_params[3]\n \n for i in range(n_hidden_layers+1):\n weights_mu[i] = tf.Variable(tf.random_normal((neurons[i], neurons[i+1]), \n mean = 0., \n stddev=init_sigma_weights_mu),\n tf.float64)\n biases_mu[i] = tf.Variable(tf.random_normal((neurons[i+1],), \n mean = 0., \n stddev=init_sigma_biases_mu),\n tf.float64)\n weights_rho[i] = tf.Variable(tf.random_normal((neurons[i], neurons[i+1]), \n mean = -5., \n stddev=init_sigma_weights_rho),\n tf.float64)\n biases_rho[i] = tf.Variable(tf.random_normal((neurons[i+1],), \n mean = -5., \n stddev = init_sigma_biases_rho),\n tf.float64)\n return weights_mu, biases_mu, weights_rho, biases_rho\n\n\nclass BN():\n\n\tdef __init__(self, mnist):\n\t\tN = mnist.data.shape[0]\n\t\tdata = np.float32(mnist.data[:]) / 255.\n\t\ttarget = mnist.target.reshape(N, 1)\n\t\ttrain_idx, test_idx = train_test_split(np.array(range(N)), test_size=0.15)\n\t\tself.train_data, self.test_data = data[train_idx], data[test_idx]\n\t\tself.train_target, self.test_target = target[train_idx], target[test_idx]\n\t\tself.train_target = np.float32(preprocessing.OneHotEncoder(sparse=False).fit_transform(self.train_target))\n\n\tdef train_bayesian_nn(self,params):\n\t\tlearning_rate_log = params[0]\n\t\tn_epochs = int(params[1])\n\t\tbatch_size = int(params[2])\n\t\tn_samples = int(params[3])\n\t\tprior_sigma = np.float32(params[4])\n\t\tinit_sigma_weights_mu = np.float32(params[5])\n\t\tinit_sigma_biases_mu = np.float32(params[6])\n\t\tinit_sigma_weights_rho = np.float32(params[7])\n\t\tinit_sigma_biases_rho =np.float32(params[8])\n\t\t\n\t\tlearning_rate = float(np.exp(learning_rate_log))\n\t\tprint (\"\\tLearning rate: \" + str(learning_rate) +\", training epochs: \" + str(n_epochs) + \", batch size: \"+ str(batch_size) + \", n_samples: \" + str(n_samples) + \", prior_sigma: \" +str(prior_sigma) +\", init_sigma_weights_mu: \" + str(init_sigma_weights_mu) +\", init_sigma_biases_mu: \"+ str(init_sigma_biases_mu) +\", init_sigma_weights_rho: \"+ str(init_sigma_weights_rho) +\", init_sigma_biases_rho: \"+ str(init_sigma_biases_rho))\n\t\t \n\t\tn_batches = int(self.train_data.shape[0]/ float(batch_size))\n\n\t\tops.reset_default_graph()\n\n\t\t# Define 
input and output dimension\n\t\tn_input = 784\n\t\tn_output = 10\n\t\t# Define input and output placeholders\n\t\tx = tf.placeholder(tf.float32, shape = [None, n_input])\n\t\ty = tf.placeholder(tf.float32, shape = [None, n_output]) \n\n\t\t# Set number of hidden layers\n\t\tn_hidden_layers = 2\n\n\t\t# Initialise variational parameters: mus and rhos for weights and biases\n\t\tweights_mu, biases_mu, weights_rho, biases_rho = initialise_variational_parameters(n_hidden_layers = n_hidden_layers, \n\t\t hidden_layers_dim = [200, 200],\n\t\t n_input = n_input,\n\t\t n_output = n_output,\n\t\t init_params = [init_sigma_weights_mu,\n\t\t init_sigma_biases_mu,\n\t\t init_sigma_weights_rho,\n\t\t init_sigma_biases_rho,])\n\t\t # Sample prior, posterior and likelihood\n\t\tlog_prior, log_posterior, log_likelihood = sample(x = x,\n\t\t y = y,\n\t\t n_samples = n_samples,\n\t\t weights_mu = weights_mu, \n\t\t weights_rho = weights_rho, \n\t\t biases_mu = biases_mu, \n\t\t biases_rho = biases_rho,\n\t\t n_hidden_layers = n_hidden_layers,\n\t\t prior_sigma = prior_sigma)\n\t\t# Set the scaling factor for log_posterior - log_prior to account for the fact that we are\n\t\t# using batches of data\n\t\tscaling_factor = tf.placeholder(tf.float64, shape = None, name = 'scaling_factor')\n\t\t# Create loss function\n\t\tloss = tf.reduce_sum(scaling_factor*(log_posterior - log_prior) - log_likelihood)\n\t\t# Create optimiser\n\t\toptimiser = tf.train.AdamOptimizer(learning_rate)\n\t\toptimise = optimiser.minimize(loss)\n\t\t# Sample the weights and biases of the network\n\t\tweights, biases = sample_network(weights_mu, biases_mu, weights_rho, biases_rho, n_hidden_layers= n_hidden_layers)\n\t\t# Build the network\n\t\toutput = tf.nn.softmax(build_network(x=x, weights = weights, biases = biases, n_hidden_layers = n_hidden_layers))\n\t\t# Store predictions\n\t\tpred = tf.argmax(output, 1)\n\t\t# Initialise all variables\n\t\tinit = tf.global_variables_initializer()\n\n\t\tresults = {'loss':[], 'test_set_accuracy':[]}\n\t\twith tf.Session() as sess:\n\t\t sess.run(init)\n\t\t for epoch in range(n_epochs):\n\t\t #print (\"Epoch: %03d/%03d\" % (epoch+1, n_epochs)) \n\t\t for i in range(n_batches):\n\t\t ob = sess.run([loss, optimise, log_prior], feed_dict={x: self.train_data[i * batch_size: (i + 1) * batch_size],\n\t\t y: self.train_target[i * batch_size: (i + 1) * batch_size],\n\t\t scaling_factor: (2 ** (n_batches - (i + 1))) / ((2 ** n_batches) - 1 )})\n\t\t predictions = sess.run(pred, feed_dict={x: self.test_data})\n\t\t test_accuracy = np.count_nonzero(predictions == np.int32(self.test_target.ravel())) / float(self.test_data.shape[0]) ; print (\"Accuracy \" + str(test_accuracy))\n\t\treturn 1 - test_accuracy\n\n \n","repo_name":"wtywty5ty/Bayesian-opt","sub_path":"BO/BN.py","file_name":"BN.py","file_ext":"py","file_size_in_byte":13065,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"20525145017","text":"from gym import spaces\nimport numpy as np\n\nfrom manipulator_learning.sim.envs.thing_reaching import ThingReachingGeneric\n\n\nclass PandaReachXYZState(ThingReachingGeneric):\n def __init__(self, max_real_time=6, n_substeps=10, dense_reward=True, action_multiplier=0.1, **kwargs):\n self.action_space = spaces.Box(-1, 1, (3,), dtype=np.float32)\n self.observation_space = spaces.Box(-np.inf, np.inf, (8,))\n\n CONFIG = dict(\n block_random_lim=[[.35, .35]],\n init_block_pos=[[-.025, -.05]],\n block_style='small',\n init_gripper_pose=[[0.0, .5, .25], [np.pi, 
0, 0]],\n robot_base_ws_cam_tf=((-.4, .6, .3), (-2, 0, -1.85)),\n pos_limits=[[.85, -.35, .655], [1.15, -0.05, 0.8]],\n valid_r_dofs=[0, 0, 0]\n )\n\n super().__init__('reaching_xyz', False, dense_reward, 'w',\n state_data=('pos', 'obj_pos', 'obj_rot_z'),\n max_real_time=max_real_time, n_substeps=n_substeps,\n action_multiplier=action_multiplier, reach_radius=.1, robot='panda',\n limits_cause_failure=False, failure_causes_done=False, success_causes_done=False,\n control_frame='b', **CONFIG, **kwargs)\n","repo_name":"utiasSTARS/manipulator-learning","sub_path":"manipulator_learning/sim/envs/panda_reaching.py","file_name":"panda_reaching.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"48"} +{"seq_id":"24083581860","text":"# Matt Crow\nfrom socket import *\nfrom config import UDP_SERVER_ADDR, BUFFER_SIZE, formatAddr\n\ndef startUdpClient():\n client = socket(AF_INET, SOCK_DGRAM)\n message = input(\"Enter a sentence: \")\n client.sendto(message.encode(), UDP_SERVER_ADDR) #2\n response, serverAddr = client.recvfrom(BUFFER_SIZE) # blocks until 3\n response = response.decode()\n print(f'Server responded with \"{response}\"')\n client.close()\n\nif __name__ == \"__main__\":\n startUdpClient()\n","repo_name":"Matt-Crow/SmallPythonPrograms","sub_path":"network/socket/udpClient.py","file_name":"udpClient.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"40312881783","text":"from sklearn.metrics import classification_report\nfrom sklearn.metrics import confusion_matrix\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\nimport torch\nimport json\n\n\nclass Trainer(object):\n\n def __init__(self, model, num_epochs, device, train_loader, validation_loader, test_loader, optimizer,\n criterion, log_path, model_path, conf_path, metric_path, scheduler=None):\n '''\n :param log_path: (str) path to save learning curve data\n :param model_path: (str) path to save trained model\n :param conf_path: (str) path to save confusion matrix\n :param metric_path: (str) path to save classification report\n '''\n self.model = model\n self.num_epochs = num_epochs\n self.device = device\n self.train_loader = train_loader\n self.validation_loader = validation_loader\n self.test_loader = test_loader\n self.optimizer = optimizer\n self.criterion = criterion\n self.scheduler = scheduler\n self.log_path = log_path\n self.model_path = model_path\n self.conf_path = conf_path\n self.metric_path = metric_path\n\n def train_one_epoch(self, model, device, train_loader, optimizer, criterion,\n epoch_number, num_epochs, scheduler=None):\n running_train_loss = 0.0\n running_train_correct_predictions = 0\n num_items = 0\n model.train()\n with tqdm(train_loader, total=len(train_loader)) as loop:\n for data in loop:\n optimizer.zero_grad()\n inputs, labels = data[0].to(device), data[1].to(device)\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n num_items += inputs.size(0)\n running_train_loss += loss.item()\n avg_loss = running_train_loss / num_items\n pred = outputs.argmax(dim=1, keepdim=True)\n running_train_correct_predictions += pred.eq(\n labels.view_as(pred)).sum().item()\n avg_accuracy = running_train_correct_predictions * 100 / num_items\n loop.set_description(f\"Epoch [{epoch_number+1}/{num_epochs}]\")\n loop.set_postfix(loss=avg_loss, acc=avg_accuracy,\n 
lr=optimizer.param_groups[0]['lr'])\n if scheduler:\n scheduler.step()\n return running_train_loss, running_train_correct_predictions\n\n def validate_one_epoch(self, model, device, validation_loader, criterion, epoch_number, num_epochs):\n running_validation_loss = 0.0\n running_validation_correct_predictions = 0\n num_items = 0\n model.eval()\n with tqdm(validation_loader, total=len(validation_loader)) as loop:\n with torch.no_grad():\n for data in loop:\n inputs, labels = data[0].to(device), data[1].to(device)\n outputs = model(inputs)\n loss = criterion(outputs, labels)\n num_items += inputs.size(0)\n running_validation_loss += loss.item()\n avg_loss = running_validation_loss / num_items\n pred = outputs.argmax(dim=1, keepdim=True)\n running_validation_correct_predictions += pred.eq(\n labels.view_as(pred)).sum().item()\n avg_accuracy = running_validation_correct_predictions * 100 / num_items\n loop.set_description(\n f\"Epoch [{epoch_number+1}/{num_epochs}]\")\n loop.set_postfix(val_loss=avg_loss, val_acc=avg_accuracy)\n return running_validation_loss, running_validation_correct_predictions\n\n def finetune_model(self):\n best_validation_loss = float('inf')\n train_acc = []\n train_loss = []\n val_acc = []\n val_loss = []\n state_dict = None\n\n for epoch in range(self.num_epochs):\n running_train_loss, running_train_accuracy = self.train_one_epoch(\n self.model, self.device, self.train_loader, self.optimizer,\n self.criterion, epoch, self.num_epochs, self.scheduler)\n running_validation_loss, running_validation_accuracy = self.validate_one_epoch(\n self.model, self.device, self.validation_loader,\n self.criterion, epoch, self.num_epochs)\n\n avg_train_loss = running_train_loss / \\\n len(self.train_loader.dataset)\n avg_validation_loss = running_validation_loss / \\\n len(self.validation_loader.dataset)\n avg_train_accuracy = running_train_accuracy / \\\n len(self.train_loader.dataset)\n avg_validation_accuracy = running_validation_accuracy / \\\n len(self.validation_loader.dataset)\n\n train_acc.append(avg_train_accuracy)\n train_loss.append(avg_train_loss)\n val_acc.append(avg_validation_accuracy)\n val_loss.append(avg_validation_loss)\n\n if avg_validation_loss < best_validation_loss:\n best_validation_loss = avg_validation_loss\n state_dict = self.model.state_dict()\n\n torch.save(state_dict, self.model_path)\n self.save_training_data(train_acc, train_loss, val_acc,\n val_loss, self.log_path)\n\n def save_training_data(self, train_acc, train_loss, val_acc, val_loss, file_path):\n data = {'train_acc': train_acc,\n 'train_loss': train_loss,\n 'val_acc': val_acc,\n 'val_loss': val_loss}\n with open(file_path, 'w') as fp:\n json.dump(data, fp, indent=4)\n\n def evaluate_model(self):\n y_pred = []\n y_true = []\n for data in self.test_loader:\n inputs, labels = data[0].to(self.device), data[1].to(self.device)\n with torch.no_grad():\n output = self.model(inputs)\n output = (torch.max(torch.exp(output), 1)[1]).data.cpu().numpy()\n y_pred.extend(output)\n labels = labels.data.cpu().numpy()\n y_true.extend(labels)\n classes = self.test_loader.dataset.classes\n cf_matrix = confusion_matrix(y_true, y_pred, normalize=\"true\")\n df_cm = pd.DataFrame(cf_matrix, index=[i for i in classes],\n columns=[i for i in classes])\n df_cm.to_csv(self.conf_path)\n info = f\"Test Accuracy: {np.sum(np.array(y_pred) == np.array(y_true))/len(y_pred)*100} %\"\n print(info)\n self.save_metrics(y_true, y_pred)\n\n def save_metrics(self, y_true, y_pred):\n labels = ['Bacterial Blight (CBB)',\n 'Brown Streak 
Disease (CBSD)',\n 'Green Mottle (CGM)',\n 'Mosaic Disease (CMD)',\n 'Healthy']\n report = pd.DataFrame.from_dict(\n classification_report(y_true, y_pred,\n target_names=labels,\n output_dict=True))\n report.to_csv(self.metric_path, index=False)\n","repo_name":"Ethan-Yang0101/Cassava-CKA-Explainer-Project","sub_path":"Trainer.py","file_name":"Trainer.py","file_ext":"py","file_size_in_byte":7189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12980378275","text":"import time\r\ni = 1\r\nname = input(\"Enter your name: \\n\")\r\noppo = input(\"Enter opponents name: \\n\")\r\nuser_score = 0\r\ncomp_score = 0\r\nimport random\r\nrounds = int(input(\"How many rounds do you want to play? \\n\"))\r\nwhile iend:\n return 0\n mid = (start+end)//2 # 중간값 받기\n if target==data[mid]:\n return 1\n if target > data[mid]:\n # 타겟이 중간값보다 크다면 data의 뒤부분\n return binary(data, mid+1,end, target)\n else:\n return binary(data, start,mid-1,target)\n\nN = int(input())\nA = list(map(int, input().split()))\nA.sort()\nM = int(input())\ntargets = list(map(int,input().split()))\nfor t in targets:\n print(binary(A,0,N-1,t))\n\n","repo_name":"wjddn3711/algorithm","sub_path":"sequentialSearch/1920.py","file_name":"1920.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"12439717943","text":"import requests\nfrom datetime import timedelta\nimport Parser\nimport random\nfrom urllib.parse import urljoin, urlparse, parse_qs\n\"\"\"\nCrawls the webpage, following links gathered from the parser and randomly generated links.\nHandles login authentication, cookies, and forms as well.\n\"\"\"\n\n\nclass Crawler:\n\n \"\"\"\n Hard coded authentication information and file extensions for page guessing\n \"\"\"\n dvwa = {'username': 'admin', 'password': 'password', 'Login': 'Login'}\n bodgeit = {'username': 'fake@fake.com', 'password1': 'password', 'password2': 'password'}\n extensions = ['.jsp', '.php', '.html', '.js', '.asp']\n\n \"\"\"\n Constructor.\n Takes the args object returned by argsparser in Fuzz.read_input()\n For the arrays, opens the file and reads in the data. 
Splits it by \\n\n mode = discover || test\n url = the URL to start fuzzing from\n authflag = custom authentication to DVWA or BodgeIt\n common = text file of common words for guessing\n vectors = text file of malicious input\n sensitive = text file of target sensitive data\n random = True/False\n slow = time that is considered \"slow\"\n accessible = list of accessible webpages\n visited = set (unique list) of visited urls\n forms = dictionary of form name and fields\n cookies = dictionary of cookie names and values\n \"\"\"\n def __init__(self, args):\n self.mode = args['mode']\n self.url = args['url'][0]\n self.parser = Parser.Parser()\n self.authflag = args['custom_auth=']\n self.common = open('res/' + args['common_words='], 'r').read().split('\\n')\n self.vectors = open('res/' + args['vectors='], 'r').read().split('\\n') if args['vectors='] else []\n self.sensitive = open('res/'+args['sensitive='], 'r').read().split('\\n') if args['sensitive='] else []\n self.random = args['random=']\n self.slow = timedelta(milliseconds=args['slow='])\n self.accessible = []\n self.visited = set()\n self.forms = {}\n self.cookies = {}\n self.session = None\n self.url_params = {}\n\n \"\"\"\n String representation of a Crawler for debugging.\n \"\"\"\n def __str__(self):\n return \"[\" + self.mode[0] + \", \" + self.url[0] + \", \" + self.authflag + \"]\"\n\n \"\"\"\n Acts as a switch/case.\n Provides the proper URL for the auth flag, if given.\n Otherwise starts from home\n \"\"\"\n def switch(self):\n return {\n 'dvwa': 'http://127.0.0.1/dvwa/login.php',\n 'bodgeit': 'http://127.0.0.1:8080/bodgeit/register.jsp',\n '': 'http://127.0.0.1/'\n }[self.authflag]\n\n \"\"\"\n Generate random URLs to try to input, if the get request is successful, add the URL to the accessible list.\n \"\"\"\n def find_random_urls(self, base_url, s):\n generated = []\n for word in self.common:\n extension = ''\n extension += word # append the common word to the string\n for ext in self.extensions:\n extension += ext # append the file extensions. 
eg \"admin.jsp, admin.php...\"\n url = urljoin(base_url, extension) # completes the url by joining it with the base url.\n extension = word # reset the word so we generate valid urls\n if s.get(url).status_code == requests.codes.ok: # if the status code is ok then we can access the page.\n generated.append(url)\n return generated\n\n \"\"\"\n Crawls the webpage and finds all possible URLs to access\n returns the urls it successfully visited\n \"\"\"\n def crawl(self):\n self.url = self.switch()\n # open a new session\n with requests.Session() as s:\n # get cookies and html from the initial page\n r = s.post(self.url, data=getattr(self, self.authflag), allow_redirects=True) if self.authflag \\\n else s.get(self.url)\n html = r.text\n\n # if custom auth is on, go to the correct page\n if self.authflag == 'bodgeit':\n self.url = 'http://127.0.0.1:8080/bodgeit/'\n elif self.authflag == 'dvwa':\n s.cookies.pop('security')\n s.cookies['security'] = 'low'\n self.url = 'http://127.0.0.1/dvwa/'\n \n self.session = s\n self.cookies.update(s.cookies.get_dict())\n self.visited.add(r.url)\n\n # parse the HTML from the new URL\n self.parser.parse(html, r.url)\n\n # update the forms\n if self.parser.form_data:\n self.forms.update({r.url: self.parser.form_data})\n\n # add any new urls that were found to the list\n generated = self.find_random_urls(r.url, s)\n self.accessible.extend(generated)\n self.accessible.extend(self.parser.found_urls)\n\n for url in self.accessible: # for all accessible urls, visit them and parse\n if url not in self.visited:\n self.accessible.remove(url)\n self.crawl_helper(url, s)\n\n return self.visited, self.parser.form_data\n\n \"\"\"\n Helper function to visit each url\n Parses them and gets the links, form data, and cookies from the webpage.\n \"\"\"\n def crawl_helper(self, url, s):\n html = s.get(url) if 'http:' in url else s.get(self.url + url)\n text = html.text\n parent_url = html.url\n self.parser.parse(text, parent_url) # scan\n if self.parser.form_data:\n self.forms.update({url: self.parser.form_data})\n self.visited.add(url)\n self.accessible.extend([x for x in self.parser.found_urls if x not in self.visited])\n self.cookies.update(s.cookies.get_dict())\n self.url = url\n self.url_params.update({url: parse_qs(urlparse(url).query)})\n\n \"\"\"\n Submits vectors to forms and logs behavior.\n Submits to all forms on each webpage that has forms\n \"\"\"\n def test(self):\n self.crawl()\n output = set()\n with self.session as s:\n if self.random:\n random.seed()\n while self.visited:\n target = self.visited.pop()\n data = {}\n if target in self.forms.keys() and 'login' not in target:\n for key in self.forms[target]:\n vector = self.vectors[random.randint(0, len(self.vectors)-1)]\n data.update({key: vector})\n response = s.post(target, data=data, allow_redirects=True)\n temp = self.check_response(response, vector)\n if temp not in output:\n output.add(temp)\n\n else:\n for url in self.visited:\n data = {}\n if url in self.forms.keys() and 'login' not in url:\n for key in self.forms[url]:\n for vector in self.vectors:\n if key == \"Submit\" or key == \"submit\":\n data.update({key: \"Submit\"})\n else:\n data.update({key: vector})\n\n response = s.post(url, data=data, allow_redirects=True)\n temp = self.check_response(response, vector)\n if temp not in output:\n output.add(temp)\n\n return output\n\n \"\"\"\n Check the response of each sent vector.\n \"\"\"\n def check_response(self, r, v):\n output = \"\"\n sanitized = [\"<\", \">\", \"/\", \"\\\\\", \"'\"]\n\n if 
r.status_code != requests.codes.ok: # bad response exploit check\n output += \"\\nPosting to \" + r.url + \" with vector \" + v + \\\n \" returns invalid response \" + self.http_codes(r.status_code) + \"\\n\"\n\n if r.elapsed > self.slow: # slow response exploit check\n output += \"\\nResponse time for post to \" + r.url + \" was slow with vector \" + v + \"\\n\"\n\n for sens in self.sensitive: # sensitive data exploit check\n if sens in r.text:\n output += \"\\nSensitive data leaked from \" + r.url + \" \" + sens + \" found.\\n\"\n\n for char in sanitized: # input sanitization check\n if char in v:\n if v in r.text:\n output += \"\\nPossible lack of sanitation on \" + r.url + \", vector \" + v + \" not sanitized.\\n\"\n\n return output\n\n \"\"\"\n human-readable http codes\n \"\"\"\n def http_codes(self, code):\n return {\n 500: '500, internal server error encountered',\n 400 : '400, bad request sent.',\n 403 : '403, page access is forbidden.'\n }.get(code)","repo_name":"jvd33/Fuzzer","sub_path":"src/Crawler.py","file_name":"Crawler.py","file_ext":"py","file_size_in_byte":8846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"42113651245","text":"n = int(input())\r\ncars = set()\r\n\r\nfor _ in range(n):\r\n (command, car) = input().split(', ')\r\n if command == 'IN':\r\n cars.add(car)\r\n else:\r\n cars.remove(car)\r\n\r\nif cars:\r\n [print(car) for car in cars]\r\nelse:\r\n print(f'Parking Lot is Empty')","repo_name":"AlexanderIvanofff/Python-OOP","sub_path":"tuples_and_sets/parking_lot.py","file_name":"parking_lot.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13086377724","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 24 21:56:24 2021\n\n@author: egt_d\n\"\"\"\n\nclass Persona():\n \n def __init__(self, n, edad):\n self.nombre = n\n self.edad = edad\n \npersona1 = Persona(\"Erick\", 24)\n\nprint(persona1.edad)\nprint(persona1.nombre)","repo_name":"ErickTocasca/PROYECTO_MINSUP_UNCP","sub_path":"7. 
POO/6.1 Clases y Objetos/1_Persona.py","file_name":"1_Persona.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13725435647","text":"from django.db import models\n\nclass Node(models.Model):\n nodeId = models.PositiveIntegerField(primary_key=True)\n nodeType = models.PositiveSmallIntegerField()\n korName = models.CharField(max_length=20)\n latitude = models.FloatField()\n longitude = models.FloatField()\n linkedNode = models.JSONField()\n\n def __str__(self):\n return self.korName\n\n class Meta:\n db_table = \"node_information\"\n\nclass Edge(models.Model):\n edgeId = models.PositiveIntegerField(primary_key=True)\n fnodeId = models.PositiveIntegerField()\n enodeId = models.PositiveIntegerField()\n korName = models.CharField(max_length=20)\n roadLength = models.PositiveSmallIntegerField()\n\n class Meta:\n db_table = \"edge_information\"","repo_name":"kmj3785/CopAndRobberInBusan","sub_path":"sitemap/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"938875805","text":"from Pages.plans_page import PlansPage\nimport utilities.custom_logger as cl\nimport logging\nimport time\nclass GSTDetailEntry(PlansPage):\n\n log = cl.customLogger(logging.DEBUG)\n\n def __init__(self, driver):\n super().__init__(driver)\n self.driver = driver\n\n id_email_field = \"email\"\n xpath_save_button=\"//button[contains(text(),'SAVE')]\"\n \n def gst_details_entry(self, mobile_no, otp, email, gst_no, address):\n self.login( mobile_no, otp)\n time.sleep(2)\n self.navigate_to_buy_plan( mobile_no, email)\n time.sleep(3)\n self.enter_GST_Details( gst_no, address, email)\n time.sleep(2)\n def verify_gst_details(self):\n result=self.isElementPresent(self.xpath_save_button, locatorType=\"xpath\")\n time.sleep(2)\n self.screenShot(\"GSTDetailsPopUp\")\n time.sleep(2)\n self.elementClick(self.xpath_save_button, locatorType='xpath')\n return result\n ","repo_name":"KavyaJyothi/RecruiterPanel","sub_path":"Pages/gst_details_page.py","file_name":"gst_details_page.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28737596654","text":"import bpy\n\ndef append_to_list(menuname, y, name, x, listname):\n menu = menuname\n if not menu in listname:\n newmenu = {}\n newmenu['name'] = name\n newmenu['x'] = x\n newmenu['y'] = y\n listname[menu] = newmenu\n\ndef remove_from_list(menuname, listname):\n del(listname[menuname])\n","repo_name":"mx1001/bc_p","sub_path":"utils/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"73340843024","text":"from flask import Flask\nfrom apscheduler.schedulers.background import BackgroundScheduler\nimport threading\nimport os\nimport json\nfrom datetime import datetime, timedelta\n\napp = Flask(__name__)\nscheduler = BackgroundScheduler()\njob = None\n\n# Global flag and lock for graceful shutdown\nis_task_running = False\ntask_lock = threading.Lock()\n\nfrom dataconverter.communication.message_broker_if import RabbitMQInterface as rabbitmq\nfrom dataconverter.utils.data_checker import FtpDataCheck\n\nrabbit = rabbitmq(os.environ.get('RABBITMQ_HOST', \"localhost\"), 5672, 'guest', 'guest', 'ftp_tasks')\nrabbit.connect()\n\n\ndef 
ftp_check_task():\n global is_task_running\n with task_lock:\n is_task_running = True\n\n # Simulate FTP check (replace with real logic)\n print(\"FTP check started\")\n try:\n # Test Success Scenario\n\n # Test Fail Scenario\n checker = FtpDataCheck()\n checker.full_check()\n # raise Exception(\"FTP check failed\")\n except Exception as e:\n print(f\"FTP check failed: {e}\")\n payload = {\n \"queue_name\": \"ftp-tasks\",\n \"content\": f\"{rabbit.get_current_time()}:Failed:Task:{e}\",\n \"service_name\": \"FTP Checker\",\n \"producer_ip\": rabbit.get_ip(),\n \"status\": \"Failed\",\n }\n rabbit.send(message=json.dumps(payload))\n\n with task_lock:\n is_task_running = False\n\n\n@app.route('/start', methods=['GET'])\ndef start_monitoring():\n global job\n if not scheduler.running:\n job = scheduler.add_job(ftp_check_task, 'interval', minutes=10,\n next_run_time=datetime.now() + timedelta(minutes=1))\n # job = scheduler.add_job(ftp_check_task, 'interval', minutes=1)\n scheduler.start()\n return \"FTP Monitoring Started\"\n return \"FTP Monitoring is already running\"\n\n\n@app.route('/stop', methods=['GET'])\ndef stop_monitoring():\n global scheduler, job\n if scheduler.running:\n # Wait for the ongoing task to complete\n with task_lock:\n if job:\n job.remove()\n job = None\n scheduler.shutdown(wait=False)\n scheduler = BackgroundScheduler()\n return \"FTP Monitoring Stopped\"\n\n return \"FTP Monitoring is not running\"\n\n\n@app.route('/status', methods=['GET'])\ndef status():\n return f\"FTP Monitoring Running: {scheduler.running}, Task Running: {is_task_running}\"\n\n\n# Call the start monitoring method directly\nstart_monitoring()\n\nif __name__ == '__main__':\n try:\n app.run(debug=True, host='0.0.0.0', port=5000)\n finally:\n rabbit.close()\n","repo_name":"KenanBolat/d_f_m","sub_path":"data_retrieval/ftp_checker/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1557613385","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom .kaiser_bessel_filter import (kaiser_bessel_with_sinc,\n estimate_kaiser_bessel_beta)\n\n\ndef taper(filter_type, ny, nx, conv_filter, **kwargs):\n r\"\"\"\n Parameters\n ----------\n filter_type : {\"kaiser-bessel\"}\n Type of filter\n ny : int\n Number of pixels in the v dimension.\n nx : int\n Number of pixels in the u dimension.\n conv_filter : :class:`africanus.filters.ConvolutionFilter`\n Associated Convolution Filter.\n\n Returns\n -------\n :class:`numpy.ndarray`\n Taper of shape :code:`(ny, nx)`\n \"\"\"\n cf = conv_filter\n\n if filter_type == \"sinc\":\n return np.ones((ny, nx))\n elif filter_type == \"kaiser-bessel\":\n try:\n beta = kwargs.pop('beta')\n except KeyError:\n beta = estimate_kaiser_bessel_beta(cf.full_sup)\n\n # What would Andre Offringa do?\n # He would compute the numeric solution\n taps = np.arange(cf.no_taps) / cf.oversample - cf.full_sup // 2\n kb = kaiser_bessel_with_sinc(taps, cf.full_sup, cf.oversample, beta)\n kbshift = np.fft.fftshift(kb)\n\n width = nx * cf.oversample\n height = ny * cf.oversample\n\n # Put the first and last halves of the shifted Kaiser Bessel\n # at each end of the output buffer, then FFT\n buf = np.zeros(width, dtype=kb.dtype)\n buf[:kbshift.size // 2] = kbshift[:kbshift.size // 2]\n buf[-kbshift.size // 2:] = kbshift[-kbshift.size // 2:]\n x = 
np.fft.ifft(buf).real\n\n buf = np.zeros(height, dtype=kb.dtype)\n buf[:kbshift.size // 2] = kbshift[:kbshift.size // 2]\n buf[-kbshift.size // 2:] = kbshift[-kbshift.size // 2:]\n y = np.fft.ifft(buf).real\n\n # First quarter of the taper\n quarter = y[:ny // 2, None] * x[None, :nx // 2]\n\n # Create the taper by copying\n # the quarter into the appropriate bits\n taper = np.empty((ny, nx), dtype=kb.dtype)\n taper[:ny // 2, :nx // 2] = quarter[::-1, ::-1]\n taper[ny // 2:, :nx // 2] = quarter[:, ::-1]\n taper[:ny // 2, nx // 2:] = quarter[::-1, :]\n taper[ny // 2:, nx // 2:] = quarter[:, :]\n\n # Normalise by oversampling factor\n taper *= cf.oversample**2\n\n return taper\n else:\n raise ValueError(\"Invalid filter_type '%s'\" % filter_type)\n","repo_name":"phiadaarr/codex-africanus","sub_path":"africanus/filters/filter_tapers.py","file_name":"filter_tapers.py","file_ext":"py","file_size_in_byte":2520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"42384956508","text":"#Utilizando la función del punto 1, realizar otra función que reciba de\n#parámetro una lista de números y devuelva sólo aquellos que son primos en\n#otra lista\n\ndef es_primo(num):\n \"\"\"\n Función que verifica si un número es primo.\n \"\"\"\n if num <= 1:\n return False\n \n for i in range(2, num):\n if num % i == 0:\n return False\n return True\n\ndef lista_de_primos(lista):\n primos = []\n for num in lista:\n if es_primo(num):\n primos.append(num)\n return primos\n \nlista1 = [2, 3, 4, 5, 6, 7, 8, 9, 10]\nprint(lista_de_primos(lista1))","repo_name":"NOCTURNO91/FUNCIONS_PYTHON","sub_path":"function2.py","file_name":"function2.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"28925232223","text":"import torch\r\nfrom softmax_regression import Accumulator, accuracy\r\nfrom torch import nn\r\nimport torchvision\r\nfrom torch.utils import data\r\nfrom torchvision import transforms\r\nfrom LeNet import train_by_gpu, evaluate_accuracy_gpu\r\n\r\n\r\ndef load_data_fashion_minst(batch_size, resize=None):\r\n \"\"\"下载fashion数据集\"\"\"\r\n trans = [transforms.ToTensor()]\r\n if resize:\r\n trans.insert(0, transforms.Resize(resize))\r\n trans = transforms.Compose(trans)\r\n # minst_train.data可以取得前面的一些数据\r\n mnist_train = torchvision.datasets.FashionMNIST(root=\"./data\",\r\n train=True, transform=trans,\r\n download=False)\r\n mnist_test = torchvision.datasets.FashionMNIST(root=\"./data\",\r\n train=False, transform=trans,\r\n download=False)\r\n train = data.DataLoader(mnist_train, batch_size=256, shuffle=True, )\r\n test = data.DataLoader(mnist_test, batch_size=256, shuffle=False)\r\n return train, test\r\n\r\n\r\nnet_byimgnet = nn.Sequential(\r\n nn.Conv2d(1, 96, kernel_size=(11, 11), stride=(4, 4), padding=1), nn.ReLU(),\r\n nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2)),\r\n nn.Conv2d(96, 256, kernel_size=(5, 5), padding=2), nn.ReLU(),\r\n nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2)),\r\n nn.Conv2d(256, 384, kernel_size=(3, 3), padding=1), nn.ReLU(),\r\n nn.Conv2d(384, 384, kernel_size=(3, 3), padding=1), nn.ReLU(),\r\n nn.Conv2d(384, 256, kernel_size=(3, 3), padding=1), nn.ReLU(),\r\n nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2)),\r\n nn.Flatten(),\r\n nn.Linear(6400, 4096), nn.ReLU(), nn.Dropout(p=0.5),\r\n nn.Linear(4096, 4096), nn.ReLU(), nn.Dropout(p=0.5),\r\n nn.Linear(4096, 100)\r\n)\r\n# net_byminst = nn.Sequential(\r\n# nn.Conv2d(1, 96, kernel_size=(3, 
3), stride=(3, 3), padding=1), nn.ReLU(),\r\n# nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2)),\r\n# nn.Conv2d(96, 256, kernel_size=(3, 3), padding=1), nn.ReLU(),\r\n# nn.Conv2d(256, 384, kernel_size=(3, 3), padding=1), nn.ReLU(),\r\n# nn.Conv2d(384, 384, kernel_size=(3, 3), padding=1), nn.ReLU(),\r\n# nn.Conv2d(384, 256, kernel_size=(3, 3), padding=1), nn.ReLU(),\r\n# nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2), padding=1),\r\n# nn.Flatten(),\r\n# nn.Linear(256*3*3, 256*3*2), nn.ReLU(), nn.Dropout(p=0.5),\r\n# nn.Linear(256*3*2, 256*2), nn.ReLU(), nn.Dropout(p=0.5),\r\n# nn.Linear(256*2, 10)\r\n# )\r\n# train_iter, test_iter = load_data_fashion_minst(64, 224)\r\n# lr, epochs = 0.01, 10\r\n# print(\"train on AlexNet\")\r\n# train_by_gpu(net_byimgnet, train_iter, test_iter, epochs, lr,\r\n# device=torch.device(\"cuda\")\r\n# )\r\n","repo_name":"luoxue-star/-Hands-on-learning-and-deep-learning","sub_path":"AlexNet.py","file_name":"AlexNet.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"69815112786","text":"def findLength(string, n):\n current_sum = 0\n max_sum = 0\n \n # traverse a binary string from left\n # to right\n for i in range(n):\n \n # add current value to the current_sum\n # according to the Character\n # if it's '0' add 1 else -1\n current_sum += (1 if string[i] == '0' else -1)\n \n if current_sum < 0:\n current_sum = 0\n \n # update maximum sum\n max_sum = max(current_sum, max_sum)\n \n # return -1 if string does not contain\n # any zero that means all ones\n # otherwise max_sum\n return max_sum if max_sum else 0\n \n# Driven Program\ns = \"11000010001\"\nn = 11\nprint(findLength(s, n))\n","repo_name":"DDR7707/Final-450-with-Python","sub_path":"Dynamic Programming/437.Max diff of zeros and ones in binary string.py","file_name":"437.Max diff of zeros and ones in binary string.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"39313891253","text":"# coding=utf-8\nimport unittest\n\nfrom src.utils_suggester import (\n suggest_phrase_query_full_example,\n FR_ANALYZER,\n STD_ANALYZER,\n TRI_GRAM_ANALYZER,\n)\n\n\nclass UTests(unittest.TestCase):\n def test_suggest_phrase_query_one_word(self):\n expected = {\n \"text\": {\n \"Bactér\": [],\n \"Bactéri\": [\"bactéries\"],\n \"Bactérie\": [\"bactéries\"],\n \"bactérie\": [\"bactéries\"], # case insensitive\n \"bacterie\": [\"bactéries\"], # accent insensitive\n # => does what we want : lowercase suggestions with accents\n },\n \"keyword\": {\n \"Bactér\": [],\n \"Bactéri\": [\"Bactéries\"],\n \"Bactérie\": [\"Bactéries\"],\n \"bactérie\": [], # case sensitive\n \"bacterie\": [],\n \"Bacterie\": [\"Bactéries\"], # accent insensitive\n # does not what we want (unless we lowercase tags before insertion in ES)\n },\n \"completion\": {\n \"Bactér\": [],\n \"Bactéri\": [\"bactéries\"], # case insensitive\n \"Bactérie\": [\"bactéries\"],\n \"bactérie\": [\"bactéries\"],\n \"bacterie\": [\"bactéries\"], # accent insensitive\n \"Bacterie\": [\"bactéries\"],\n # does what we want\n },\n }\n\n doc = {\"tags\": [\"Bactéries\"]}\n for type_ in expected:\n mapping = {\"tags\": {\"type\": type_}}\n for text_ in expected[type_]:\n suggestions = suggest_phrase_query_full_example(mapping, doc, text_)\n self.assertEqual(suggestions, expected[type_][text_])\n\n def test_suggest_phrase_query_multiple_words(self):\n expected = {\n \"text\": {\n 
\"Appare\": [\"appareil\"],\n \"Appareil g\": [],\n \"Appareil géni\": [],\n },\n \"keyword\": {\n \"Appare\": [],\n \"Appareil g\": [],\n \"Appareil géni\": [],\n },\n \"completion\": {\n \"Appare\": [],\n \"Appareil g\": [],\n \"Appareil géni\": [],\n },\n }\n\n doc = {\"tags\": [\"Appareil génital féminin\"]}\n for type_ in expected:\n mapping = {\"tags\": {\"type\": type_}}\n for text_ in expected[type_]:\n suggestions = suggest_phrase_query_full_example(mapping, doc, text_)\n self.assertEqual(suggestions, expected[type_][text_])\n\n def test_suggest_phrase_query_multiple_words_trigram_analyzer(self):\n expected = {\n \"text\": {\n \"noble prize\": [\"nobel prize\"],\n \"nobel priz\": [\"nobel prize\"],\n \"nobel pri\": [],\n # \"Appare\": [\"appareil\"],\n # \"Appareil g\": [],\n # \"Appareil géni\": [],\n # },\n # \"keyword\": {\n # \"Appare\": [],\n # \"Appareil g\": [],\n # \"Appareil géni\": [],\n # },\n # \"completion\": {\n # \"Appare\": [],\n # \"Appareil g\": [],\n # \"Appareil géni\": [],\n },\n }\n\n # doc = {\"tags\": [\"Appareil génital féminin\"]}\n doc = {\"tags\": [\"nobel prize\"]}\n for type_ in expected:\n mapping = {\"tags\": {\"type\": type_}}\n for text_ in expected[type_]:\n suggestions = suggest_phrase_query_full_example(\n mapping, doc, text_, analyzer=TRI_GRAM_ANALYZER\n )\n self.assertEqual(suggestions, expected[type_][text_])\n\n\nif __name__ == \"__main__\":\n pass\n","repo_name":"drussier/elasticsearch-queries","sub_path":"src/test/test_suggester_phrase.py","file_name":"test_suggester_phrase.py","file_ext":"py","file_size_in_byte":3807,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13324832777","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*- \n# Author: lionel\n\nimport tensorflow as tf\n\n# vocabulary_list = tf.constant([\"emerson\", \"lake\", \"palmer\"])\n# table = tf.contrib.lookup.index_table_from_tensor(\n# mapping=vocabulary_list, num_oov_buckets=1, default_value=-1)\n# features = tf.constant([\"emerson\", \"lake\", \"and\", \"palmer\"])\n# ids = table.lookup(features)\nsess = tf.InteractiveSession()\n# tf.tables_initializer().run()\n# print(ids.eval())\n\nkeys = tf.constant(['messi', 'henry', 'jiang'])\nvalues = tf.constant([1, 2, 3])\n\ntable = tf.contrib.lookup.HashTable(\n tf.contrib.lookup.KeyValueTensorInitializer(keys, values), -1)\n\nout = table.lookup(tf.constant(['j']))\ntable.init.run()\nprint(out.eval())\n","repo_name":"xinchungitHub/feng-python-apply","sub_path":"feng-ml-tf/src/HashTableExample.py","file_name":"HashTableExample.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"31975654641","text":"class Augment_Images:\n def __init__(self, img_path):\n self.img_path = img_path\n self.img_id = img_path.split('.')[0]\n full_path = os.path.join('/content/FullData/dataset/images', img_path)\n self.img = cv2.imread(full_path)\n\n def info(self, keyword):\n width, height = self.img.shape[:2]\n\n bb = abs(train[train['image_path']==self.img_path][['xmin', 'ymin', 'xmax', 'ymax']]*2).values\n bboxes = np.where(bb<0, 0, bb)\n labels = train[train['image_path']==self.img_path]['class'].values\n \n with open(f\"{keyword}.txt\", 'w') as file:\n for i in range(len(bboxes)):\n x_cen = min(round((bboxes[i][0] + bboxes[i][2]) / (2*width), 3), 1)\n y_cen = min(round((bboxes[i][1] + bboxes[i][3]) / (2*height), 3), 1)\n shape_width = min(round((bboxes[i][2] + bboxes[i][0]) / (width), 3), 1)\n 
shape_height = min(round((bboxes[i][3] + bboxes[i][1]) / (height), 3), 1)\n file.write(f'{labels[i]} {x_cen} {y_cen} {shape_width} {shape_height}\\n') \n\n\n def equalizeImage(self): #return 1 image\n gray = cv2.cvtColor(self.img,cv2.COLOR_BGR2GRAY)\n self.image = cv2.equalizeHist(gray)\n cv2.imwrite(f'{self.img_id}equalize.jpg', self.image)\n self.info(f'{self.img_id}equalize')\n \n def sharpImage(self): #return 2 images\n kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])\n self.image = cv2.filter2D(self.img, -1, kernel)\n cv2.imwrite(f'{self.img_id}sharp1.jpg', self.image)\n self.info(f'{self.img_id}sharp1')\n\n kernel = np.array([[-1,-1,-1], [-1,9,-1], [-1,-1,-1]])\n self.image = cv2.filter2D(self.img, -1, kernel)\n cv2.imwrite(f'{self.img_id}sharp2.jpg', self.image)\n self.info(f'{self.img_id}sharp2')\n\n \n def blurImage(self, kernel_size=(15,15)): #return 1 image\n self.image = cv2.GaussianBlur(self.img,kernel_size, 0) \n cv2.imwrite(f'{self.img_id}blur.jpg', self.image) \n self.info(f'{self.img_id}blur') \n\n def randomBrightContrast(self): #return 9 images\n values = np.arange(1,10)/10\n for i in values:\n transform = A.Compose([\n A.RandomBrightnessContrast(brightness_limit=i, contrast_limit=i, p=0.8)\n ])\n transformed = transform(image=self.img)\n cv2.imwrite(f'{self.img_id}{str(i)}randomBright.jpg', transformed['image'])\n self.info(f'{self.img_id}{str(i)}randomBright')\n\n def randomShadow(self): #return 5 images\n for i in range(1, 6):\n transform = A.Compose([\n A.RandomShadow(num_shadows_lower=1, num_shadows_upper=2, shadow_dimension=3, shadow_roi=(0, 0.5, 1, 1), p=1)\n ])\n transformed = transform(image=self.img)\n cv2.imwrite(f'{self.img_id}{str(i)}randomshadow.jpg', transformed['image'])\n self.info(f'{self.img_id}{str(i)}randomshadow')\n\n def groupedProcessing(self): #return 6 images\n medium = A.Compose([\n A.CLAHE(p=1),\n A.HueSaturationValue(hue_shift_limit=20, sat_shift_limit=50, val_shift_limit=50, p=1),\n ], p=1)\n transformed2 = medium(image=self.img)\n\n strong = A.Compose([\n A.ChannelShuffle(p=1),\n ], p=1)\n transformed3 = strong(image=self.img)\n\n cv2.imwrite(f'{self.img_id}group2.jpg', transformed2['image'])\n cv2.imwrite(f'{self.img_id}group3.jpg', transformed3['image'])\n \n self.info(f'{self.img_id}group2')\n self.info(f'{self.img_id}group3')\n\n for i in range(1, 5):\n light = A.Compose([\n A.RandomBrightnessContrast(p=1), \n A.RandomGamma(p=1), \n A.CLAHE(p=1), \n ], p=1)\n transformed1 = light(image=self.img)\n\n \n cv2.imwrite(f'{self.img_id}{str(i)}group1.jpg', transformed1['image'])\n \n self.info(f'{self.img_id}{str(i)}group1')\n \n\n def selectAll(self): #return 24 images\n self.equalizeImage()\n self.sharpImage()\n self.blurImage()\n self.randomShadow()\n self.randomBrightContrast()\n self.groupedProcessing()\n\n\nclass AugmentedImagesWithBBox:\n def __init__(self, img_path):\n self.img_id = img_path.split('.')[0]\n full_path = os.path.join('/content/FullData/dataset/images', img_path)\n self.img = cv2.imread(full_path)\n \n def rotateImage(self):\n height, width = self.img.shape[:2]\n img_c = (width/2,height/2) # Image Center Coordinates\n rotation_matrix = cv2.getRotationMatrix2D(img_c, self.angle, 1.0)\n \n abs_cos = abs(rotation_matrix[0,0]) # Cos(angle)\n abs_sin = abs(rotation_matrix[0,1]) # sin(angle)\n\n # New Width and Height of Image after rotation\n bound_w = int(height*abs_sin + width*abs_cos)\n bound_h = int(height*abs_cos + width*abs_sin)\n \n # subtract the old image center and add the new center coordinates\n 
rotation_matrix[0,2]+=bound_w/2-img_c[0]\n rotation_matrix[1,2]+=bound_h/2-img_c[1]\n \n # rotating image with transformed matrix and new center coordinates\n rotated_matrix = cv2.warpAffine(self.img, rotation_matrix,(bound_w, bound_h))\n \n self.image = rotated_matrix\n cv2.imwrite(f'{self.img_id}rotate.jpg', self.image)\n\n\n\ndef pascal_voc_to_yolo(image_path):\n width, height = 1080, 1920\n bboxes = abs(train[train['image_path']==image_path][['xmin', 'ymin', 'xmax', 'ymax']]*2).values\n labels = train[train['image_path']==image_path]['class'].values\n \n with open(f\"{image_path.split('.')[0]}.txt\", 'w') as file:\n for i in range(len(bboxes)):\n x_cen = round((bboxes[i][0] + bboxes[i][2]) / (2*width), 3)\n y_cen = round((bboxes[i][1] + bboxes[i][3]) / (2*height), 3)\n # YOLO box width/height are the box extents, i.e. (max - min) normalised by the image size\n shape_width = round((bboxes[i][2] - bboxes[i][0]) / (width), 3)\n shape_height = round((bboxes[i][3] - bboxes[i][1]) / (height), 3)\n file.write(f'{labels[i]} {x_cen} {y_cen} {shape_width} {shape_height}\\n')","repo_name":"HabibaShera/Smartathon","sub_path":"preprocessing/Augmentation.py","file_name":"Augmentation.py","file_ext":"py","file_size_in_byte":5709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"41867946722","text":"# Sales,Sales Items,Sales Return,Sales Return Items,Employee Attendance - (Web Interface,API),Draw Balance (Web,API).\n# Use sqlite\nfrom flask import Flask, render_template, request, url_for, redirect\nfrom db.core import Employee\n\napp = Flask(__name__)\n\nshop_list = [\"Shop 1\",\"Shop 2\"] # Add Shops Here\n\n# Add Shop Details Here - This functionality will be replaced in a further update.\ndetails_shop_1 = [\"Sales : 5000\",\"Sales Return : 100\",\"Active Employees : 5001,5002\",\"Draw Balance : \"]\ndetails_shop_2 = [\"Sales : 50000\",\"Sales Return : 100\",\"Active Employees : 5001,5002\",\"Draw Balance : \"]\n\n# Add the details to the list\nshop_details = [details_shop_1,details_shop_2]\n\n# Employee List\nEmployee().get_employee_names()\nemp_lst = [\"Reshma\",\"Shiji\",\"Aswathy\"]\n\n# This code deals with adding the shops and their detail lists together as a dictionary, for html parsing\ndetails = {}\ni = 0\nfor shop in shop_list:\n details[shop] = shop_details[i]\n i += 1\n\n\n# Home Page\n@app.route(\"/\",methods=[\"GET\",\"POST\"])\ndef home():\n return render_template(\"home.html\")\n\n@app.route(\"/owner\",methods=[\"GET\",\"POST\"])\ndef index():\n if request.method == 'GET': \n return render_template(\"index.html\",title=\"HomePage\",shops = shop_list,details=details) # Here the list of shops is updated and the details dictionary is passed.\n elif request.method == 'POST':\n return \"Working\"\n\n# Employee Page\n@app.route(\"/employee\",methods=[\"GET\",\"POST\"])\ndef employee():\n if request.method == 'GET':\n return render_template(\"employee.html\",employee=emp_lst)\n else:\n return \"200 OK\"\n\n# List API\n@app.route(\"/api\")\ndef api():\n return \"API Active\"\n\n# Creator Page :)\n@app.route(\"/abhinand\",methods=[\"GET\",\"POST\"])\n@app.route(\"/creator\",methods=[\"GET\",\"POST\"])\n@app.route(\"/master\",methods=[\"GET\",\"POST\"])\ndef master():\n return redirect(\"https://abhinand.xyz\")\n\nif __name__ == '__main__':\n app.run(debug=True,host=\"0.0.0.0\")\n","repo_name":"abhinanddhandapani/Krishna-TG-Updater","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} 
+{"seq_id":"37664453109","text":"from __future__ import absolute_import, division, print_function\nfrom sensirion_shdlc_driver.command import ShdlcCommand\nfrom struct import pack, unpack\n\nimport logging\nlog = logging.getLogger(__name__)\n\n\nclass SensorBridgeCmdI2cRepeatedTransceiveBase(ShdlcCommand):\n \"\"\"\n SHDLC command 0x12: \"I2c Repeated Transceive\".\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(SensorBridgeCmdI2cRepeatedTransceiveBase, self).__init__(\n 0x12, *args, **kwargs)\n\n\nclass SensorBridgeCmdI2cRepeatedTransceive(SensorBridgeCmdI2cRepeatedTransceiveBase):\n\n def __init__(self, interval_us, port, i2c_address, tx_length, rx_length, timeout_us, read_delay_us, tx_data):\n \"\"\"\n I2c Repeated Transceive Command\n\n Starts an asynchronous transceive command in a specific interval.\n Multiple parallel repeated transceives are possible. The returned data\n is stored in a buffer and can be read out with the \"Read Buffer\"\n command. The amount of data to write or read is limited.\n\n :param int interval_us:\n The interval in microseconds.\n :param int port:\n The port(s) where the repeated transceive should be started:\n\n - 0x00: Port 1\n - 0x01: Port 2\n - 0xFF: All ports\n :param int i2c_address:\n I2C address of the targeted device.\n :param int tx_length:\n Number of bytes to send. This amount of bytes has to be attached to\n the command. Set to zero if only the read header is needed.\n :param int rx_length:\n Number of bytes to receive. This amount of bytes is returned as\n response to the read buffer command. Set to zero if no read\n operation is needed.\n :param int timeout_us:\n I2C timeout in microseconds when reading bytes. If a frame is NACK'd\n it will be retried up to the timeout value. The same applies for clock\n stretching.\n :param int read_delay_us:\n This time will be inserted between the write and the read frame, to\n allow a device taking a certain amount of time for its\n measurements. 
The delay must be smaller than the specified timeout.\n :param bytes tx_data:\n Bytes to send (if any).\n \"\"\"\n super(SensorBridgeCmdI2cRepeatedTransceive, self).__init__(\n data=b\"\".join([pack(\">I\", interval_us),\n pack(\">B\", port),\n pack(\">B\", i2c_address),\n pack(\">I\", tx_length),\n pack(\">I\", rx_length),\n pack(\">I\", timeout_us),\n pack(\">I\", read_delay_us),\n bytes(bytearray(tx_data))]),\n max_response_time=0.05,\n post_processing_time=0.0,\n min_response_length=0,\n max_response_length=255\n )\n\n @staticmethod\n def interpret_response(data):\n \"\"\"\n :return: One or more unique handles with following information:\n\n - Unique number (higher nibble, 0..7)\n - Channel number (lower nibble, 0..1)\n\n There are 8 available memory slots for the repeated transceive\n tasks and they are identified by the unique number.\n :rtype: bytes\n \"\"\"\n handles = bytes(data[0:]) # bytearray\n return handles\n","repo_name":"Sensirion/python-shdlc-sensorbridge","sub_path":"sensirion_shdlc_sensorbridge/commands/i2c_repeated_transceive.py","file_name":"i2c_repeated_transceive.py","file_ext":"py","file_size_in_byte":3450,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"23883534776","text":"# !/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n\r\nimport psycopg2\r\n\r\nDBNAME = \"news\"\r\n\r\nq1 = \"Quais são os três artigos mais populares de todos os tempos?\"\r\n\r\nquery1 = \"\"\"\r\nSELECT title,\r\n count(*) AS views\r\nFROM articles\r\nJOIN log ON log.path = '/article/' || articles.slug\r\nGROUP BY title\r\nORDER BY views DESC\r\nLIMIT 3;\r\n\"\"\"\r\n\r\nq2 = \"Quem são os autores de artigos mais populares de todos os tempos?\"\r\n\r\nquery2 = \"\"\"\r\nSELECT authors.name, count(*) as views\r\nFROM articles\r\nJOIN authors\r\nON articles.author = authors.id\r\nJOIN log\r\nON articles.slug = substring(log.path, 10)\r\nWHERE log.status LIKE '200 OK'\r\nGROUP BY authors.name ORDER BY views DESC;\r\n\"\"\"\r\n\r\nq3 = \"Em quais dias mais de 1% das requisições resultaram em erros?\"\r\n\r\nquery3 = (\"select data, percentual from percentualerro where percentual>1.0;\")\r\n\r\n# Conecta ao banco e faz a consulta\r\n\r\n\r\ndef get_queryResults(sql_query):\r\n db = psycopg2.connect(database=DBNAME)\r\n c = db.cursor()\r\n c.execute(sql_query)\r\n results = c.fetchall()\r\n db.close()\r\n return results\r\n\r\n\r\nresult1 = get_queryResults(query1)\r\nresult2 = get_queryResults(query2)\r\nresult3 = get_queryResults(query3)\r\n\r\n\r\n# Funcao para imprimir os resultados\r\n\r\n\r\ndef print_results(q_list):\r\n for i in range(len(q_list)):\r\n title = q_list[i][0]\r\n res = q_list[i][1]\r\n print(\"%s - %d\" % (title, res) + \" views\")\r\n print(\"\\n\")\r\n\r\n\r\nprint(q1)\r\nprint_results(result1)\r\nprint(q2)\r\nprint_results(result2)\r\nprint(q3)\r\nprint(str(result3[0][0]) + \" - \" + str(result3[0][1]) + \"%\")\r\n","repo_name":"mistermagson/logreport","sub_path":"projetolog.py","file_name":"projetolog.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72314611986","text":"import socket\n\nimport aiohttp\nimport asyncio\nimport aiofiles\n\n\nclass Downloader:\n def __init__(self, loop):\n self.loop = loop\n\n async def save(self, data, post):\n async with aiofiles.open(\"{}.jpg\".format(post['node']['id']), 'wb', loop=self.loop) as f:\n return await f.write(data)\n\n async def download(self, session, 
post):\n with aiohttp.Timeout(10):\n async with session.get(post['node']['images']['standard_resolution']['url']) as response:\n if response.status != 200:\n raise Exception('Bad status code {}'.format(response.status))\n return await response.read()\n\n async def process(self, session, post):\n image_data = await self.download(session, post)\n return await self.save(image_data, post)\n\n async def create_tasks(self, posts):\n async with aiohttp.ClientSession(loop=self.loop,\n connector=aiohttp.TCPConnector(family=socket.AF_INET)) as session:\n tasks = [self.process(session, post) for post in posts]\n await asyncio.gather(*tasks, return_exceptions=True)\n\n def start_processing(self, posts):\n self.loop.run_until_complete(self.create_tasks(posts))\n","repo_name":"TheMickeyMike/insta-mirror","sub_path":"insta-mirror/insta_downloader.py","file_name":"insta_downloader.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"4746304411","text":"import json\nfrom typing import Dict, Optional, Set\n\nimport vimbufferutil\n\n\nclass FunctionAna:\n\n def __init__(self, path: Optional[str] = None) -> None:\n if path is None:\n return\n else:\n self.readfromfile(path)\n\n def readfromfile(self, path: str) -> None:\n with open(path) as file:\n self.FUNCTION: Dict[str, Dict] = json.loads(file.read())\n\n def getparamtype(self, func: str, parnum: int) -> list:\n return self.FUNCTION[func][\"parameters\"][parnum]\n\n def getparamnumber(self, func: str) -> int:\n return len(self.FUNCTION[func][\"parameters\"])\n\n def getreturntype(self, func: str) -> list:\n return self.FUNCTION[func][\"return\"]\n\n def getcallnumber(self, func: str) -> int:\n return self.FUNCTION[func][\"analysis\"][\"callnumber\"]\n\n def getcollapsetime(self, func: str) -> float:\n return self.FUNCTION[func][\"analysis\"][\"collapse\"]\n\n\ndef adddocstring_paramtype(\n buffer, returnflag=False, doctype: str = \"Epydoc\"\n) -> None:\n functionlog = FunctionAna(\".AGFPparameters.log\")\n\n functioncode = vimbufferutil.AllFunctions()\n functioncode.regist(buffer)\n\n abc = vimbufferutil.AddBufferContent()\n for func in functionlog.FUNCTION:\n funcdict = functionlog.FUNCTION[func]\n for _fc in functioncode.functions:\n fc: vimbufferutil.FunctionCode = _fc\n if fc.functionname == func:\n tfc: vimbufferutil.FunctionCode = fc\n commentspace = \" \" * (4 * (fc.indentlevel + 1))\n abc.addandwait(commentspace + '\"\"\"', fc.endline + 1)\n for index, param in enumerate(fc.functionargs):\n if str(index) in funcdict[\"parameters\"]:\n abc.addandwait(\n commentspace + \"@type \" + param + \": \" +\n \", \".join(funcdict[\"parameters\"][str(index)]),\n fc.endline + 1\n )\n if returnflag:\n abc.addandwait(\n commentspace + \"@rtype: \" +\n \", \".join(funcdict[\"return\"]), fc.endline + 1\n )\n if tfc.containdocstring:\n for i in range(tfc.docstartline, tfc.docendline + 1):\n abc.removeandwait(i + 1)\n for docline in tfc.docstring:\n if \"@type\" not in docline and \"@rtype\" not in docline:\n abc.addandwait(\n commentspace + docline, fc.endline + 1\n )\n abc.addandwait(commentspace + '\"\"\"', fc.endline + 1)\n abc.conduct(buffer)\n\n\ndef adddocstring_runtime_info(buffer) -> None:\n functionlog = FunctionAna(\".AGFPparameters.log\")\n\n functioncode = vimbufferutil.AllFunctions()\n functioncode.regist(buffer)\n\n abc = vimbufferutil.AddBufferContent()\n for func in functionlog.FUNCTION:\n funcdict = functionlog.FUNCTION[func]\n for _fc in 
functioncode.functions:\n fc: vimbufferutil.FunctionCode = _fc\n if fc.functionname == func:\n tfc: vimbufferutil.FunctionCode = fc\n commentspace = \" \" * (4 * (fc.indentlevel + 1))\n abc.addandwait(commentspace + '\"\"\"', tfc.endline + 1)\n if tfc.containdocstring:\n for i in range(tfc.docstartline, tfc.docendline + 1):\n abc.removeandwait(i + 1)\n for docline in tfc.docstring:\n if \"called number: \" not in docline and \"total time: \" not in docline:\n abc.addandwait(\n commentspace + docline, fc.endline + 1\n )\n abc.addandwait(\n commentspace + \"called number: \" +\n str(funcdict[\"analysis\"][\"callnumber\"]),\n tfc.endline + 1\n )\n abc.addandwait(\n commentspace + \"total time: \" +\n str(funcdict[\"analysis\"][\"collapsetime\"]) + \"s\",\n tfc.endline + 1\n )\n abc.addandwait(commentspace + '\"\"\"', tfc.endline + 1)\n abc.conduct(buffer)\n\n\n\ndef main() -> None:\n fa = FunctionAna()\n fa.readfromfile(\"testlog.log\")\n print(fa.FUNCTION)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sillybun/vim-agfp","sub_path":"ftplugin/python/agfp.py","file_name":"agfp.py","file_ext":"py","file_size_in_byte":4460,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"} +{"seq_id":"71517772947","text":"import torch \nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom utils import sort_by_seq_lens, get_mask\n\nclass SentenceEncoder(nn.Module):\n def __init__(self, input_size, hidden_size, num_layers=2, bias=True, dropout=0.0, bidirectional=True):\n super(SentenceEncoder, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.bias = bias\n self.dropout = dropout\n self.bidirectional = bidirectional\n self._encoder = nn.LSTM(\n input_size=self.input_size, \n hidden_size=self.hidden_size, \n num_layers=self.num_layers, \n bias=self.bias, \n bidirectional=self.bidirectional,\n batch_first=True\n )\n\n def forward(self, sequences_batch, sequnces_lengths):\n # 用于处理 padding token \n max_len = sequences_batch.shape[1]\n sorted_batch, sorted_length, _, restoration_idx = sort_by_seq_lens(sequences_batch, sequnces_lengths) #长度排序\n packed_batch = nn.utils.rnn.pack_padded_sequence(sorted_batch, sorted_length, batch_first=True) #压缩\n outputs, _ = self._encoder(packed_batch) # LSTM\n outputs, _ = nn.utils.rnn.pad_packed_sequence(outputs, batch_first=True, total_length=max_len) #还原 [batch_size, max_len, dim]\n # restore order\n reorder_outputs = outputs.index_select(0, restoration_idx) #还原顺序\n return reorder_outputs\n","repo_name":"LeslieOverfitting/nlp_AFQMC","sub_path":"model/sentenceEncoder.py","file_name":"sentenceEncoder.py","file_ext":"py","file_size_in_byte":1493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"6512857006","text":"from django.urls import path\n\nfrom .views import DummyPersonView, EmailPersonView, PersonView\n\nurlpatterns = [\n path(\n \"dummy-profile//\",\n DummyPersonView.as_view(),\n name=\"dummy-profile\",\n ),\n path(\n \"/email/\",\n EmailPersonView.as_view(),\n name=\"email_person_view\",\n ),\n path(\n \"/\",\n PersonView.as_view(),\n name=\"person_view\",\n ),\n path(\n \"\",\n PersonView.as_view(),\n name=\"person_view\",\n ),\n]\n","repo_name":"DemocracyClub/WhoCanIVoteFor","sub_path":"wcivf/apps/people/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"48"} 
+{"seq_id":"25871845441","text":"import sys\n\ninput = sys.stdin.readline\ndef al(s):\n\treturn ord(s)-ord('a')\ndef check(a,b):\n\tif len(a) != len(b):\n\t\treturn False\n\t\n\t\n\tmax_len = len(a)\n\t\n\tfor i in range(max_len):\n\t\tw1 = al(a[i])+1\n\t\tw2 = al(b[i])+1\n\t\tif not vi1[w1] and not vi2[w2]:\n\t\t\tvi1[w1] = w2\n\t\t\tvi2[w2] = w1\n\t\telif vi1[w1] != w2 or vi2[w2] != w1:\n\t\t\treturn False\n\treturn True\n\t\t\t\n\t\n\t\t\nn = int(input())\n\nwords = [list(input().strip()) for _ in range(n)]\nanswer = 0\nfor ind1 in range(n-1):\n\tfor ind2 in range(ind1+1,n):\n\t\tvi1 = [0 for i in range(27)]\n\t\tvi2 = [0 for i in range(27)]\n\t\tkk =check(words[ind1],words[ind2])\n\t\tif kk:\n\t\t\tanswer +=1\nprint(answer)","repo_name":"gkgg123/TIL_new","sub_path":"알고리즘/백준/1411_비슷한_단어_version1.py","file_name":"1411_비슷한_단어_version1.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70206860947","text":"'''Set Band descriptions from https://gis.stackexchange.com/questions/290796/how-to-edit-the-metadata-for-individual-bands-of-a-multiband-raster-preferably\nUsage:\n python set_band_desc.py /path/to/file.ext band1 desc1 [band2 desc2] .. [band-n desc-n]\nWhere:\n band = band number to set (starting from 1)\n desc = band description string (enclose in \"double quotes\" if it contains spaces)\nExample:\n python set_band_desc.py /path/to/dem.tif 1 \"Band 1 desc\" 2 \"Band 2 desc\" 3 \"Band 3 desc\"\n'''\nimport sys\nfrom osgeo import gdal\n\ndef set_band_descriptions(filepath, bands):\n \"\"\"\n filepath: path/virtual path/uri to raster\n bands: ((band, description), (band, description),...)\n \"\"\"\n ds = gdal.Open(filepath, gdal.GA_Update)\n for band, desc in bands:\n print(\"Setting band description of band: \" + str(band) + \" to: \" + desc)\n rb = ds.GetRasterBand(band)\n rb.SetDescription(desc)\n del ds\n\nif __name__ == '__main__':\n if len(sys.argv) < 4:\n print('''Usage:\n python set_band_desc.py /path/to/file.ext band desc [band desc...]\nWhere:\n band = band number to set (starting from 1)\n desc = band description string (enclose in \"double quotes\" if it contains spaces)\nExample:\n python set_band_desc.py /path/to/dem.tif 1 \"Band 1 desc\" 2 \"Band 2 desc\" 3 \"Band 3 desc\"''')\n sys.exit(1)\n filepath = sys.argv[1]\n bands = [int(i) for i in sys.argv[2::2]]\n names = sys.argv[3::2]\n set_band_descriptions(filepath, zip(bands, names))\n\n\n","repo_name":"bcgov/wps-research","sub_path":"py/raster_set_band_desc.py","file_name":"raster_set_band_desc.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"74754318865","text":"from ai_nodes.ainodes_engine_base_nodes.ainodes_backend import pixmap_to_tensor, tensor_image_to_pixmap, tensor2pil, \\\n pil2tensor\nfrom ainodes_frontend.base import register_node, get_next_opcode\nfrom ainodes_frontend.base import AiNode\nfrom ainodes_frontend.base.settings import handle_ainodes_exception\nfrom ainodes_frontend.node_engine.node_content_widget import QDMNodeContentWidget\nfrom ainodes_frontend import singleton as gs\n\nOP_NODE_IMAGE_PASTE = get_next_opcode()\nOP_NODE_IMAGE_CROP = get_next_opcode()\n\n\n\nclass ImagePasteWidget(QDMNodeContentWidget):\n def initUI(self):\n self.scale_value = self.create_double_spin_box(\"Scale\", min_val=0.01, max_val=10.0, default_val=1.0, step=0.1)\n self.create_main_layout(grid=1)\nclass 
ImageCropWidget(QDMNodeContentWidget):\n def initUI(self):\n self.top = self.create_spin_box(\"Top\", min_val=0, max_val=4096, default_val=0, step=1)\n self.left = self.create_spin_box(\"Left\", min_val=0, max_val=4096, default_val=0, step=1)\n self.bottom = self.create_spin_box(\"Bottom\", min_val=0, max_val=4096, default_val=0, step=1)\n self.right = self.create_spin_box(\"Right\", min_val=0, max_val=4096, default_val=0, step=1)\n self.create_main_layout(grid=1)\n\n\n@register_node(OP_NODE_IMAGE_PASTE)\nclass ImagePasteNode(AiNode):\n icon = \"ainodes_frontend/icons/base_nodes/v2/experimental.png\"\n help_text = \"Data objects in aiNodes are simple dictionaries,\\n\" \\\n \"that can hold any values under any name.\\n\" \\\n \"In most cases, you'll find them drive parameters,\\n\" \\\n \"or hold sequences of images. For an example, the\\n\" \\\n \"OpenAI node emits it's prompt in a data line,\\n\" \\\n \"but you'll find this info in all relevant places.\"\n op_code = OP_NODE_IMAGE_PASTE\n op_title = \"Paste Image\"\n content_label_objname = \"imagepaste_node\"\n category = \"aiNodes Base/Image\"\n NodeContent_class = ImagePasteWidget\n dim = (340, 180)\n output_data_ports = [0]\n exec_port = 1\n\n #custom_input_socket_name = [\"LOGO_IMAGE\", \"TARGET_IMAGE\"]\n\n def __init__(self, scene):\n super().__init__(scene, inputs=[5,5,1], outputs=[5,1])\n\n def evalImplementation_thread(self, index=0):\n\n result = [None]\n self.busy = True\n\n\n pixmap1 = self.getInputData(0)\n pixmap2 = self.getInputData(1)\n\n if pixmap1 and pixmap2:\n img1 = tensor2pil(pixmap1[0])\n img2 = tensor2pil(pixmap2[0])\n width, height = img2.size\n width = int(width * self.content.scale_value.value())\n height = int(height * self.content.scale_value.value())\n result = paste_image_center(img1, img2, width, height)\n print(result)\n result = [pil2tensor(result)]\n\n return [result]\n\n@register_node(OP_NODE_IMAGE_CROP)\nclass ImageCropNode(AiNode):\n icon = \"ainodes_frontend/icons/base_nodes/v2/experimental.png\"\n help_text = \"Data objects in aiNodes are simple dictionaries,\\n\" \\\n \"that can hold any values under any name.\\n\" \\\n \"In most cases, you'll find them drive parameters,\\n\" \\\n \"or hold sequences of images. 
For an example, the\\n\" \\\n \"OpenAI node emits it's prompt in a data line,\\n\" \\\n \"but you'll find this info in all relevant places.\"\n op_code = OP_NODE_IMAGE_CROP\n op_title = \"Crop Image\"\n content_label_objname = \"imagecrop_node\"\n category = \"aiNodes Base/Image\"\n NodeContent_class = ImageCropWidget\n dim = (340, 180)\n output_data_ports = [0]\n exec_port = 1\n\n #custom_input_socket_name = [\"LOGO_IMAGE\", \"TARGET_IMAGE\"]\n\n def __init__(self, scene):\n super().__init__(scene, inputs=[5,1], outputs=[5,1])\n\n def evalImplementation_thread(self, index=0):\n\n result = [None]\n self.busy = True\n\n\n pixmap1 = self.getInputData(0)\n\n if pixmap1:\n img1 = tensor2pil(pixmap1[0])\n\n top = self.content.top.value()\n left = self.content.left.value()\n bottom = self.content.bottom.value()\n right = self.content.right.value()\n\n result = crop_image(img1, top, left, bottom, right)\n print(result)\n result = [pil2tensor(result)]\n\n return [result]\n\n\n\ndef paste_image_center(img1, img2, width, height):\n # Resize second image\n img2_resized = img2.resize((width, height))\n\n # Calculate the position to paste, which is the center of img1\n paste_position = ((img1.width - img2_resized.width) // 2, (img1.height - img2_resized.height) // 2)\n\n # Paste img2_resized into img1 at the calculated position\n img1.paste(img2_resized, paste_position)\n\n # Return the result image\n return img1\n\ndef crop_image(img, upper_crop, left_crop, lower_crop, right_crop):\n \"\"\"Crop a PIL image by a certain amount from each side.\n\n Args:\n img (PIL Image): Image to be cropped.\n left_crop (int): Amount of pixels to crop from the left side.\n upper_crop (int): Amount of pixels to crop from the upper side.\n right_crop (int): Amount of pixels to crop from the right side.\n lower_crop (int): Amount of pixels to crop from the lower side.\n\n Returns:\n PIL Image: Cropped image.\n \"\"\"\n width, height = img.size\n left = left_crop\n upper = upper_crop\n right = width - right_crop\n lower = height - lower_crop\n\n cropped_image = img.crop((left, upper, right, lower))\n return cropped_image","repo_name":"XmYx/ainodes_engine_base_nodes","sub_path":"image_nodes/paste_in_node.py","file_name":"paste_in_node.py","file_ext":"py","file_size_in_byte":5535,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"48"} +{"seq_id":"39691112497","text":"import requests\nimport json\n\n\nclass RunMethod:\n def post_main(self, url, data, cookies=None, data_type=None):\n res = None\n if data_type == 'json':\n if cookies is None:\n res = requests.post(url=url, json=data, verify=False)\n else:\n res = requests.post(url=url, json=data, cookies=cookies, verify=False)\n return res.json()\n else:\n if cookies is not None:\n res = requests.post(url=url, json=data, cookies=cookies, verify=False)\n else:\n res = requests.post(url=url, json=data, verify=False)\n return res.json()\n\n def get_main(self, url, data=None, cookies=None):\n res = None\n if cookies is not None:\n res = requests.get(url=url, data=data, cookies=cookies, verify=False)\n else:\n res = requests.get(url=url, data=data, verify=False)\n return res.json()\n\n def run_main(self, method, url, data=None, cookies=None, data_type=None):\n res = None\n if method == 'post':\n res = self.post_main(url=url, data=data, cookies=cookies, data_type=data_type)\n else:\n res = self.get_main(url=url, data=data, cookies=cookies)\n # return json.dumps(res, ensure_ascii=False, sort_keys=True, indent=2)\n return json.dumps(res, 
ensure_ascii=False)\n\n\nif __name__ == '__main__':\n ru = RunMethod()\n url = 'http://192.168.2.153/yyjapi/api/purchorder/getpurchorder'\n data = {\"ticketno\": \"PO20181114000481\",\n \"flag\": 0\n }\n cookies = {\n \"SESSION\": \"783c9616-6f70-4aef-b734-587921c7f59f\"\n }\n res = ru.run_main(method='post', url=url, data=data, cookies=cookies, data_type='json')\n print(res)\n","repo_name":"an5456/test1234","sub_path":"NewInterface/base/run_method.py","file_name":"run_method.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"8433229104","text":"import os\nfrom PIL import Image\n\n# Use the os module. Works for UNIX, Windows, macOS...\n# e.g., to get the working directory\nos.getcwd()\n\n# where are the images?\nold_folder = \"C:\\\\Users\\\\sopsla\\\\Desktop\\\\session2a-image\\\\raw\"\n\n# load an image from file\nimg_path = os.path.join(old_folder, \"bird.jpg\")\nimg = Image.open(img_path)\n\n# because of using 'Image', the variable 'img' is now of class 'Image'.\n# what a class is, we will discuss later. For now, it is only necessary to know\n# that a class has ATTRIBUTES. 'Image' has attributes like 'format', 'size', and 'mode'.\n# Let's see what that means.\nprint(img.size)\n\n# If you want to access all of them in one line\n# Size: size in pixels\n# Format: is it JPEG, PNG, ...?\n# Mode: depth of a pixel. E.g. 1 (1-bit pixel, black & white); RGB (3x8 pixels, true color)\nprint(img.size, img.format, img.mode)\n\n# important to know is that \"size\" is of data type \"tuple\". A tuple is like a list, but it cannot be changed.\n# you can access the elements in the tuple in the same way as you would in a list.\n# you can use a\n\n# display the image\nimg.show()\n\n# save the image with a new file extension: JPG to PNG\n# first: split the extension and the name using os.path.splitext\n# N.B.: the function os.path.split() splits the path to the folder from the filename.\nfilename, extension = os.path.splitext(img_path)\n\n# now we're generating the filename with the .png extension\noutfile = filename + \".png\"\n\n# save the file\nimg.save(outfile, \"PNG\")\n\n# note that you can also change the name and save the file again.\nnew_img_path = os.path.join(old_folder, \"a_Python_will_eat_this_bird.png\")\nimg.save(new_img_path)\n\n# delete the files again\nos.remove(outfile)\nos.remove(new_img_path)\n\n# now let's save an image to a new folder\n# make a new folder\nnew_folder = \"C:\\\\Users\\\\sopsla\\\\Desktop\\\\session2a-image\\\\tmp\"\nos.mkdir(new_folder)\n\n# save the file\nimg.save(os.path.join(new_folder, \"copy.jpg\"))\n\n# this demonstrates that everything you do to the image in Python, is done to the data loaded onto the variable\n# in Python. Only when you save it, it will become part of the actual file.\n\n# how to get all files in a folder? 
os.listdir\nprint(os.listdir(old_folder))\n\n# EXERCISE: COPY ALL THE FILES TO THE NEW FOLDER\n","repo_name":"IMPRS-Python-course/session2a-image","sub_path":"fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"6880327225","text":"#-*- coding:utf-8 -*-\n#create by Fancy265\nimport cv2\nimport numpy as np\nimport random\n# import numbers\n# from PIL import Image,ImageFilter,ImageEnhance\n\nimport caffe\n\nnew_width = 64\nnew_height = 64\nisshuffle = True\n# meanvalue = [123.68,116.779,103.939]\n# scale = 0.0078125\nclass DataLayer(caffe.Layer):\n def setup(self,bottom,top):\n print(\"DataLayer setup!!\")\n # params is a python dictionary with layer parameters.\n params = eval(self.param_str)\n # store input as class variables\n self.batch_size = params['batch_size']\n self.source_dir = params['source_dir']\n self.batch_loader = BatchLoader(params,None)\n top[0].reshape(self.batch_size,3,new_height,new_width)\n top[1].reshape(self.batch_size,3)\n\n def forward(self,bottom,top):\n \"\"\"Get blobs and copy them into this layer's top blob vector.\"\"\"\n # imgmaps = self.batch_loader.mixup_gen()\n # print(\"DataLayer forward!!\")\n trainX, trainY = self.batch_loader.batch_imgs()\n # print(\"trainX:\",trainX.shape)\n # print(\"trainY:\",trainY.shape)\n # print(\"trainY:\", trainY)\n # print(\"top[0].data.shape:\",top[0].data.shape)\n # print(\"top[1].data.shape:\", top[1].data.shape)\n top[0].data[:, ...] = trainX\n top[1].data[:, ...] = trainY\n # print(\"DataLayer forward!!\")\n\n def reshape(self,bottom,top):\n pass\n def backward(self,top,propagate_down,bottom):\n pass\n\n\nclass BatchLoader(object):\n def __init__(self,params,result):\n self.result = result\n self.source = params['source_dir']\n self.batch_size = params['batch_size']\n self.new_width = new_width\n self.new_height = new_height\n self.alpha = 0.2\n\n self.isshuffle = isshuffle\n self.imagelist = open(self.source,'r').read().splitlines()\n self.sample_num = len(self.imagelist)\n if self.isshuffle:\n random.shuffle(self.imagelist)\n self._curIter = 0 # current iter\n self._totalIter = int(self.sample_num // (self.batch_size))\n\n\n def batch_imgs(self):\n\n if self._curIter >= self._totalIter:\n self._curIter = 0\n\n begin = self._curIter * self.batch_size\n end = (self._curIter+2)*self.batch_size\n if end>\"))\nkk = int(input(\"첫번째 행렬의 열(k) 수를 입력하세요>>\"))\njj = int(input(\"두번째 행렬의 열(j) 수를 입력하세요>>\"))\n\narr1 = [[0]*(kk) for i in range(ii)] # arr1은 ii행*kk열의 사이즈로 0으로 채워진 MATRIX\narr2 = [[0]*(jj) for i in range(kk)] # arr2는 kk행*jj열의 사이즈로 0으로 채워진 MATRIX\narr_result = [[0]*(jj) for i in range(ii)] # 행렬 곱셈의 결과는 ii행 jj열이다.\n\nprint(\"\\n곱셈의 결과는 %dx%d행렬입니다\" % (ii, jj))\n\nprint(\"\\n첫 번째 행렬에 대해 조사받겠습니다.\") # arr2 입력반복문\nfor i in range(0, ii):\n for k in range(0, kk):\n arr1[i][k] = int(input(\"%d행 %d열>\" %(i+1, k+1))) # 파이썬은 0행0열부터라서 +1\n\nprint(\"\\n입력받은 첫 번째 행렬입니다.\") # arr1 출력반복문, 위에다 합치면 안됨..\nfor i in range(0, ii):\n for k in range(0, kk):\n print(\"%3d\" %(arr1[i][k]), end = \" \") # %3d로 두자리 수일때 밀림 방지, end구분은 스페이스\n print(\"\") # 행 표시후 다음 열에 행 표시하기 위한 \n\nprint(\"\\n두 번째 행렬에 대해 조사받겠습니다.\")\nfor k in range(0, kk):\n for j in range(0, jj):\n arr2[k][j] = int(input(\"%d행 %d열>\" %(k+1, j+1)))\n\nprint(\"\\n입력받은 두 번째 행렬입니다.\")\nfor k in range(0, kk):\n for j in range(0, jj):\n print(\"%3d\" %(arr2[k][j]), end = \" \")\n print(\"\")\n\nprint(\"\\n※행렬의 곱셈 결과는 다음과 같습니다.※\")\nfor k in range(0, kk):\n 
for j in range(0, jj):\n for i in range(0, ii):\n arr_result[i][j] = arr_result[i][j] + arr1[i][k] * arr2[k][j]\n\nfor i in range(0, ii): # 출력 반복문은 어쩔 수 없이 따로 만들어야함\n for j in range(0, jj): \n print(\"%5d\" %arr_result[i][j], end=\"\") # 깰끔!하게 출력하기 위해 %5d를 썼다\n print(\"\")\n","repo_name":"nexus-bit/python_practice","sub_path":"Projects/03_array_multiple.py","file_name":"03_array_multiple.py","file_ext":"py","file_size_in_byte":2345,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32376187576","text":"a = abs(-20)\na = max(1, 4, -5, 0)\na = hex(16)\nprint(a)\n\na = int('123')\na = int(12.34)\na = float('12.1234567890123456789') #得到15位小数\n\nb = str(1.23)\nb = str(100)\n\nc = bool(1) #True\nc = bool('') #False\n\n#函数名其实就是指向一个函数对象的引用,完全可以把函数名赋给一个变量,相当于别名\na = abs\nb = a(-1)\n\n\ndef my_abs(x):\n if not isinstance(x, (int, float)):\n raise TypeError('bad operand type')\n if x >= 0:\n return x\n else:\n return -x\n\n#如果没有return语句,函数执行完毕后也会返回结果,只是结果为None。return None可以简写为return\n\n# c = my_abs('-10')\n# print(c)\n\n#空函数,pass可以用来作为占位符,以后想好再写\ndef nop():\n pass\n#多值返回\nimport math\ndef move(x, y, step, angle=0):\n nx = x + step * math.cos(angle)\n ny = y - step * math.sin(angle)\n return nx, ny\n\nx, y = move(100, 100, 30, math.pi / 6)\nprint(x, y)\n# 实际返回是一个值,是个tuple\nr = move(100, 100, 30, math.pi / 6)\nprint(r)\n\n#默认参数一般放后面,默认参数必须指向不变对象\ndef power(x, n=2):\n s = 1\n while n > 0:\n n = n - 1\n s = s * x\n return s\n\nprint('power(5,2)= %s' % (power(5, 2)))\nprint('power(5)= %s' % (power(5)))\n\ndef add_end(L=None):\n if L is None:\n L = []\n L.append('END')\n return L\nl = add_end()\nl = add_end()\nprint(l)\n\n#可变参数,参数 numbers 接收到的是一个tuple\ndef calc(*numbers):\n sum = 0\n for n in numbers:\n sum = sum + n * n\n return sum\nd = calc(1, 2, 3)\n\n#list或tuple的元素变成可变参数,前面加*\nnums = [1, 2, 3]\nd = calc(*nums)\nprint(d)\n\n#关键字参数允许你传入0个或任意个含参数名的参数,这些关键字参数在函数内部自动组装为一个dict\ndef person(name, age, **kw):\n if 'city' in kw:\n pass\n if 'job' in kw:\n pass\n print('name:', name, 'age:', age, 'other:', kw)\n\nperson('lily', 30, city='Beijing')\nextra = {'city':\"Beijing\", 'job': 'Engineer'}\nperson('jack', 24, city=extra['city'], job=extra['job'])\nperson('jack', 24, **extra) #获取extra的一份拷贝\n\n#命名关键字参数,*后面只接收city和job作为关键字参数,如果有默认值则可以不传入city参数\ndef person2(name, age, *, city='Shanghai', job):\n print(name, age, city, job)\n\nperson2('jack', 24, city='Beijing', job='Engineer')\n\n#如果函数定义中已经有了一个可变参数,后面跟着的命名关键字参数就不再需要一个特殊分隔符*\ndef person3(name, age, *args, city, job):\n print(name, age, args, city, job)\n\nperson3('jack', 24, 'man', '168', city='Beijing', job='Engineer')\n\n#在Python中定义函数,可以用必选参数、默认参数、可变参数、关键字参数和命名关键字参数\n#这5种参数都可以组合使用。但是请注意,参数定义的顺序必须是:必选参数、默认参数、可变参数、命名关键字参数和关键字参数\n\n#递归函数\ndef fact(n):\n if n == 1:\n return 1\n return n * fact(n - 1)\n\n#尾递归,在函数返回的时候,调用自身本身,每一步的乘积传入到递归函数中\n#Python标准的解释器没有���对尾递归做优化,任何递归函数都存在栈溢出的问题\ndef fact2(n):\n return fact2_iter(n, 1)\n \ndef fact2_iter(num, product):\n if num == 1:\n return product\n return fact2_iter(num - 1, num * product)\n","repo_name":"ryan1943/learn-python","sub_path":"func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"70400083345","text":"from django.contrib.auth import get_user_model\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import get_object_or_404, redirect, render\n\nfrom 
.forms import CommentForm, PostForm\nfrom .models import Follow, Group, Post\n\nfrom yatube.settings import PAGE_COUNT # isort:skip\n\n\ndef paginator_func(request, object, count_post):\n paginator = Paginator(object, count_post)\n page_number = request.GET.get('page')\n return paginator.get_page(page_number)\n\n\nUser = get_user_model()\n\n\ndef index(request):\n post_list = Post.objects.all()\n page_obj = paginator_func(request, post_list, PAGE_COUNT)\n # Pass it in the context dictionary\n context = {\n 'page_obj': page_obj,\n }\n return render(request, 'posts/index.html', context)\n\n\ndef group_posts(request, slug):\n group = get_object_or_404(Group, slug=slug)\n posts = group.posts.all()\n page_obj = paginator_func(request, posts, PAGE_COUNT)\n context = {\n 'group': group,\n 'page_obj': page_obj,\n }\n return render(request, 'posts/group_list.html', context)\n\n\ndef profile(request, username):\n author = get_object_or_404(User, username=username)\n follow_user = request.user\n posts = author.posts.all()\n count = posts.count()\n page_obj = paginator_func(request, posts, PAGE_COUNT)\n following = False\n if not follow_user.is_anonymous:\n follow = Follow.objects.filter(author=author)\n if follow.filter(user=follow_user).exists():\n following = True\n context = {\n 'posts': posts,\n 'count': count,\n 'page_obj': page_obj,\n 'author': author,\n 'following': following,\n 'follow_user': follow_user\n }\n return render(request, 'posts/profile.html', context)\n\n\ndef post_detail(request, post_id):\n post = get_object_or_404(Post, id=post_id)\n author = post.author\n posts = author.posts\n count = posts.count()\n comments = post.comments.all()\n form = CommentForm(request.POST or None)\n # Query the model and build the context dictionary here\n context = {\n 'post': post,\n 'count': count,\n 'author': author,\n 'comments': comments,\n 'form': form\n }\n return render(request, 'posts/post_detail.html', context)\n\n\n@login_required\ndef post_create(request):\n form = PostForm(request.POST or None, files=request.FILES or None)\n if form.is_valid():\n new_post = form.save(commit=False)\n new_post.author = request.user\n new_post.save()\n return redirect('posts:profile', request.user.username)\n\n context = {\n 'form': form\n }\n return render(request, 'posts/create_post.html', context)\n\n\n@login_required\ndef post_edit(request, post_id):\n edit_post = get_object_or_404(Post, id=post_id)\n\n if request.user != edit_post.author:\n return redirect('posts:post_detail', edit_post.id)\n form = PostForm(request.POST or None,\n files=request.FILES or None, instance=edit_post)\n if form.is_valid():\n form.save()\n return redirect('posts:post_detail', edit_post.id)\n\n context = {\n 'form': form,\n 'is_edit': True\n }\n return render(request, 'posts/create_post.html', context)\n\n\n@login_required\ndef add_comment(request, post_id):\n post = get_object_or_404(Post, id=post_id)\n form = CommentForm(request.POST or None)\n if form.is_valid():\n comment = form.save(commit=False)\n comment.author = request.user\n comment.post = post\n comment.save()\n return redirect('posts:post_detail', post_id=post_id)\n\n\n@login_required\ndef follow_index(request):\n # information about the current user is available in request.user\n post_list = Post.objects.filter(author__following__user=request.user)\n page_obj = paginator_func(request, post_list, PAGE_COUNT)\n # Pass it in the context dictionary\n context = {\n 'page_obj': page_obj,\n }\n return render(request, 'posts/follow.html', context)\n\n\n@login_required\ndef 
profile_follow(request, username):\n author = get_object_or_404(User, username=username)\n follow = Follow.objects.filter(\n author=author).filter(user=request.user).exists()\n # Subscribe to the author\n if request.user != author and not follow:\n Follow.objects.create(\n user=request.user,\n author=author\n )\n return redirect('posts:profile', username=username)\n\n\n@login_required\ndef profile_unfollow(request, username):\n # Dislike, unsubscribe\n author = get_object_or_404(User, username=username)\n Follow.objects.filter(author=author.id).delete()\n return redirect('posts:profile', username=username)\n","repo_name":"Andrei191/hw05_final","sub_path":"yatube/posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"31733296257","text":"import openpyxl\n\ndef create_workbook(target_date, star_dict, feedback_list):\n wb = openpyxl.Workbook() # Create new excel workbook\n filename = \"today_feedback.xlsx\" # Workbook name\n # filename = \"test.xlsx\" # Workbook name\n\n ws = wb.active # Activate the worksheet of the workbook\n\n # The Content for this Worksheet\n ws['a1'] = \"Date\"\n ws['b1'] = str(target_date)\n ws['a2'] = \"Star Ratings\"\n for i, (star, count) in enumerate(star_dict.items(), 3):\n ws[f'a{i}'] = f\"{star} Stars\"\n ws[f'b{i}'] = count\n if star == 1:\n ws[f'a{i}'] = f\"{star} Star\"\n\n ws['a9'] = \"Performance Count(s) Received Today\"\n ws['a10'] = \"The chatbot experience (e.g. intuitive)\"\n ws['a11'] = \"Clarity of the announcement (e.g. volume)\"\n ws['a12'] = \"Ease of understanding the FAQ page (e.g. comprehensive)\"\n ws['a13'] = \"Navigation feature (e.g. directing me to the appropriate machine)\"\n for i, sum in enumerate(feedback_list[:4],10):\n ws[f'b{i}'] = sum\n \n ws['a15'] = \"Features Count(s) Received Today\"\n ws['a16'] = \"Video call with staff for General Enquiry\"\n ws['a17'] = \"Show locations of nearby branches & ATM\"\n ws['a18'] = \"Notify customers of their unattended items (e.g. Wallet, Bag)\"\n ws['a19'] = \"Step-by-step guide to perform a particular transaction (e.g. 
Top-up of Ezlink card)\"\n for i, sum in enumerate(feedback_list[-4:],16):\n ws[f'b{i}'] = sum\n \n wb.save(filename)\n\nif __name__=='__main__':\n import MySQLdb\n\n def get_stars(sending_date, i):\n cursor.execute(f\"select count(stars) from feedback where time like '{sending_date}%' and stars like {i}\")\n star = cursor.fetchall()\n return star[0][0]\n\n db = MySQLdb.connect('172.18.0.3', 'username', 'password', db='dbs')\n cursor = db.cursor()\n target_date = '2021-10-07'\n cursor.execute(f\"select cast(avg(f.stars) as decimal(10,1)), sum(p.intuitive), sum(p.volume), sum(p.comprehensive), \\\n sum(p.directing), sum(ft.video_call), sum(ft.show_location), sum(ft.unattended), sum(ft.transaction) \\\n from feedback f join performance p on f.id=p.id join features ft on f.id=ft.id where f.time like '{target_date}%'\")\n fetched = cursor.fetchall() \n stars = {i : get_stars(target_date, i) for i in range(1, 6)} \n feedback_sum = [int(i) for i in fetched[0][1:]]\n create_workbook(target_date, stars, feedback_sum)\n","repo_name":"JitJuanC/Innovate","sub_path":"examples/excel_write.py","file_name":"excel_write.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"36151909148","text":"import mongodfbuilder\nfrom pandas import set_option\nimport preprocessingutils as pre\n\n\n\nif __name__ == '__main__':\n mr = mongodfbuilder.MongoDFBuilder()\n dfg = mr.build_readings_dataframe()\n dfm = mr.build_task_attempts_dataframe()\n dfj = mr.build_jobs_dataframe()\n dfs = mr.build_stage_attempts_dataframe()\n dfe = mr.build_stage_executors_dataframe()\n dfenv = mr.build_environment_dataframe()\n dfapps = mr.build_apps_dataframe()\n dfapps = dfapps.merge(dfenv,on='appId').sort('start')\n dfs = dfs.merge(dfenv,on='appId').sort('start')","repo_name":"Brandonage/DataRetriever","sub_path":"debugScript.py","file_name":"debugScript.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"20689982765","text":"import math\nfrom typing import Sequence, Union, cast\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torch.autograd import Function\n\nfrom medical_seg.networks.layers.convutils import gaussian_1d, same_padding\nfrom medical_seg.utils import SkipMode, ensure_tuple_rep, optional_import\n\n_C, _ = optional_import(\"monai._C\")\n\n__all__ = [\"SkipConnection\", \"Flatten\", \"GaussianFilter\", \"LLTM\", \"Reshape\", \"separable_filtering\"]\n\n\nclass SkipConnection(nn.Module):\n \"\"\"\n Combine the forward pass input with the result from the given submodule::\n\n --+--submodule--o--\n |_____________|\n\n The available modes are ``\"cat\"``, ``\"add\"``, ``\"mul\"``.\n \"\"\"\n\n def __init__(self, submodule, dim: int = 1, mode: Union[str, SkipMode] = \"cat\") -> None:\n \"\"\"\n\n Args:\n submodule: the module defines the trainable branch.\n dim: the dimension over which the tensors are concatenated.\n Used when mode is ``\"cat\"``.\n mode: ``\"cat\"``, ``\"add\"``, ``\"mul\"``. 
defaults to ``\"cat\"``.\n \"\"\"\n super().__init__()\n self.submodule = submodule\n self.dim = dim\n self.mode = SkipMode(mode).value\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n y = self.submodule(x)\n\n if self.mode == \"cat\":\n return torch.cat([x, y], dim=self.dim)\n if self.mode == \"add\":\n return torch.add(x, y)\n if self.mode == \"mul\":\n return torch.mul(x, y)\n raise NotImplementedError(f\"Unsupported mode {self.mode}.\")\n\n\nclass Flatten(nn.Module):\n \"\"\"\n Flattens the given input in the forward pass to be [B,-1] in shape.\n \"\"\"\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return x.view(x.size(0), -1)\n\n\nclass Reshape(nn.Module):\n \"\"\"\n Reshapes input tensors to the given shape (minus batch dimension), retaining original batch size.\n \"\"\"\n\n def __init__(self, *shape: int) -> None:\n \"\"\"\n Given a shape list/tuple `shape` of integers (s0, s1, ... , sn), this layer will reshape input tensors of\n shape (batch, s0 * s1 * ... * sn) to shape (batch, s0, s1, ... , sn).\n\n Args:\n shape: list/tuple of integer shape dimensions\n \"\"\"\n super().__init__()\n self.shape = (1,) + tuple(shape)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n shape = list(self.shape)\n shape[0] = x.shape[0] # done this way for Torchscript\n return x.reshape(shape)\n\n\ndef separable_filtering(x: torch.Tensor, kernels: Union[Sequence[torch.Tensor], torch.Tensor]) -> torch.Tensor:\n \"\"\"\n Apply 1-D convolutions along each spatial dimension of `x`.\n\n Args:\n x: the input image. must have shape (batch, channels, H[, W, ...]).\n kernels: kernel along each spatial dimension.\n could be a single kernel (duplicated for all dimension), or `spatial_dims` number of kernels.\n\n Raises:\n TypeError: When ``x`` is not a ``torch.Tensor``.\n \"\"\"\n if not torch.is_tensor(x):\n raise TypeError(f\"x must be a torch.Tensor but is {type(x).__name__}.\")\n\n spatial_dims = len(x.shape) - 2\n _kernels = [\n torch.as_tensor(s, dtype=torch.float, device=s.device if torch.is_tensor(s) else None)\n for s in ensure_tuple_rep(kernels, spatial_dims)\n ]\n _paddings = [cast(int, (same_padding(k.shape[0]))) for k in _kernels]\n n_chns = x.shape[1]\n\n def _conv(input_: torch.Tensor, d: int) -> torch.Tensor:\n if d < 0:\n return input_\n s = [1] * len(input_.shape)\n s[d + 2] = -1\n _kernel = kernels[d].reshape(s)\n _kernel = _kernel.repeat([n_chns, 1] + [1] * spatial_dims)\n _padding = [0] * spatial_dims\n _padding[d] = _paddings[d]\n conv_type = [F.conv1d, F.conv2d, F.conv3d][spatial_dims - 1]\n return conv_type(input=_conv(input_, d - 1), weight=_kernel, padding=_padding, groups=n_chns)\n\n return _conv(x, spatial_dims - 1)\n\n\nclass GaussianFilter(nn.Module):\n def __init__(\n self,\n spatial_dims: int,\n sigma: Union[Sequence[float], float, Sequence[torch.Tensor], torch.Tensor],\n truncated: float = 4.0,\n approx: str = \"erf\",\n requires_grad: bool = False,\n ) -> None:\n \"\"\"\n Args:\n spatial_dims: number of spatial dimensions of the input image.\n must have shape (Batch, channels, H[, W, ...]).\n sigma: std. 
could be a single value, or `spatial_dims` number of values.\n truncated: spreads how many stds.\n approx: discrete Gaussian kernel type, available options are \"erf\", \"sampled\", and \"scalespace\".\n\n - ``erf`` approximation interpolates the error function;\n - ``sampled`` uses a sampled Gaussian kernel;\n - ``scalespace`` corresponds to\n https://en.wikipedia.org/wiki/Scale_space_implementation#The_discrete_Gaussian_kernel\n based on the modified Bessel functions.\n\n requires_grad: whether to store the gradients for sigma.\n if True, `sigma` will be the initial value of the parameters of this module\n (for example `parameters()` iterator could be used to get the parameters);\n otherwise this module will fix the kernels using `sigma` as the std.\n \"\"\"\n super().__init__()\n self.sigma = [\n torch.nn.Parameter(\n torch.as_tensor(s, dtype=torch.float, device=s.device if torch.is_tensor(s) else None),\n requires_grad=requires_grad,\n )\n for s in ensure_tuple_rep(sigma, int(spatial_dims))\n ]\n self.truncated = truncated\n self.approx = approx\n for idx, param in enumerate(self.sigma):\n self.register_parameter(f\"kernel_sigma_{idx}\", param)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Args:\n x: in shape [Batch, chns, H, W, D].\n \"\"\"\n _kernel = [gaussian_1d(s, truncated=self.truncated, approx=self.approx) for s in self.sigma]\n return separable_filtering(x=x, kernels=_kernel)\n\n\nclass LLTMFunction(Function):\n @staticmethod\n def forward(ctx, input, weights, bias, old_h, old_cell):\n outputs = _C.lltm_forward(input, weights, bias, old_h, old_cell)\n new_h, new_cell = outputs[:2]\n variables = outputs[1:] + [weights]\n ctx.save_for_backward(*variables)\n\n return new_h, new_cell\n\n @staticmethod\n def backward(ctx, grad_h, grad_cell):\n outputs = _C.lltm_backward(grad_h.contiguous(), grad_cell.contiguous(), *ctx.saved_tensors)\n d_old_h, d_input, d_weights, d_bias, d_old_cell = outputs[:5]\n\n return d_input, d_weights, d_bias, d_old_h, d_old_cell\n\n\nclass LLTM(nn.Module):\n \"\"\"\n This recurrent unit is similar to an LSTM, but differs in that it lacks a forget\n gate and uses an Exponential Linear Unit (ELU) as its internal activation function.\n Because this unit never forgets, call it LLTM, or Long-Long-Term-Memory unit.\n It has both C++ and CUDA implementation, automatically switch according to the\n target device where put this module to.\n\n Args:\n input_features: size of input feature data\n state_size: size of the state of recurrent unit\n\n Referring to: https://pytorch.org/tutorials/advanced/cpp_extension.html\n \"\"\"\n\n def __init__(self, input_features: int, state_size: int):\n super(LLTM, self).__init__()\n self.input_features = input_features\n self.state_size = state_size\n self.weights = nn.Parameter(torch.empty(3 * state_size, input_features + state_size))\n self.bias = nn.Parameter(torch.empty(1, 3 * state_size))\n self.reset_parameters()\n\n def reset_parameters(self):\n stdv = 1.0 / math.sqrt(self.state_size)\n for weight in self.parameters():\n weight.data.uniform_(-stdv, +stdv)\n\n def forward(self, input, state):\n return LLTMFunction.apply(input, self.weights, self.bias, *state)\n","repo_name":"920232796/MedicalSeg","sub_path":"medical_seg/networks/layers/simplelayers.py","file_name":"simplelayers.py","file_ext":"py","file_size_in_byte":8103,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"48"} +{"seq_id":"73133625745","text":"# -*- coding: utf-8 -*-\n\n\nimport tensorflow as 
tf\nfrom tensorflow.contrib import rnn\n\n\nclass Model(object):\n def __init__(self, num_layers, seq_length, embedding_size, vocab_size,\n rnn_size, label_size, embedding=None, use_bilstm=False):\n\n self.input_x = tf.placeholder(tf.int64, [None, seq_length], name=\"input_x\")\n self.input_y = tf.placeholder(tf.int64, [None, 1])\n self.dropout_keep_prob = tf.placeholder(tf.float32, name=\"dropout_keep_prob\")\n\n with tf.name_scope('embeddingLayer'):\n if embedding:\n embedded = tf.nn.embedding_lookup(embedding, self.input_x)\n else:\n W = tf.get_variable('W', [vocab_size, embedding_size])\n embedded = tf.nn.embedding_lookup(W, self.input_x)\n\n inputs = tf.split(embedded, seq_length, 1)\n inputs = [tf.squeeze(input_, [1]) for input_ in inputs]\n\n def get_cell(rnn_size, dropout_keep_prob):\n cell = rnn.LSTMCell(rnn_size, initializer=tf.truncated_normal_initializer(stddev=0.3))\n cell = rnn.DropoutWrapper(cell, output_keep_prob=dropout_keep_prob) # dropout比例\n return cell\n\n with tf.name_scope('lstm_layer'):\n if use_bilstm:\n cell_fw = rnn.MultiRNNCell([get_cell(rnn_size, self.dropout_keep_prob) for _ in range(num_layers)])\n cell_bw = rnn.MultiRNNCell([get_cell(rnn_size, self.dropout_keep_prob) for _ in\n range(num_layers)])\n self.outputs, _, _ = rnn.static_bidirectional_rnn(cell_fw=cell_fw,\n cell_bw=cell_bw,\n inputs=inputs,\n dtype=tf.float32)\n else:\n cell = rnn.MultiRNNCell([get_cell(rnn_size, self.dropout_keep_prob) for _ in range(num_layers)])\n self.outputs, _ = rnn.static_rnn(cell, inputs, dtype=tf.float32)\n\n with tf.name_scope('softmaxLayer'):\n logits = tf.layers.dense(self.outputs[-1], label_size)\n self.probs = tf.nn.softmax(logits, dim=1, name=\"probs\")\n\n with tf.name_scope('loss'):\n self.loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=tf.squeeze(\n self.input_y)))\n tf.summary.scalar(\"loss\", self.loss)\n\n with tf.name_scope('predict'):\n self.correct_pred = tf.equal(tf.argmax(self.probs, 1), self.input_y)\n self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))\n tf.summary.scalar(\"accuracy\", self.accuracy)\n","repo_name":"xiaogp/track_sequence_anomaly_detection","sub_path":"rnn/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"48"} +{"seq_id":"41600713975","text":"import copy\n\ndef lift_up(stack, n):\n return stack[n:], stack[:n]\n\ndef lift_down(stack, moved):\n return moved[::-1] + stack\n\ndef lift_down_9001(stack, moved):\n return moved + stack\n \n \n# STFU --- deal with it \nsome_string = \"\"\" [G] [D] [Q] \n[P] [T] [L] [M] [Z] \n[Z] [Z] [C] [Z] [G] [W] \n[M] [B] [F] [P] [C] [H] [N]\n[T] [S] [R] [H] [W] [R] [L] [W]\n[R] [T] [Q] [Z] [R] [S] [Z] [F] [P]\n[C] [N] [H] [R] [N] [H] [D] [J] [Q]\n[N] [D] [M] [G] [Z] [F] [W] [S] [S]\"\"\"\nstacks_data = some_string.split(\"\\n\")\n\nstacks = [[], [], [], [], [], [], [], [], []] \nfor row in stacks_data:\n index = 0\n for col in range(1, len(row), 4):\n if row[col] != \" \":\n stacks[index].append(row[col])\n index += 1\nprint(stacks)\n\nwith open(\"moves.txt\", \"r\") as moves:\n for move in moves:\n stacks = copy.deepcopy(stacks)\n move = move.split()\n n = int(move[1])\n from_stack = int(move[3]) - 1\n to_stack = int(move[5]) - 1\n stacks[from_stack], move_me = lift_up(stacks[from_stack], n)\n #stacks[to_stack] = lift_down(stacks[to_stack], move_me)\n stacks[to_stack] = lift_down_9001(stacks[to_stack], move_me)\n\nfor stack in 
stacks:\n print(stack[0])\n","repo_name":"jameshughes89/advent_of_code","sub_path":"day_5.py","file_name":"day_5.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"30700113054","text":"# Training\r\n\r\n\r\n# Import Libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nimport statsmodels.formula.api as smf\r\nfrom patsy import dmatrices\r\nfrom statsmodels.stats.diagnostic import het_white\r\nimport psycopg2\r\nfrom dotenv import load_dotenv\r\nimport pandas.io.sql as sqlio\r\nimport os\r\n\r\ndef OLStrain(fechai, fechaf): \r\n '''\r\n Train an OLS model using historic PQRS data between fechai and fechaf\r\n The arguments (fechai and fechaf) are STRINGS with the format '%Y-%m-%d', for example: 2022-01-25\r\n '''\r\n # =============================================================================\r\n # Read data from database\r\n # =============================================================================\r\n \r\n query = f'''SELECT TO_DATE(fecha_radicacion, 'DD/MM/YYYY' ) AS fecha_radicacion,\r\n glb_dependencia_id,\r\n pqr_tipo_derechos_id,\r\n ase_tipo_poblacion_id,\r\n glb_entidad_id,\r\n TO_DATE(fecha_vencimiento, 'DD/MM/YYYY' ) AS fecha_vencimiento,\r\n TO_DATE(fecha_respuesta, 'DD/MM/YYYY' ) AS fecha_respuesta\r\n FROM modulo_pqr_sector_salud \r\n WHERE TO_DATE(fecha_radicacion, 'DD/MM/YYYY') > TO_DATE('{fechai}', 'YYYY-MM-DD')\r\n AND TO_DATE(fecha_radicacion, 'DD/MM/YYYY') < TO_DATE('{fechaf}', 'YYYY-MM-DD')\r\n ORDER BY TO_DATE(fecha_radicacion, 'DD/MM/YYYY' )\r\n '''\r\n print(f'Reading data from database between {fechai} and {fechaf}')\r\n df_model = querier(query) \r\n \r\n # =============================================================================\r\n # Data Cleaning and pre-processing\r\n # =============================================================================\r\n print('************************')\r\n print('Cleaning the data...')\r\n print('************************')\r\n \r\n # Drop the rows where there are nan.\r\n df_model = df_model.dropna()\r\n \r\n # Create columns with days between \"fecha_radicacion\" and \"fecha_respuesta\"\r\n df_model['tiempo_respuesta'] = (df_model.fecha_respuesta - df_model.fecha_radicacion).dt.days # This may be possible to do in the Query\r\n \r\n # Delete data where tiempo_respuesta is negative (corrupted data)\r\n df_model = df_model[df_model.tiempo_respuesta>=0] # This can be done in the Query\r\n \r\n # Create a column of category for plazo where:\r\n # plazo < 100: 0\r\n # plazo between 100 and 180: 1\r\n # plazo > 180: 2\r\n df_model['plazo_categoria'] = (df_model.fecha_vencimiento - df_model.fecha_radicacion).dt.days.apply(lambda x: 0 if x <100 else 1 if (x>100) & (x <180) else 2 )\r\n \r\n # Drop 'fecha' columns. 
They are no longer needed\r\n df_model.drop(['fecha_radicacion','fecha_respuesta','fecha_vencimiento'], axis=1, inplace =True)\r\n \r\n # Assign data type (dtype) for each column\r\n df_model = df_model.astype({ \r\n \r\n 'glb_dependencia_id': 'category',\r\n 'pqr_tipo_derechos_id': 'category',\r\n 'ase_tipo_poblacion_id': 'category',\r\n 'glb_entidad_id': 'category',\r\n 'plazo_categoria': 'category'\r\n })\r\n \r\n # Delete unused categories:\r\n df_model[df_model.select_dtypes(include='category').columns] = df_model.select_dtypes(include='category').apply(lambda x: x.cat.remove_unused_categories(), axis=0)\r\n \r\n \r\n # =============================================================================\r\n # Train the model\r\n # =============================================================================\r\n print('************************')\r\n print('Training the model...')\r\n print('************************')\r\n \r\n # Define train and test datasets (80% - 20%)\r\n train = df_model.sample(frac=0.8) \r\n test = df_model.drop(train.index).sample(frac=1.0)\r\n \r\n \r\n # List of independent variables\r\n col_list = list(train.columns)\r\n col_list.remove('tiempo_respuesta')\r\n \r\n # list with the variables for the OLS formula (apply C(var) syntax for categorical variables)\r\n col_list_all = ['C(%s)'%(x) for x in col_list ]\r\n \r\n # Fit the OLS Linear Regression model using statsmodel library\r\n model_plazo_log_noplazo_nosol, formula = apply_reg(col_list_all, df = train, trans='sqrt')\r\n \r\n # Apply White Test to verify heteroscedasticity in the model\r\n y, X = dmatrices(formula, train, return_type='dataframe')\r\n results = het_white(model_plazo_log_noplazo_nosol.resid, X)\r\n \r\n print('******************************')\r\n print('Results from White Test to detect potential heteroscedasticity:')\r\n print(f' - F-Statistic for White Test is {results[1]}' )\r\n print(f' - p-value for White Test is {results[2]}' )\r\n if results[2] < 0.05:\r\n print('With an alpha of 0.05 the null hypothesis is rejected, so there is potential heteroscedasticity in the dataset')\r\n else:\r\n print('With an alpha of 0.05 we cannot reject the null hypothesis, so there is no proof of significant heteroscedasticity in the dataset')\r\n \r\n \r\n # Error Metrics for train and test datasets\r\n eval_df = pd.DataFrame()\r\n for df in ['train','test']:\r\n \r\n true_values =eval(df).tiempo_respuesta # True Values\r\n prediction = model_plazo_log_noplazo_nosol.predict(eval(df))**2 # Predicted values\r\n \r\n # Calculate metrics\r\n MAE = np.mean(np.abs(prediction-true_values))\r\n RMSE = np.sqrt(np.mean(np.square(prediction-true_values)))\r\n MAPE = np.mean(np.abs((prediction-true_values)/true_values)*100)\r\n # save the results in a pandas column\r\n eval_serie = pd.Series({'MAE':MAE,'RMSE':RMSE,'MAPE':MAPE})\r\n # Build a DataFrame with results for train and test\r\n eval_df[df] = eval_serie\r\n \r\n print('*********************************')\r\n print('*********************************')\r\n print('Error Metrics Results for train and test datasets:')\r\n print(eval_df)\r\n \r\n \r\n # Save the model to a pickle file\r\n model_plazo_log_noplazo_nosol.save('LR_model_trained_%s_%s.pickle'%(fechai, fechaf))\r\n \r\n\r\n# Functions to apply the OLS model to a df\r\ndef apply_reg(lista, df, trans='no'):\r\n \r\n '''\r\n Create an OLS model with the vars in \"lista\" and the data in \"df\"\r\n Arguments: \r\n lista: list with the independent variables to be included in the ols smf.formula (with format C(var) 
for categorical variables)\r\n df: dataframe that contains the independent and the dependent variables. \r\n trans: 'log' or 'sqrt' if the dependent variable will be transformed in the model to log(var) or sqrt(var). Otherwise, apply no transformation\r\n \r\n Returns:\r\n model_fit: the statsmodel object containing the result of the Linear Regression model\r\n formula: string with the formula used in the linear regression\r\n '''\r\n \r\n columns_str = \" + \".join(lista)\r\n \r\n # Create the model and print its summary\r\n if trans == 'log':\r\n formula = 'np.log(tiempo_respuesta+1) ~ ' + columns_str \r\n elif trans == 'sqrt':\r\n formula = 'np.sqrt(tiempo_respuesta) ~ ' + columns_str \r\n else:\r\n formula = 'tiempo_respuesta ~ ' + columns_str \r\n model = smf.ols(formula = formula, data = df)\r\n model_fit = model.fit()\r\n \r\n print('*********************************')\r\n print('The OLS formula is: %s'%formula)\r\n print('*********************************')\r\n print(model_fit.summary()) \r\n print('*********************************')\r\n print('AIC from model_all is ' + str(int(model_fit.aic))) # print AIC value\r\n \r\n return model_fit, formula\r\n\r\n\r\ndef querier(transaccion):\r\n \r\n load_dotenv()\r\n \r\n DB_NAME = os.getenv('DB_NAME') \r\n DB_USER = os.getenv('DB_USER')\r\n DB_HOST = os.getenv('DB_HOST')\r\n DB_PSW = os.getenv('DB_PSW')\r\n DB_PORT = os.getenv('DB_PORT')\r\n \r\n conn=psycopg2.connect(dbname=DB_NAME, user=DB_USER, host=DB_HOST, password=DB_PSW, port=DB_PORT,connect_timeout=300)\r\n cursor=conn.cursor()\r\n \r\n data = sqlio.read_sql_query(transaccion, conn)\r\n conn.close()\r\n\r\n return data\r\n\r\n\r\n\r\n \r\n \r\n\r\n","repo_name":"juandibanezc/ds4a-project-team-41","sub_path":"models/Linear_Regression/training/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8172,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"71573425745","text":"L = int(input())\nop = input() \nsoma = 0\n\nfor i in range(12):\n for j in range(12):\n valor = float(input())\n if(i == L):\n soma += valor\n \nif(op == 'S'):\n print(\"%.1f\" %soma)\nelse:\n print(\"%.1f\" %(soma/12.0))","repo_name":"Dakarthh/Prog-I","sub_path":"Python/URI em python/1181.py","file_name":"1181.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"25593903882","text":"import shapefile\nimport trimesh\nimport numpy as np\nimport cv2\nimport glob\nimport tqdm\nimport os\nnp.set_printoptions(suppress=True)\n\ndef read_labelme_json(path):\n import json\n f = open(path, encoding='utf-8')\n text = f.read()\n data = json.loads(text)\n shapes=[]\n for s in data['shapes']:\n shapes.append(np.asarray(s['points']))\n # shapes=np.asarray(data['shapes'][0]['points'])\n return shapes\n\nclass camera_param:\n name=\"\"\n K=0\n D=0\n XsYsZs=0\n R=0\n\ndef find_cp(camera_params,name):\n for cp in camera_params:\n if(cp.name==name):\n return cp\n return 0\n\ndef save_shapefile(nparray_list,path):\n # w = shapefile.Writer(path)\n # w.autoBalance = 1\n\n w = shapefile.Writer(path,shapeType=shapefile.POLYGON)\n w.field('FIRST_FLD', 'C', '40')\n for i,n in enumerate(nparray_list):\n w.poly([n.tolist()])\n w.record('FIRST_FLD',str(i) )\n w.close()\n\nif __name__==\"__main__\":\n # 先把pix4d的数据读入\n camera_params=[]\n with open(r\"I:\\20141215大岗山\\all\\1_initial\\params\\all_calibrated_camera_parameters.txt\", \"r\") as f: # 打开文件\n 
internal_camera_params = f.read().split('\\n') # 读取文件\n with open(r\"I:\\20141215大岗山\\all\\1_initial\\params\\all_calibrated_external_camera_parameters.txt\", \"r\") as f: # 打开文件\n external_camera_params = f.read().split('\\n') # 读取文件\n\n for i in range(1,len(external_camera_params)):\n external_camera_param=external_camera_params[i].split(' ')\n if(external_camera_param==['']):\n break\n cp=camera_param()\n cp.name=external_camera_param[0][:-4]\n cp.XsYsZs=np.asarray(external_camera_param[1:4]).astype(np.float)\n\n internal_camera_param=internal_camera_params[(i-1)*10+8:(i-1)*10+18]\n if(internal_camera_param[0].split(' ')[0]!=external_camera_param[0]):\n print('错误')\n # break\n cp.K=internal_camera_param[1:4]\n for i in range(len(cp.K)):\n cp.K[i]=cp.K[i].split(' ')\n cp.K=np.asarray(cp.K).astype(np.float)\n\n radial_distortion=internal_camera_param[4].split(' ')\n tangential_distortion=internal_camera_param[5].split(' ')\n\n cp.D=np.asarray([radial_distortion[0],radial_distortion[1],tangential_distortion[0],tangential_distortion[1],radial_distortion[2]]).astype(np.float)\n\n cp.R=internal_camera_param[-3:]\n for i in range(len(cp.R)):\n cp.R[i]=cp.R[i].split(' ')\n cp.R=np.asarray(cp.R).astype(np.float)\n\n camera_params.append(cp)\n\n json_files=[]\n json_files.extend(glob.glob(r\"I:\\wurenji0203\\data\\*\\*\\*.json\"))\n json_files.sort()\n tri = trimesh.load(r\"I:\\20141215大岗山\\all\\2_densification\\point_cloud\\Mesh.obj\")\n\n huapo=[]\n nishiliu=[]\n bengta=[]\n elses=[]\n\n for i,j in enumerate(tqdm.tqdm(json_files)):\n shapess = read_labelme_json(j)\n name=os.path.split(j)[1][:8]\n cp=find_cp(camera_params,name)\n if(cp!=0):\n for shapes in shapess:\n shapes = np.reshape(shapes, [-1, 1, 2])\n\n shapes2 = cv2.undistortPoints(shapes.copy(), cp.K, cp.D)\n shapes2 = np.reshape(shapes2, [shapes.shape[0], 2])\n\n # shapes3=np.asarray([[0.001,0.001],[7359.0,4911.0]])\n # shapes3=np.reshape(shapes3,[-1,1,2])\n # shapes3=cv2.undistortPoints(shapes3,K,D)\n\n f = np.asarray([1] * shapes.shape[0]).reshape([-1, 1])\n\n shapes2 = np.concatenate([shapes2, f], axis=1)\n # print(shapes2)\n\n # shapes1=cv2.undistortPoints(shapes.copy(),K,D,None,None,K)\n # shapes1=np.reshape(shapes1,[shapes.shape[0],2])\n # shapes1=np.concatenate([shapes1,f],axis=1)\n # shapes1=shapes1@np.linalg.inv(K).T\n\n # 从3_calibrated_external_camera_parameters.txt读到的这张影像的外参\n # imageName X Y Z Omega Phi Kappa\n # DSC_0157.JPG 332081.126000 3488911.819856 4093.688402 -2.946680 3.053212 119.804001\n\n xyz = shapes2.copy()\n\n XsYsZs = cp.XsYsZs\n # OmegaPhiKappa=np.asarray([-2.946680 ,3.053212, 119.804001])\n # OmegaPhiKappa=np.deg2rad(OmegaPhiKappa)\n\n # R=calR(OmegaPhiKappa[0],OmegaPhiKappa[1],OmegaPhiKappa[2])\n # R1=np.linalg.inv(R)\n\n # 这个是内参的最后三行\n R2 = cp.R\n\n # -0.49632901575943477734 0.86794436424468834890 -0.01816834310427051929\n # 0.86649900151927328196 0.49400145706385401034 -0.07170802455019638366\n # -0.05326338781351704077 -0.05133362440616233424 -0.99726018196053234366\n\n # print(R.T)\n # print(R1.T)\n # print(R2.T)\n\n # print('xyz:', xyz)\n\n XwYwZw = xyz @ R2\n # XwYwZw=xyz@R1\n\n # XwYwZw1=np.zeros(XwYwZw.shape)\n # XwYwZw1[:,0]=XwYwZw[:,1].copy()\n # XwYwZw1[:,1]=XwYwZw[:,0].copy()\n # XwYwZw1[:,2]=XwYwZw[:,2].copy()\n XwYwZw1 = XwYwZw.copy()\n XwYwZw1 = XwYwZw1 / XwYwZw1[:, 2:]\n # XwYwZw1[:,0]=-XwYwZw1[:,0]\n # print('XwYwZw1:', XwYwZw1)\n\n # 三维模型,最好是简化过的,这样更快\n ray1 = trimesh.ray.ray_triangle.RayMeshIntersector(tri)\n\n ray_origins = XsYsZs.copy().reshape([1, 3])\n ray_origins = 
np.tile(ray_origins, [XwYwZw.shape[0], 1])\n\n # delta=np.asarray([331704,3489621,2240])-XsYsZs\n # delta=np.asarray([331967.06,3488607.75,1637.374])-XsYsZs\n # delta=delta/delta[2]\n # delta=delta.reshape([-1,3])*-1\n # print('real delta:',delta)\n\n # locations, index_ray, index_tri=ray1.intersects_location(np.asarray([332081.126000 ,3488911.819856, 4093.688402 ]).reshape([1,3]),\n # delta)\n # print(locations)\n\n ray_directions = np.asarray([0, 0, -3]).reshape([1, 3])\n locations, index_ray, index_tri = ray1.intersects_location(ray_origins, -XwYwZw1)\n # print('locations:',locations)\n\n index_ray = index_ray.reshape([-1, 1])\n index_loc = np.concatenate([index_ray, locations], axis=1)\n\n index_loc = index_loc[np.argsort(index_loc[:, 0])]\n # print(index_loc)\n locations=index_loc[:,1:3]\n if ('滑坡' in j):\n huapo.append(locations)\n # 泥石流\n elif ('泥石流' in j):\n nishiliu.append(locations)\n # 崩塌\n elif ('崩塌' in j):\n bengta.append(locations)\n else:\n elses.append(locations)\n # if(i>10):\n # break\n\n save_shapefile(huapo,'huapo')\n save_shapefile(nishiliu,'nishiliu')\n save_shapefile(bengta,'bengta')\n save_shapefile(elses,'elses')\n print()\n # np.savetxt(r'D:\\desktop\\files\\codes\\PycharmProjects\\project_test\\pc\\intersect.xyz', locations)\n","repo_name":"ZivKidd/uav_project","sub_path":"uav_project/重构的.py","file_name":"重构的.py","file_ext":"py","file_size_in_byte":7352,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"899408048","text":"import string\n\nimport requests\n\nfrom ast import literal_eval\n\n\nclass Groups:\n GROUPS_LIST = r'https://ruz.fa.ru/api/search?term={}&type=group'\n\n @classmethod\n def __get_groups(cls, item: str) -> dict:\n request = requests.get(cls.GROUPS_LIST.format(item), verify=False)\n if request.status_code == 200:\n return request.json()\n\n @classmethod\n def get_groups_on_one_letter(cls):\n low = [chr(i) for i in range(ord('а'), ord('а') + 32)]\n dictionary = dict()\n for first_letter in low:\n result = Groups.__get_groups(first_letter)\n for res in result:\n if 'факультет' in res['description'].lower():\n desc = res['description'][:res['description'].find('|') - 1]\n if desc not in dictionary.keys():\n dictionary.setdefault(desc, [res['label']])\n else:\n dictionary[desc].append(res['label'])\n\n return dictionary\n\n @classmethod\n def get_groups_on_two_letters(cls):\n low = [chr(i) for i in range(ord('а'), ord('а') + 32)]\n dictionary = dict()\n for first_letter in low:\n for second_letter in low:\n result = Groups.__get_groups(first_letter + second_letter)\n for res in result:\n if 'факультет' in res['description'].lower():\n desc = res['description'][:res['description'].find('|') - 1]\n if desc not in dictionary.keys():\n dictionary.setdefault(desc, [res['label']])\n else:\n dictionary[desc].append(res['label'])\n\n return dictionary\n\n @classmethod\n def get_groups_on_two_letters_with_digits(cls):\n low = [chr(i) for i in range(ord('а'), ord('а') + 32)]\n nums = string.digits\n dictionary = dict()\n for first_letter in low:\n for second_letter in low:\n for number in nums:\n result = Groups.__get_groups(first_letter + second_letter + number)\n for res in result:\n if 'факультет' in res['description'].lower():\n desc = res['description'][:res['description'].find('|') - 1]\n label = res['label']\n if desc not in dictionary.keys():\n dictionary.setdefault(desc, [label])\n elif '17' not in label and '18' not in label:\n dictionary[desc].append(label)\n\n return dictionary\n\n 
@classmethod\n def __get_all_groups(cls): # TODO\n one_letter = cls.get_groups_on_one_letter()\n two_letters = cls.get_groups_on_two_letters()\n two_letters_and_digits = cls.get_groups_on_two_letters_with_digits()\n result = one_letter | two_letters | two_letters_and_digits\n result = {key: list(set(value)) for key, value in result.items()}\n return result\n\n @staticmethod\n def __read_log(path: str = 'Utils/Groups.txt') -> list:\n result = list()\n with open(path, 'r', encoding='utf-8') as file:\n for row in file.readlines():\n result.append(literal_eval(row))\n return result\n\n @staticmethod\n def clean_log(path: str = 'Utils/Groups.txt'):\n with open(path, 'r+', encoding='utf-8') as file:\n for row in file.readlines():\n result = literal_eval(row)\n for group in result[1]:\n print(group)\n from Schedule import Schedule\n schedule = Schedule.get_group_schedule(group)\n if not schedule:\n result[1].remove(group)\n file.write(str(result) + '\\n')\n\n @classmethod\n def clean_group_types(cls, path: str = 'Utils/Groups.txt'):\n with open(path, 'r+', encoding='utf-8') as file:\n for row in file.readlines():\n result = list(literal_eval(row))\n initials = cls.get_groups_types(result[1])\n for initial in initials:\n from transliterate import translit\n tr_initial = translit(translit(initial, language_code='ru', reversed=True), language_code='ru')\n if not cls.get_groups_by_initial(tr_initial):\n result[1] = list(filter(lambda x: initial not in x, result[1]))\n file.write(str(tuple(result)) + '\\n')\n\n @classmethod\n def get_groups(cls): # TODO\n return cls.__read_log()\n\n @classmethod\n def get_faculties_list(cls): # TODO\n return list(map(lambda x: x[0], cls.__read_log()))\n\n @classmethod\n def get_groups_by_faculty(cls, faculty: str): # TODO\n return [item[1] for item in cls.__read_log() if item[0] == faculty][0]\n\n @classmethod\n def get_groups_by_initial(cls, initial):\n result = list()\n for item in cls.__read_log():\n for i in item[1]:\n if i[:cls.__first_digit(i)].lower() == initial.lower() and '17' not in i and '18' not in i:\n result.append(i)\n return sorted(result)\n\n @staticmethod\n def __first_digit(element):\n for ind, let in enumerate(element):\n if let.isdigit():\n return ind\n\n @classmethod\n def get_groups_types(cls, array):\n return list(set(map(lambda x: x[:cls.__first_digit(x)], array)))\n","repo_name":"PI21-7/FA","sub_path":"Course_1/Telegram_Bot/Utils/Groups.py","file_name":"Groups.py","file_ext":"py","file_size_in_byte":5580,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"6308174842","text":"from typing_extensions import OrderedDict\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http.response import JsonResponse\nfrom django.shortcuts import redirect, render\nfrom django.views.decorators.cache import never_cache\nfrom cart.models import CartItem\nfrom offer.models import Coupen, RedeemedCoupen\nfrom order.forms import OrderForm\nfrom order.models import Order, OrderProduct, Payment\nfrom store.models import Variation\nimport datetime\nimport json\nfrom django.urls import reverse\nimport razorpay\nfrom django.conf import settings\nfrom django.views.decorators.csrf import csrf_exempt\n\n\nrazorpay_client = razorpay.Client(\n auth=(settings.RAZOR_KEY_ID, settings.RAZOR_KEY_SECRET)\n)\n\n# Create your views here.\n\n\n@never_cache\ndef payments(request):\n curr_user = request.user\n body = json.loads(request.body)\n print(body)\n iorder = 
Order.objects.get(\n user=curr_user, is_ordered=False, order_number=body[\"orderID\"]\n )\n print(iorder)\n print(iorder.id)\n\n # save the payment information\n payment = Payment(\n user=curr_user,\n payment_id=body[\"transID\"],\n payment_method=body[\"payment_method\"],\n amount_paid=iorder.order_total,\n status=body[\"status\"],\n )\n payment.save()\n iorder.payment = payment\n iorder.is_ordered = True\n iorder.save()\n\n # move cart items to order product table\n\n if \"quick_buy\" in request.session:\n id = request.session[\"quick_buy\"]\n varient = Variation.objects.get(id=id)\n\n orderproduct = OrderProduct()\n orderproduct.order_id = iorder.id\n orderproduct.payment = payment\n orderproduct.user_id = curr_user.id\n orderproduct.vendor = varient.product.vendor\n orderproduct.products_id = varient.product.id\n orderproduct.variation_id = varient.id\n orderproduct.quantity = 1\n if varient.offer_price():\n off_price = Variation.offer_price(varient)\n price = int(off_price[\"new_price\"])\n orderproduct.price = price\n else:\n orderproduct.price = varient.price\n orderproduct.ordered = True\n orderproduct.save()\n\n # reduce the stock\n\n varient = Variation.objects.get(id=varient.id)\n varient.stock -= 1\n varient.save()\n\n else:\n cart_items = CartItem.objects.filter(user=curr_user)\n\n for item in cart_items:\n orderproduct = OrderProduct()\n orderproduct.order_id = iorder.id\n orderproduct.payment = payment\n orderproduct.user_id = curr_user.id\n orderproduct.vendor = item.varient.product.vendor\n orderproduct.products_id = item.varient.product.id\n orderproduct.variation_id = item.varient.id\n orderproduct.quantity = item.quantity\n if item.varient.offer_price():\n off_price = Variation.offer_price(item.varient)\n price = int(off_price[\"new_price\"])\n orderproduct.price = price\n else:\n orderproduct.price = item.varient.price\n orderproduct.ordered = True\n orderproduct.save()\n\n # reduce the stock\n\n varient = Variation.objects.get(id=item.varient_id)\n print(varient)\n print(varient.stock)\n varient.stock -= item.quantity\n varient.save()\n\n # clear the cart\n CartItem.objects.filter(user=request.user).delete()\n\n # check the coupen code\n # store coupen data\n check = redeem_coupen(request)\n\n # send the transaction-successful response\n data = {\n \"order_number\": iorder.order_number,\n \"trans_id\": payment.payment_id,\n }\n return JsonResponse(data)\n\n\ndef place_order(request, total=0, quantity=0):\n current_user = request.user\n cart_items = CartItem.objects.filter(user=current_user)\n cart_count = cart_items.count()\n\n if \"quick_buy\" not in request.session:\n if cart_count <= 0:\n return redirect(\"shop\")\n\n grand_total = 0\n tax = 0\n\n if \"quick_buy\" in request.session:\n varient_id = request.session[\"quick_buy\"]\n cart_items = Variation.objects.get(id=varient_id)\n check = True\n if cart_items.offer_price():\n off_price = Variation.offer_price(cart_items)\n total = off_price[\"new_price\"]\n print(total)\n else:\n total = cart_items.price\n quantity = 1\n else:\n check = False\n for cart_item in cart_items:\n if cart_item.varient.offer_price():\n off_price = Variation.offer_price(cart_item.varient)\n total += int(off_price[\"new_price\"] * cart_item.quantity)\n print(total)\n else:\n total += int(cart_item.varient.price * cart_item.quantity)\n quantity += int(cart_item.quantity)\n tax = (2 * total) / 100\n grand_total = total + tax\n if \"total_after_discount\" in request.session:\n grand_total = request.session[\"total_after_discount\"]\n grand_total = 
int(grand_total)\n discount = None\n if \"discount\" in request.session:\n discount = request.session[\"discount\"]\n coupen_code = None\n if \"coupen_code\" in request.session:\n coupen_code = request.session[\"coupen_code\"]\n request.session[\"total\"] = grand_total\n if request.method == \"POST\":\n form = OrderForm(request.POST)\n if form.is_valid():\n print(\"Valid Order Form\")\n # order creation\n data = Order()\n data.user = current_user\n data.first_name = form.cleaned_data[\"first_name\"]\n data.last_name = form.cleaned_data[\"last_name\"]\n data.phone = form.cleaned_data[\"phone\"]\n data.email = form.cleaned_data[\"email\"]\n data.address1 = form.cleaned_data[\"address1\"]\n data.address2 = form.cleaned_data[\"address2\"]\n data.city = form.cleaned_data[\"city\"]\n data.country = form.cleaned_data[\"country\"]\n data.state = form.cleaned_data[\"state\"]\n data.pincode = form.cleaned_data[\"pincode\"]\n data.order_note = form.cleaned_data[\"order_note\"]\n data.order_total = grand_total\n data.tax = tax\n data.ip = request.META.get(\"REMOTE_ADDR\")\n data.save()\n\n yr = int(datetime.date.today().strftime(\"%Y\"))\n mt = int(datetime.date.today().strftime(\"%m\"))\n dt = int(datetime.date.today().strftime(\"%d\"))\n d = datetime.date(yr, mt, dt)\n current_date = d.strftime(\"%Y%m%d\")\n\n payment_type = request.POST[\"payment\"]\n\n currency = \"INR\"\n amount = grand_total * 100\n request.session[\"razorpay_amount\"] = amount\n # Create a Razorpay Order\n razorpay_order = razorpay_client.order.create(\n dict(amount=amount, currency=currency, payment_capture=\"0\")\n )\n if payment_type == \"razorpay\":\n order_number = razorpay_order[\"id\"]\n data.order_number = razorpay_order[\"id\"]\n data.save()\n else:\n order_number = current_date + str(data.id)\n data.order_number = order_number\n request.session[\"order_number\"] = order_number\n data.save()\n\n # order id of newly created order.\n razorpay_order_id = razorpay_order[\"id\"]\n callback_url = \"paymenthandler/\"\n # fetch the pending order that was just created\n\n order = Order.objects.get(\n user=current_user, is_ordered=False, order_number=order_number\n )\n context = {\n \"order\": order,\n \"cart_items\": cart_items,\n \"total\": total,\n \"tax\": tax,\n \"grand_total\": grand_total,\n \"razorpay_order_id\": razorpay_order_id,\n \"razorpay_merchant_key\": settings.RAZOR_KEY_ID,\n \"razorpay_amount\": amount,\n \"currency\": currency,\n \"callback_url\": callback_url,\n \"discount\": discount,\n \"coupen_code\": coupen_code,\n \"payment_type\": payment_type,\n \"check\": check,\n }\n return render(request, \"payment.html\", context)\n else:\n return redirect(\"checkout\")\n\n\n@csrf_exempt\n@never_cache\ndef paymenthandler(request):\n # only accept POST request.\n if request.method == \"POST\":\n try:\n # get the required parameters from post request.\n payment_id = request.POST.get(\"razorpay_payment_id\", \"\")\n razorpay_order_id = request.POST.get(\"razorpay_order_id\", \"\")\n signature = request.POST.get(\"razorpay_signature\", \"\")\n params_dict = {\n \"razorpay_order_id\": razorpay_order_id,\n \"razorpay_payment_id\": payment_id,\n \"razorpay_signature\": signature,\n }\n\n result = razorpay_client.utility.verify_payment_signature(\n params_dict\n )\n if result is None:\n amount = request.session[\"razorpay_amount\"]\n try:\n razorpay_client.payment.capture(payment_id, amount)\n messages.success(request, \"Payment Successful\")\n ################\n curr_user = request.user\n iorder = Order.objects.get(\n user=curr_user,\n 
is_ordered=False,\n order_number=razorpay_order_id,\n )\n print(iorder)\n print(iorder.id)\n\n # save the payment information\n payment = Payment(\n user=curr_user,\n payment_id=payment_id,\n payment_method=\"RazorPay\",\n amount_paid=iorder.order_total,\n status=\"Paid\",\n )\n payment.save()\n iorder.payment = payment\n iorder.is_ordered = True\n iorder.save()\n\n # move cart items to order product table\n if \"quick_buy\" in request.session:\n id = request.session[\"quick_buy\"]\n varient = Variation.objects.get(id=id)\n\n orderproduct = OrderProduct()\n orderproduct.order_id = iorder.id\n orderproduct.payment = payment\n orderproduct.user_id = curr_user.id\n orderproduct.vendor = varient.product.vendor\n orderproduct.products_id = varient.product.id\n orderproduct.variation_id = varient.id\n orderproduct.quantity = 1\n if varient.offer_price():\n off_price = Variation.offer_price(varient)\n price = int(off_price[\"new_price\"])\n orderproduct.price = price\n else:\n orderproduct.price = varient.price\n orderproduct.ordered = True\n orderproduct.save()\n\n # reduce the stock\n\n varient = Variation.objects.get(id=varient.id)\n varient.stock -= 1\n varient.save()\n\n else:\n\n cart_items = CartItem.objects.filter(user=curr_user)\n\n for item in cart_items:\n orderproduct = OrderProduct()\n orderproduct.order_id = iorder.id\n orderproduct.payment = payment\n orderproduct.user_id = curr_user.id\n orderproduct.vendor = item.varient.product.vendor\n orderproduct.products_id = item.varient.product.id\n orderproduct.variation_id = item.varient.id\n orderproduct.quantity = item.quantity\n if item.varient.offer_price():\n off_price = Variation.offer_price(item.varient)\n price = int(off_price[\"new_price\"])\n orderproduct.price = price\n else:\n orderproduct.price = item.varient.price\n orderproduct.ordered = True\n orderproduct.save()\n\n # reduce the stock\n\n varient = Variation.objects.get(id=item.varient_id)\n print(varient)\n print(varient.stock)\n varient.stock -= item.quantity\n varient.save()\n\n # clear the cart\n CartItem.objects.filter(user=request.user).delete()\n # send the transaction-successful response\n data = {\n \"order_number\": iorder.order_number,\n \"trans_id\": payment.payment_id,\n }\n param = (\n \"order_number=\"\n + iorder.order_number\n + \"&payment_id=\"\n + payment.payment_id\n )\n ################\n # capture the payment\n messages.success(request, \"Payment Success\")\n print(\"hello\")\n\n # save coupen offer\n # store coupen data\n\n check = redeem_coupen(request)\n print(check)\n\n redirect_url = reverse(\"order_complete\")\n return redirect(f\"{redirect_url}?{param}\")\n # render success page on successful capture of payment\n except Exception as e:\n print(e)\n messages.error(request, \"Payment Failed\")\n # if there is an error while capturing payment.\n return redirect(\"checkout\")\n else:\n\n return redirect(\"checkout\")\n # if signature verification fails.\n\n except:\n return redirect(\"checkout\")\n # if we don't find the required parameters in POST data\n else:\n # if other than POST request is made.\n return redirect(\"checkout\")\n\n\n@never_cache\ndef cash_on_delivery(request):\n curr_user = request.user\n order_number = request.session[\"order_number\"]\n iorder = Order.objects.get(\n user=curr_user, is_ordered=False, order_number=order_number\n )\n print(iorder)\n # save the payment information\n payment = Payment(\n user=curr_user,\n payment_id=order_number,\n payment_method=\"Cash on Delivery\",\n amount_paid=iorder.order_total,\n status=\"Paid\",\n )\n 
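# Editorial note: for cash on delivery the order number doubles as the\n # payment id and the Payment row is created as 'Paid' up front; it is\n # saved below and then attached to the order.\n 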
payment.save()\n iorder.payment = payment\n iorder.is_ordered = True\n iorder.save()\n\n # move cart items to order product table\n\n if \"quick_buy\" in request.session:\n id = request.session[\"quick_buy\"]\n varient = Variation.objects.get(id=id)\n\n orderproduct = OrderProduct()\n orderproduct.order_id = iorder.id\n orderproduct.payment = payment\n orderproduct.user_id = curr_user.id\n orderproduct.vendor = varient.product.vendor\n orderproduct.products_id = varient.product.id\n orderproduct.variation_id = varient.id\n orderproduct.quantity = 1\n if varient.offer_price():\n off_price = Variation.offer_price(varient)\n price = int(off_price[\"new_price\"])\n orderproduct.price = price\n else:\n orderproduct.price = varient.price\n orderproduct.ordered = True\n orderproduct.save()\n\n # reduce the stock\n\n varient = Variation.objects.get(id=varient.id)\n varient.stock -= 1\n varient.save()\n\n else:\n cart_items = CartItem.objects.filter(user=curr_user)\n\n for item in cart_items:\n orderproduct = OrderProduct()\n orderproduct.order_id = iorder.id\n orderproduct.payment = payment\n orderproduct.user_id = curr_user.id\n orderproduct.vendor = item.varient.product.vendor\n orderproduct.products_id = item.varient.product.id\n orderproduct.variation_id = item.varient.id\n orderproduct.quantity = item.quantity\n if item.varient.offer_price():\n off_price = Variation.offer_price(item.varient)\n price = int(off_price[\"new_price\"])\n orderproduct.price = price\n else:\n orderproduct.price = item.varient.price\n orderproduct.ordered = True\n orderproduct.save()\n\n # reduce the stock\n\n varient = Variation.objects.get(id=item.varient_id)\n print(varient)\n print(varient.stock)\n varient.stock -= item.quantity\n varient.save()\n\n # clear the cart\n CartItem.objects.filter(user=request.user).delete()\n\n # store coupen data\n\n check = redeem_coupen(request)\n print(check)\n\n # send the transaction-successful response\n\n data = {\n \"order_number\": iorder.order_number,\n \"trans_id\": payment.payment_id,\n }\n param = (\n \"order_number=\"\n + iorder.order_number\n + \"&payment_id=\"\n + payment.payment_id\n )\n\n messages.success(request, \"Payment Success\")\n if \"order_number\" in request.session:\n del request.session[\"order_number\"]\n\n redirect_url = reverse(\"order_complete\")\n return redirect(f\"{redirect_url}?{param}\")\n\n\ndef redeem_coupen(request):\n\n if \"discount\" in request.session:\n coupen_code = request.session[\"coupen_code\"]\n curr_user = request.user\n coupen = Coupen.objects.get(coupen_code=coupen_code)\n coupen.coupen_count -= 1\n coupen.save()\n redeem = RedeemedCoupen()\n redeem.user = curr_user\n redeem.coupen = coupen\n redeem.save()\n\n del request.session[\"discount\"]\n del request.session[\"total_after_discount\"]\n del request.session[\"coupen_code\"]\n return True\n else:\n return False\n\n\n@never_cache\ndef order_complete(request):\n order_number = request.GET.get(\"order_number\")\n trans_id = request.GET.get(\"payment_id\")\n try:\n order = Order.objects.get(order_number=order_number, is_ordered=True)\n order_products = OrderProduct.objects.filter(order_id=order.id)\n sub_total = 0\n\n for i in order_products:\n sub_total += i.price * i.quantity\n\n context = {\n \"order\": order,\n \"sub_total\": sub_total,\n \"order_products\": order_products,\n }\n return render(request, \"order-complete.html\", context)\n except ObjectDoesNotExist:\n return 
redirect(\"home\")\n","repo_name":"faizee-f/gadjeto","sub_path":"order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":19347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"69914615827","text":"import getpass\nimport inspect\nimport json\n\nimport kubernetes\nimport yaml\n\nRESOURCE_PLURAL = 'pipelines'\nVERSION = 'v1alpha1'\nGROUP = 'dataflow.argoproj.io'\n\nDEFAULT_RUNTIME = 'python3-9'\nGROUPS_VOLUME_NAME = 'groups'\nUSER = getpass.getuser()\n\n\ndef str_presenter(dumper, data):\n if '\\n' in data or '\"' in data or \"'\" in data:\n return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')\n return dumper.represent_scalar('tag:yaml.org,2002:str', data)\n\n\nyaml.add_representer(str, str_presenter)\n\n\nclass PipelineBuilder:\n def __init__(self, name):\n self._name = name\n self._resourceVersion = None\n self._namespace = None\n self._annotations = {}\n self._steps = []\n self.owner(USER)\n\n def annotate(self, name, value):\n self._annotations[name] = value\n return self\n\n def owner(self, value):\n return self.annotate('dataflow.argoproj.io/owner', value)\n\n def describe(self, value):\n return self.annotate('dataflow.argoproj.io/description', value)\n\n def namespace(self, namespace):\n self._namespace = namespace\n return self\n\n def step(self, step):\n self._steps.append(step)\n return self\n\n def dump(self):\n m = {\n 'name': self._name,\n 'annotations': self._annotations\n }\n if self._namespace:\n m['namespace'] = self._namespace\n if self._resourceVersion:\n m['resourceVersion'] = self._resourceVersion\n return {\n 'apiVersion': 'dataflow.argoproj.io/v1alpha1',\n 'kind': 'Pipeline',\n 'metadata': m,\n 'spec': {\n 'steps': [x.dump() for x in self._steps]\n }\n }\n\n def yaml(self):\n return yaml.dump(self.dump())\n\n def json(self):\n return json.dumps(self.dump())\n\n def save(self):\n with open(self._name + '-pipeline.yaml', \"w\") as f:\n f.write(self.yaml())\n\n def start(self):\n # https://github.com/kubernetes-client/python\n kubernetes.config.load_kube_config()\n\n # https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/CustomObjectsApi.md\n api = kubernetes.client.CustomObjectsApi()\n try:\n x = api.get_namespaced_custom_object(\n GROUP, VERSION, self._namespace, RESOURCE_PLURAL, self._name)\n self._resourceVersion = x['metadata']['resourceVersion']\n api.replace_namespaced_custom_object(GROUP, VERSION, self._namespace, RESOURCE_PLURAL, self._name,\n self.dump())\n print('updated pipeline ' + self._namespace + '/' + self._name)\n except kubernetes.client.rest.ApiException as e:\n if e.status == 404:\n pass\n api.create_namespaced_custom_object(\n GROUP, VERSION, self._namespace, RESOURCE_PLURAL, self.dump())\n print('created pipeline ' + self._namespace + '/' + self._name)\n return self\n\n def watch(self):\n api = kubernetes.client.CustomObjectsApi()\n\n for event in kubernetes.watch.Watch().stream(api.list_namespaced_custom_object, GROUP, VERSION, self._namespace,\n RESOURCE_PLURAL,\n field_selector='metadata.name=' + self._name, watch=True):\n status = (event['object'].get('status') or {})\n phase = status.get('phase') or 'Unknown'\n message = status.get('message') or ''\n print(phase + ': ' + message)\n\n def run(self):\n self.start()\n self.watch()\n\n\ndef pipeline(name):\n return PipelineBuilder(name)\n\n\nclass Sink:\n def __init__(self, name=None):\n self._name = name\n\n def dump(self):\n x = {}\n if self._name:\n x['name'] = self._name\n 
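# Editorial note: this base dump() records only the optional sink name;\n # each subclass below calls super().dump() and then adds its own key\n # (log/http/kafka/stan/jetstream) on top of it.\n 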
return x\n\n\nclass LogSink(Sink):\n def __init__(self, name=None):\n super().__init__(name)\n\n def dump(self):\n x = super().dump()\n x['log'] = {}\n return x\n\n\nclass HTTPSink(Sink):\n def __init__(self, url, name=None, insecureSkipVerify=None, headers=None):\n super().__init__(name)\n self._insecureSkipVerify = insecureSkipVerify\n self._url = url\n self._headers = headers\n\n def dump(self):\n x = super().dump()\n h = {'url': self._url}\n if self._headers:\n h['headers'] = self._headers\n if self._insecureSkipVerify:\n h['insecureSkipVerify'] = self._insecureSkipVerify\n x['http'] = h\n return x\n\n\nclass KafkaSink(Sink):\n def __init__(self, subject, name=None, a_sync=False, batchSize=None, linger=None, compressionType=None, acks=None,\n enableIdempotence=None, messageTimeout=None, maxInflight=None):\n super().__init__(name)\n self._subject = subject\n self._a_sync = a_sync\n self._batchSize = batchSize\n self._linger = linger\n self._compressionType = compressionType\n self._acks = acks\n self._enableIdempotence = enableIdempotence\n self._messageTimeout = messageTimeout\n self._maxInflight = maxInflight\n\n def dump(self):\n x = super().dump()\n y = {'topic': self._subject}\n if self._a_sync:\n y['async'] = True\n if self._batchSize:\n y['batchSize'] = self._batchSize\n if self._linger:\n y['linger'] = self._linger\n if self._compressionType:\n y['compressionType'] = self._compressionType\n if self._acks:\n y['acks'] = self._acks\n if self._enableIdempotence:\n y['enableIdempotence'] = self._enableIdempotence\n if self._messageTimeout:\n y['messageTimeout'] = self._messageTimeout\n if self._maxInflight:\n y['maxInflight'] = self._maxInflight\n x['kafka'] = y\n return x\n\n\nclass STANSink(Sink):\n def __init__(self, topic, name=None):\n super().__init__(name)\n self._topic = topic\n\n def dump(self):\n x = super().dump()\n x['stan'] = {'subject': self._topic}\n return x\n\n\nclass JetStreamSink(Sink):\n def __init__(self, subject, name=None):\n super().__init__(name=name)\n self._subject = subject\n\n def dump(self):\n x = super().dump()\n x['jetstream'] = {'subject': self._subject}\n return x\n\n\nclass Step:\n def __init__(self, name, sources=None, sinks=None, volumes=None, terminator=False, sidecarResource=None):\n self._name = name or 'main'\n self._sources = sources or []\n self._sinks = sinks or []\n self._scale = None\n self._volumes = volumes or []\n self._terminator = terminator\n self._annotations = []\n self._sidecarResources = sidecarResource\n\n def log(self, name=None):\n self._sinks.append(LogSink(name=name))\n return self\n\n def http(self, url, name=None, insecureSkipVerify=None, headers=None):\n self._sinks.append(HTTPSink(\n url, name=name, insecureSkipVerify=insecureSkipVerify, headers=headers))\n return self\n\n def kafka(self, subject, name=None, a_sync=False, batchSize=None, linger=None, compressionType=None, acks=None,\n enableIdempotence=None, messageTimeout=None, maxInflight=None):\n self._sinks.append(KafkaSink(subject, name=name, a_sync=a_sync, batchSize=batchSize, linger=linger,\n compressionType=compressionType, acks=acks, enableIdempotence=enableIdempotence, messageTimeout=messageTimeout, maxInflight=maxInflight))\n return self\n\n def scale(self, desiredReplicas, scalingDelay=None, peekDelay=None):\n self._scale = {\n 'desiredReplicas': desiredReplicas,\n }\n if peekDelay:\n self._scale['peekDelay'] = peekDelay\n if scalingDelay:\n self._scale['scalingDelay'] = scalingDelay\n return self\n\n def stan(self, topic, name=None):\n 
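# Editorial note: like .log()/.http()/.kafka() above, this helper appends\n # a sink and returns self so a step can be configured fluently.\n 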
self._sinks.append(STANSink(topic, name=name))\n return self\n\n def jetstream(self, subject, name=None):\n self._sinks.append(JetStreamSink(subject, name=name))\n return self\n\n def terminator(self):\n self._terminator = True\n return self\n\n def annotations(self, annotations):\n self._annotations = annotations\n return self\n\n def sidecarResources(self, sidecarResources):\n self._sidecarResources = sidecarResources\n return self\n\n def dump(self):\n y = {\n 'name': self._name,\n }\n if len(self._sources):\n y['sources'] = [x.dump() for x in self._sources]\n if len(self._sinks):\n y['sinks'] = [x.dump() for x in self._sinks]\n if self._scale:\n y['scale'] = self._scale\n if len(self._volumes) > 0:\n y['volumes'] = self._volumes\n if self._terminator:\n y['terminator'] = True\n if self._annotations:\n # TODO - labels too please\n y['metadata'] = {\n 'annotations': self._annotations\n }\n if self._sidecarResources:\n y['sidecar'] = {\n 'resources': self._sidecarResources\n }\n return y\n\n\nclass CatStep(Step):\n def __init__(self, name=None, sources=None, sinks=None):\n super().__init__(name=name, sources=sources, sinks=sinks)\n\n def dump(self):\n x = super().dump()\n x['cat'] = {}\n return x\n\n\nclass ContainerStep(Step):\n def __init__(self, name=None, image=None, args=None, fifo=False, volumes=None, volumeMounts=None, sources=None,\n sinks=None,\n env=None, resources=None,\n terminator=False):\n super().__init__(name, sources=sources, sinks=sinks,\n volumes=volumes, terminator=terminator)\n assert image\n self._image = image\n self._args = args or []\n self._fifo = fifo\n self._volumeMounts = volumeMounts or []\n self._env = env\n self._resources = resources\n\n def dump(self):\n x = super().dump()\n c = {\n 'image': self._image,\n }\n if len(self._args) > 0:\n c['args'] = self._args\n if self._fifo:\n c['in'] = {'fifo': True}\n if len(self._volumeMounts) > 0:\n c['volumeMounts'] = self._volumeMounts\n if self._env:\n c['env'] = [{'name': x, 'value': self._env[x]}\n for k, x in enumerate(self._env)]\n if self._resources:\n c['resources'] = self._resources\n x['container'] = c\n return x\n\n\nclass DedupeStep(Step):\n def __init__(self, name=None, sources=None, sinks=None):\n super().__init__(name, sources=sources, sinks=sinks)\n\n def dump(self):\n x = super().dump()\n x['dedupe'] = {}\n return x\n\n\nclass ExpandStep(Step):\n def __init__(self, name=None, sources=None, sinks=None):\n super().__init__(name, sources=sources, sinks=sinks)\n\n def dump(self):\n x = super().dump()\n x['expand'] = {}\n return x\n\n\nclass FilterStep(Step):\n def __init__(self, name=None, expression=None, sources=None, sinks=None):\n super().__init__(name, sources=sources, sinks=sinks)\n assert expression\n self._expression = expression\n\n def dump(self):\n x = super().dump()\n x['filter'] = {\n 'expression': self._expression\n }\n return x\n\n\nclass GitStep(Step):\n def __init__(self, name=None, url=None, branch=None, path=None, image=None, sources=None, sinks=None, env=None,\n terminator=False,\n command=None):\n super().__init__(name, sources=sources, sinks=sinks, terminator=terminator)\n assert url\n assert image\n self._url = url\n self._branch = branch or 'main'\n self._path = path or '.'\n self._image = image\n self._env = env\n self._command = command\n\n def dump(self):\n x = super().dump()\n y = {\n 'url': self._url,\n 'branch': self._branch,\n 'path': self._path,\n 'image': self._image\n }\n if self._command:\n y['command'] = self._command\n if self._env:\n y['env'] = self._env\n x['git'] = 
y\n return x\n\n\ndef storageVolumes(storage=None):\n if storage:\n storage['name'] = GROUPS_VOLUME_NAME\n return [storage]\n return []\n\n\nclass GroupStep(Step):\n def __init__(self, name=None, key=None, format=None, endOfGroup=None, storage=None, sources=None, sinks=None):\n super().__init__(name, sources=sources, sinks=sinks, volumes=storageVolumes(storage))\n assert key\n assert format\n assert endOfGroup\n self._key = key\n self._format = format\n self._endOfGroup = endOfGroup\n self._storage = storage\n\n def dump(self):\n x = super().dump()\n y = {\n 'key': self._key,\n 'format': self._format,\n 'endOfGroup': self._endOfGroup,\n }\n if self._storage:\n y['storage'] = {\n 'name': GROUPS_VOLUME_NAME\n }\n x['group'] = y\n return x\n\n\nclass FlattenStep(Step):\n def __init__(self, name=None, sources=None, sinks=None):\n super().__init__(name, sources=sources, sinks=sinks)\n\n def dump(self):\n x = super().dump()\n x['flatten'] = {}\n return x\n\n\nclass CodeStep(Step):\n def __init__(self, name=None, source=None, code=None, runtime=None, image=None, sources=None, sinks=None, terminator=False):\n super().__init__(name, sources=sources, sinks=sinks, terminator=terminator)\n if source:\n self._source = inspect.getsource(source).replace('def ' + source.__name__ + str(inspect.signature(source)),\n 'def handler' + str(inspect.signature(source)))\n else:\n self._source = code\n if runtime:\n self._runtime = runtime\n else:\n self._runtime = DEFAULT_RUNTIME\n self._image = image\n\n def dump(self):\n x = super().dump()\n y = {\n 'source': self._source,\n }\n if self._runtime:\n y['runtime'] = self._runtime\n if self._image:\n y['image'] = self._image\n x['code'] = y\n return x\n\n\nclass MapStep(Step):\n def __init__(self, name=None, expression=None, sources=None, sinks=None):\n super().__init__(name, sources=sources, sinks=sinks)\n assert expression\n self._expression = expression\n\n def dump(self):\n x = super().dump()\n x['map'] = {\n 'expression': self._expression\n }\n return x\n\n\nclass Source:\n def __init__(self, name=None, retry=None):\n self._name = name\n self._retry = retry\n\n def dump(self):\n x = {}\n if self._name:\n x['name'] = self._name\n if self._retry:\n x['retry'] = self._retry\n return x\n\n def cat(self, name=None):\n return CatStep(name, sources=[self])\n\n def container(self, name=None, image=None, args=None, fifo=False, volumes=None, volumeMounts=None, env=None,\n resources=None,\n terminator=False):\n return ContainerStep(name, sources=[self], image=image, args=args, fifo=fifo, volumes=volumes,\n volumeMounts=volumeMounts, env=env, resources=resources, terminator=terminator)\n\n def dedupe(self, name=None):\n return DedupeStep(name, sources=[self])\n\n def expand(self, name=None):\n return ExpandStep(name, sources=[self])\n\n def filter(self, name=None, expression=None):\n return FilterStep(name, expression, sources=[self])\n\n def git(self, name=None, url=None, branch=None, path=None, image=None, env=None, command=None):\n return GitStep(name, url, branch, path, image, sources=[self], env=env, command=command)\n\n def group(self, name=None, key=None, format=None, endOfGroup=None, storage=None):\n return GroupStep(name, key, format, endOfGroup, storage, sources=[self])\n\n def flatten(self, name=None):\n return FlattenStep(name, sources=[self])\n\n def code(self, name=None, source=None, code=None, runtime=None, image=None):\n return CodeStep(name, source=source, code=code, runtime=runtime, image=image, sources=[self])\n\n def map(self, name=None, 
expression=None):\n return MapStep(name, expression, sources=[self])\n\n\ndef cat(name=None):\n return CatStep(name)\n\n\ndef container(name=None, image=None, args=None, fifo=False, volumes=None, volumeMounts=None, env=None, resources=None,\n terminator=False):\n return ContainerStep(name, terminator=terminator, image=image, args=args, fifo=fifo, volumes=volumes,\n volumeMounts=volumeMounts, env=env, resources=resources)\n\n\ndef dedupe(name=None):\n return DedupeStep(name)\n\n\ndef expand(name=None):\n return ExpandStep(name)\n\n\ndef filter(name=None, filter=None):\n return FilterStep(name, filter)\n\n\ndef git(name=None, url=None, branch=None, path=None, image=None, env=None, command=None):\n return GitStep(name, url, branch, path, image, env=env, command=command)\n\n\ndef group(name=None, key=None, format=None, endOfGroup=None, storage=None):\n return GroupStep(name, key, format, endOfGroup, storage)\n\n\ndef flatten(name=None):\n return FlattenStep(name)\n\n\ndef handler(name=None, handler=None, code=None, runtime=None):\n return CodeStep(name, handler, code, runtime)\n\n\ndef map(name=None, map=None):\n return MapStep(name, map)\n\n\nclass CronSource(Source):\n def __init__(self, schedule=None, layout=None, name=None, retry=None):\n super().__init__(name=name, retry=retry)\n assert schedule\n self._schedule = schedule\n self._layout = layout\n\n def dump(self):\n x = super().dump()\n y = {'schedule': self._schedule}\n if self._layout:\n y['layout'] = self._layout\n x['cron'] = y\n return x\n\n\nclass HTTPSource(Source):\n def __init__(self, name=None, retry=None, serviceName=None):\n super().__init__(name=name, retry=retry)\n self._serviceName = serviceName\n\n def dump(self):\n x = super().dump()\n h = {}\n if self._serviceName:\n h['serviceName'] = self._serviceName\n x['http'] = h\n return x\n\n\nclass KafkaSource(Source):\n def __init__(self, topic, name=None, retry=None, startOffset=None, fetchMin=None, fetchWaitMax=None, groupId=None):\n super().__init__(name=name, retry=retry)\n assert topic\n self._topic = topic\n self._startOffset = startOffset\n self._fetchMin = fetchMin\n self._fetchWaitMax = fetchWaitMax\n self._groupId = groupId\n\n def dump(self):\n x = super().dump()\n y = {'topic': self._topic}\n if self._startOffset:\n y[\"startOffset\"] = self._startOffset\n if self._fetchMin:\n y[\"fetchMin\"] = self._fetchMin\n if self._fetchWaitMax:\n y[\"fetchWaitMax\"] = self._fetchWaitMax\n if self._groupId:\n y[\"groupId\"] = self._groupId\n x['kafka'] = y\n return x\n\n\nclass STANSource(Source):\n def __init__(self, subject, name=None, retry=None):\n super().__init__(name=name, retry=retry)\n assert subject\n self._subject = subject\n\n def dump(self):\n x = super().dump()\n y = {'subject': self._subject}\n x['stan'] = y\n return x\n\n\nclass JetStreamSource(Source):\n def __init__(self, subject, name=None, retry=None):\n super().__init__(name=name, retry=retry)\n assert subject\n self._subject = subject\n\n def dump(self):\n x = super().dump()\n y = {'subject': self._subject}\n x['jetstream'] = y\n return x\n\n\ndef cron(schedule=None, layout=None, name=None, retry=None):\n return CronSource(schedule, layout=layout, name=name, retry=retry)\n\n\ndef http(name=None, retry=None, serviceName=None):\n return HTTPSource(name=name, serviceName=serviceName, retry=retry)\n\n\ndef kafka(topic=None, name=None, retry=None, startOffset=None, fetchMin=None, fetchWaitMax=None, groupId=None):\n return KafkaSource(topic, name=name, retry=retry, startOffset=startOffset, fetchMin=fetchMin,\n 
fetchWaitMax=fetchWaitMax, groupId=groupId)\n\n\ndef stan(subject=None, name=None, retry=None):\n return STANSource(subject, name=name, retry=retry)\n\n\ndef jetstream(subject=None, name=None, retry=None):\n return JetStreamSource(subject, name, retry=retry)\n","repo_name":"argoproj-labs/old-argo-dataflow","sub_path":"dsls/python/argo_dataflow/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":20791,"program_lang":"python","lang":"en","doc_type":"code","stars":270,"dataset":"github-code","pt":"48"} +{"seq_id":"30824947962","text":"# Exercise 5: Display numbers from a list using loop\n# Conditions:\n# The number must be divisible by five\n# If the number is greater than 150, then skip it and move to the next number\n# If the number is greater than 500, then stop the loop\nnumbers = [12, 75, 150, 180, 145, 525, 50]\n\nfor number in numbers:\n    if number > 500:\n        break\n    if number > 150:\n        continue\n    if number % 5 == 0:\n        print(number)\n\n","repo_name":"jahidulij/pythonExercises","sub_path":"Loops/e5_list_display.py","file_name":"e5_list_display.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20123673205","text":"from random import randint, random\nfrom random import choice as random_choice\nfrom statistics import mean\n\nimport numpy as np\nfrom DummyNetwork import DummyNetwork\nfrom evaluator_new import Evaluator\nimport json\nfrom numpy.random import seed\n\nseed(42)\n\n\nclass GeneticAlgorithm:\n    def __init__(self, history_file='history.txt'):\n        self.history_file = history_file\n        self.mutate = 0.2\n\n    def start(self, target, population_count, ga_epochs, load_population=False):\n        if load_population:\n            curent_population = self.load_population(population_count)\n        else:\n            curent_population = self.spawn_population(population_count)\n        for epoch_counter in range(ga_epochs):\n            curent_population = self.evolve(curent_population, target, epoch_counter=epoch_counter, mutate=self.mutate)\n\n    def evolve(self, pop, target, retain=0.2, random_select=0.05, mutate=0.2, epoch_counter=0):\n        # train each individual in the population if needed\n\n        # grade each individual from the population and sort them\n\n        unit_vs_grade = [(self.idividual_grade(x, target), x) for x in pop]\n        grades = [x[0] for x in unit_vs_grade]\n        if len(set(grades)) < 3:\n            self.mutate = 0.5\n\n        self.add_to_history(pop, grades, target, epoch_counter)\n        sorted_unit_vs_grade = sorted(unit_vs_grade, key=lambda y: y[0])\n        graded = [x[1] for x in sorted_unit_vs_grade]\n        print(f\"Best: {[sorted_unit_vs_grade[idx][0] for idx in range(10)]}\")\n        # choose parents\n        retain_length = int(len(graded) * retain)\n        parents = graded[:retain_length]\n\n        # add non-best individuals for diversity\n        for individual in graded[retain_length:]:\n            if random_select > random():\n                parents.append(individual)\n\n        # mutations\n        to_mutate = []\n        for individual_idx in range(1, len(parents)):\n            if mutate > random():\n                # change a random element of the individual\n                to_mutate.append(individual_idx)\n\n        self.mutate_selected(to_mutate, parents)\n\n        print(\"All mutations finished!\")\n\n        # breed parents\n        parents_length = len(parents)\n        desired_length = len(pop) - parents_length\n        children = []\n        while len(children) < desired_length:\n            male = randint(0, parents_length - 1)\n            female = randint(male, parents_length - 1)\n            if male != female:\n                male = parents[male]\n                female = parents[female]\n                child = self.breed_parents(male, 
female)\n children.append(child)\n\n parents.extend(children)\n return parents\n\n @staticmethod\n def mutate_selected(to_mutate, parents):\n print(\"Mutation!\")\n for idx in to_mutate:\n parents[idx].init_layers()\n\n def spawn_population(self, count):\n # Create a number of inviduals\n # return: a list of individuals\n pop = [DummyNetwork() for _ in range(count)]\n for ind in pop:\n ind.init_layers()\n print(pop)\n return pop\n\n def load_population_params(self):\n with open('last_population.txt', 'r') as last_population_file:\n population_params = json.load(last_population_file)\n\n population_params = [json.loads(param) for param in population_params]\n return population_params\n\n def load_population(self, population_count):\n parameters = [\n {'W1': [np.array([-0.08075051, -0.00991325, -0.13223971, -0.07179357, 0.03552764]),\n np.array([0.04964046, 0.07877713, -0.03497126, 0.00275545, -0.05813531]),\n np.array([-0.04141991, 0.08749575, 0.10646686, -0.0386272, -0.00092605]),\n np.array([0.05735792, -0.05083285, -0.01187205, -0.00992187, -0.04029669])],\n 'b1': [np.array([-0.02778843]), np.array([0.05890303]), np.array([0.0116377]), np.array([-0.01171745])],\n 'W2': [np.array([-0.00152704, 0.09348534, 0.03575567, 0.02454027]),\n np.array([-0.04502816, -0.01994074, 0.03037399, 0.0899285]),\n np.array([0.02514854, 0.05683151, 0.03631307, 0.00758105])],\n 'b2': [np.array([-0.03327081]), np.array([0.15642536]), np.array([-0.04355327])]}\n,\n {'W1': [np.array([-0.08075051, -0.00991325, -0.13223971, -0.07179357, 0.03552764]),\n np.array([0.04964046, 0.07877713, -0.03497126, 0.00275545, -0.05813531]),\n np.array([-0.04141991, 0.08749575, 0.10646686, -0.0386272, -0.00092605]),\n np.array([0.05735792, -0.05083285, -0.01187205, -0.00992187, -0.04029669])],\n 'b1': [np.array([-0.02778843]), np.array([0.05890303]), np.array([0.0116377]), np.array([-0.01171745])],\n 'W2': [np.array([-0.00152704, 0.09348534, 0.03575567, 0.02454027]),\n np.array([-0.04502816, -0.01994074, 0.03037399, 0.0899285]),\n np.array([0.02514854, 0.05683151, 0.03631307, 0.00758105])],\n 'b2': [np.array([-0.03327081]), np.array([0.15642536]), np.array([-0.04355327])]}\n,\n {'W1': [np.array([-0.08075051, -0.00991325, -0.13223971, -0.07179357, 0.03552764]),\n np.array([0.04964046, 0.07877713, -0.03497126, 0.00275545, -0.05813531]),\n np.array([-0.04141991, 0.08749575, 0.10646686, -0.0386272, -0.00092605]),\n np.array([0.05735792, -0.05083285, -0.01187205, -0.00992187, -0.04029669])],\n 'b1': [np.array([-0.02778843]), np.array([0.05890303]), np.array([0.0116377]), np.array([-0.01171745])],\n 'W2': [np.array([-0.00152704, 0.09348534, 0.03575567, 0.02454027]),\n np.array([-0.04502816, -0.01994074, 0.03037399, 0.0899285]),\n np.array([0.02514854, 0.05683151, 0.03631307, 0.00758105])],\n 'b2': [np.array([-0.03327081]), np.array([0.15642536]), np.array([-0.04355327])]}\n ]\n # population_params = self.load_population_params()\n population = [DummyNetwork(params=dict(param)) for param in parameters]\n if len(population) < population_count:\n for i in range(population_count - len(population)):\n d = DummyNetwork()\n d.init_layers()\n population.append(d)\n return population\n\n def save_population(self, population):\n # pop_to_string = [NU.toJSON() for NU in population]\n #\n # with open('last_population.txt', 'w+') as last_population_file:\n # last_population_file.write(json.dumps(pop_to_string))\n pass\n\n def add_to_history(self, population, grades, target, epoch):\n print(f\"Adding epoch {epoch} to history!\\n\")\n 
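# Editorial note: save_population() is currently a stub (its body is\n # commented out above), so only the plain-text history written below\n # actually persists anything between runs.\n 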
self.save_population(population)\n result_string = f'\\nEPOCH {epoch}\\n'\n for idx, individual in enumerate(population):\n result_string += \"Model: \\n\" + str(individual) + \"\\n\"\n result_string += f\"Grade: {grades[idx]}\\n\"\n\n with open(self.history_file, 'a+') as history_file:\n history_file.write(result_string)\n\n @staticmethod\n def idividual_grade(individual, target):\n # Determine the fitness of an individual. Lower is better.\n evaluator = Evaluator(individual)\n left = evaluator.evaluate_agent('left')\n evaluator = Evaluator(individual)\n right = evaluator.evaluate_agent('right')\n return target - left - right\n\n @staticmethod\n def breed_parents(male, female):\n m_params = male.params_values\n f_params = female.params_values\n l_rate = 0.01\n new_params = {}\n\n for key in m_params:\n new_params[key] = []\n for i in range(len(m_params[key])):\n row = []\n for k in range(len(m_params[key][i])):\n row.append((m_params[key][i][k] + f_params[key][i][k]) / 2)\n new_params[key].append(np.array(row))\n\n child = DummyNetwork(new_params)\n\n return child\n\nseed(42)\nga = GeneticAlgorithm()\nga.start(20000, 20, 100, load_population=True)\nprint(\"Finished!\")\n","repo_name":"szymonczaplak/self-driving-car-project","sub_path":"geneticAlgorithmLearning/GAStrucureLearning.py","file_name":"GAStrucureLearning.py","file_ext":"py","file_size_in_byte":8155,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"73285392467","text":"#Group Member:\n#Meng Sun ID:z5149213\n#Zechao Li ID:z5172016\n\nimport zipfile\nimport numpy as np\nfrom scipy.sparse import lil_matrix\nfrom sklearn.decomposition import TruncatedSVD\nfrom scipy.sparse import csr_matrix\nimport random\nimport gpflow\nfrom gpflow.test_util import notebook_niter\nfrom sklearn.linear_model import LogisticRegression\n\n\ndef read_train_zip(filename):\n zip_f = zipfile.ZipFile(filename)\n file_name_list = zip_f.namelist()[1:]\n x_file = []\n sub_x = []\n y_i = 0\n data_y = None\n all_x = []\n\n for file in file_name_list:\n if file[-1] == 'x':\n x_file.append(file)\n\n for xfile in x_file:\n temp_x_dict = dict()\n file_x_c = zip_f.read(xfile).decode('utf-8').split(\"\\n\")[:-1]\n file_y_c = [int(e) for e in zip_f.read(xfile[:-2] + \".y\").decode('utf-8').split(\"\\n\")[:-1]]\n\n for e in file_x_c:\n l = e.split(' ')\n if int(l[0]) not in temp_x_dict:\n temp_x_dict[int(l[0])] = [int(l[1])]\n else:\n temp_x_dict[int(l[0])].append(int(l[1]))\n all_x.append(temp_x_dict)\n for e in temp_x_dict:\n sub_x.append(e)\n\n sub_y_lable = np.zeros(shape=[len(file_y_c), 1])\n for i in range(len(file_y_c)):\n sub_y_lable[i][0] = file_y_c[i]\n if y_i == 0:\n data_y = sub_y_lable\n y_i = 1\n else:\n data_y = np.vstack((data_y, sub_y_lable))\n\n data_x = lil_matrix((len(sub_x), 2035523))\n k = 0\n for x in all_x:\n for e in x:\n for i in x[e]:\n data_x[k, i - 1] = 1\n k += 1\n\n return data_x, data_y\n\n\ndef read_test_zip(filename):\n zip_f = zipfile.ZipFile(filename)\n file_name_list = zip_f.namelist()[1:]\n x_file = []\n sub_x = []\n all_x = []\n seperate_list = []\n for file in file_name_list:\n if file[-1] == 'x':\n x_file.append(int(file[20:-2]))\n\n x_file.sort()\n for xfile in x_file:\n temp_x_dict = dict()\n file_x_c = zip_f.read(\"conll_test_features/\" + str(xfile) + \".x\").decode('utf-8').split(\"\\n\")[:-1]\n\n for e in file_x_c:\n l = e.split(' ')\n if int(l[0]) not in temp_x_dict:\n temp_x_dict[int(l[0])] = [int(l[1])]\n else:\n temp_x_dict[int(l[0])].append(int(l[1]))\n 
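# Editorial note: temp_x_dict maps each token position in this test file\n # to its list of active (sparse) feature ids; the per-file token counts\n # recorded next let main() split the flat prediction matrix back into\n # per-file blocks when writing predictions.txt.\n 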
seperate_list.append(len(temp_x_dict))\n all_x.append(temp_x_dict)\n for e in temp_x_dict:\n sub_x.append(e)\n\n data_x = lil_matrix((len(sub_x), 2035523))\n k = 0\n for x in all_x:\n for e in x:\n for i in x[e]:\n data_x[k, i - 1] = 1\n k += 1\n\n return data_x, seperate_list\n\n\n\ndef main():\n # read the training data_x, data_y from dataset\n data_x, data_y = read_train_zip(\"conll_train.zip\")\n # transform data_x matrix to sparse csr_matrix\n X_sparse = csr_matrix(data_x)\n # use truncatedSVD to do dimensional reduction\n tsvd = TruncatedSVD(n_components=50, algorithm = 'arpack')\n tsvd.fit(X_sparse)\n X_sparse_tsvd = tsvd.transform(X_sparse)\n\n lable_dict = dict()\n for i in range(len(data_y)):\n if data_y[i][0] not in lable_dict:\n lable_dict[data_y[i][0]] = [i]\n else:\n lable_dict[data_y[i][0]].append(i)\n\n random_sample = []\n\n # extract reasonable number of training data\n for e in lable_dict:\n if len(lable_dict[e]) > 5000:\n k = random.sample(lable_dict[e], 5000)\n random_sample = random_sample + k\n else:\n random_sample = random_sample + lable_dict[e]\n\n # shuffle training dataset and validation dataset\n test_index = [_ for _ in range(211727)]\n random_test = random.sample(test_index, 10000)\n train_x = X_sparse_tsvd[random_sample]\n train_y = data_y[random_sample]\n val_x = X_sparse_tsvd[random_test]\n val_y = data_y[random_test]\n\n shuffle_list = np.array([_ for _ in range(train_x.shape[0])])\n np.random.shuffle(shuffle_list)\n train_x = train_x[shuffle_list]\n train_y = train_y[shuffle_list]\n\n # train the gp model\n g = gpflow.models.SVGP(\n train_x, train_y, kern=gpflow.kernels.RBF(input_dim=50),\n likelihood=gpflow.likelihoods.MultiClass(23),\n minibatch_size=1000,\n Z=train_x[::50].copy(), num_latent=23, whiten=True, q_diag=True)\n opt = gpflow.train.AdamOptimizer()\n opt.minimize(g, maxiter=notebook_niter(2000))\n\n result_t = g.predict_y(val_x)[0]\n\n #calculate the ER and MNLP for validation data set with GP model\n c = 0\n for i in range(len(val_x)):\n if result_t[i].argmax() == val_y[i][0]:\n c += 1\n er = 1 - c / len(val_x)\n\n mnlp = 0\n result_te = np.log(result_t)\n for i in range(len(val_x)):\n for j in range(23):\n mnlp += result_te[i][j]\n mnlp = - mnlp / len(val_x)\n print(\"GP model:\")\n print(\"error rate: {}, mean negative log probability: {}\".format(er, mnlp))\n\n # calculate the ER and MNLP for validation data set with softmax model\n lgpredict = LogisticRegression(solver='lbfgs', multi_class=\"multinomial\").fit(train_x, train_y)\n lgpresult = lgpredict.predict_proba(val_x)\n\n c = 0\n for i in range(len(val_x)):\n if lgpresult[i].argmax() == val_y[i][0]:\n c += 1\n er = 1 - c / len(val_x)\n\n mnlp = 0\n result_te = np.log(lgpresult)\n for i in range(len(val_x)):\n for j in range(23):  # 23 classes, matching the GP loop above\n mnlp += result_te[i][j]\n mnlp = - mnlp / len(val_x)\n print(\"Softmax model:\")\n print(\"error rate: {}, mean negative log probability: {}\".format(er, mnlp))\n\n\n # read test data from test dataset\n test_x, seperate_list = read_test_zip(\"conll_test_features.zip\")\n # dimensional reduction\n test = tsvd.transform(test_x)\n\n # predict\n result = g.predict_y(test)[0]\n result_1 = g.predict_y(test[20000:40000])[0]\n index = 0\n result = np.log(result)\n final = ''\n\n for e in seperate_list:\n for i in range(e):\n for j in range(22):\n final += str(round(result[index + i][j], 8))\n final += \",\"\n final += str(round(result[index + i][22], 8))\n final += \"\\n\"\n final += \"\\n\"\n index += e\n\n # write the predictions to predictions.txt\n with open(\"predictions.txt\", \"w\") as f:\n f.write(final)\n\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"ZechaoLi/comp9418","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":6381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"1379799003","text":"from graph_search import GraphSearch\n\nfrom priority_frontier import PriorityFrontier\n\n\nclass UCS(GraphSearch):\n\n    def _init_frontier(self, dimensions, node):\n        self.frontier = PriorityFrontier(dimensions)\n        self.frontier.insert((node.path_cost, node))\n\n    def _update_frontier(self, child):\n        if child.state in self.frontier:\n            self.frontier.replace((child.path_cost, child))\n        elif child.state not in self.explored:\n            self.frontier.insert((child.path_cost, child))\n","repo_name":"bernardoabreu/IA","sub_path":"tp1/source/ucs.py","file_name":"ucs.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"27846480286","text":"from db import db\nfrom datetime import datetime\nfrom models.comments import CommentsModel\nfrom models.like import LikeModel\nfrom sqlalchemy.exc import CompileError, InternalError, DisconnectionError, IdentifierError\nfrom sqlalchemy.orm.exc import NoResultFound\n\nclass PostModel(db.Model):\n    __tablename__ = 'post'\n    id = db.Column(db.Integer, primary_key=True)\n    content = db.Column(db.Text(), nullable=False)\n    total_likes = db.Column(db.Integer, default=0)\n    total_dislikes = db.Column(db.Integer, default=0)\n    last_modified = db.Column(db.DateTime, nullable=True)\n    author_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n    comments = db.relationship('CommentsModel', backref='post', cascade='all,delete')\n    like_obj = db.relationship('LikeModel', backref='post', cascade='all,delete')\n\n\n    def __init__(self, content, author_id):\n        self.content = content\n        self.author_id = author_id\n        self.last_modified = datetime.now()\n\n    def json(self):\n        from models.user import UserModel\n        try:\n            user = UserModel.find_by_id(self.author_id)\n        except NoResultFound as e:\n            return {'message':'user does not exist !'}, 400\n        return {'author': user.username , 'content':self.content, 'Total Likes': self.total_likes}\n\n    @classmethod\n    def find_by_id(cls, id):\n        post = PostModel.query.filter_by(id = id).first()\n        return post\n\n    def delete_from_db(self):\n        try:\n            db.session.delete(self)\n            db.session.commit()\n        except (CompileError, InternalError):\n            db.session.rollback()\n            return {'message':'an unknown error occurred !'}, 500\n        except DisconnectionError:\n            db.session.rollback()\n            return {'message':'Database disconnected !'}, 200\n        except TimeoutError:\n            db.session.rollback()\n            return {'message':'session timed out !'}, 200\n\n    def add_to_db(self):\n        try:\n            db.session.add(self)\n            db.session.commit()\n        except (CompileError, InternalError):\n            db.session.rollback()\n            return {'message':'an unknown error occurred !'}, 500\n        except DisconnectionError:\n            db.session.rollback()\n            return {'message':'Database disconnected !'}, 200\n        except IdentifierError:\n            db.session.rollback()\n            return {'message':'character limit exceeded, kindly check !'}, 200\n        except TimeoutError:\n            db.session.rollback()\n            return {'message':'session timed out !'}, 200\n","repo_name":"mohananandgithub58/Reddit-Clone","sub_path":"code/models/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":2434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11638694974","text":"import torch\nimport torch.nn as nn\nfrom typing import List, Tuple, Dict, Union\nimport 
number_translation_dataset\nimport number_translation_dataset as num_dataset\nfrom torch.utils.data import DataLoader\nfrom model import NumberTranslationModel\n\n\ndef fit(model, dataloader, epochs, optimizer: torch.optim.Optimizer):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print('Training on device', device)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.9, patience=20)\n for epoch in range(epochs):\n print(f'epoch: {epoch}')\n compute_acc = True if epoch % 20 == 0 else False\n\n training_metrics = eval(model, dataloader, compute_acc=compute_acc)\n print(f'training set log prob:', training_metrics)\n\n for data_batch in dataloader:\n optimizer.zero_grad()\n nums, eng_tokens, ch_tokens = tuple(zip(*data_batch))\n output = model(eng_tokens, ch_tokens, device=device)\n loss = -torch.mean(output['cum_log_probs'])\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n optimizer.step()\n\n scheduler.step(training_metrics['avg_cum_log_prob'])\n\ndef accuracy(prediction_tensor, true_output_tokens):\n \"\"\"\n Returns the number of rows for which the two inputs are the same.\n prediction_tensor is padded, ignores anything after the end_token.\n :param prediction_tensor:\n :param true_output_tokens:\n :return:\n \"\"\"\n num_matching = 0\n for i in range(len(true_output_tokens)):\n # single example sentence, which is a tensor\n true_tokens = true_output_tokens[i]\n matches = True\n for j in range(true_tokens.shape[0]):\n if prediction_tensor[i, j].item() != true_tokens[j]:\n matches = False\n break\n if matches:\n num_matching += 1\n return num_matching\n\ndef eval(model, dataloader, compute_acc=False):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = model.to(device=device)\n with torch.no_grad():\n cum_log_prob = 0.0\n num_examples = 0\n correct_predictions = 0\n for data_batch in dataloader:\n nums, eng_tokens, ch_tokens = tuple(zip(*data_batch))\n output = model(eng_tokens, ch_tokens, device=device)\n cum_log_prob += torch.sum(output['cum_log_probs'])\n num_examples += len(data_batch)\n\n if compute_acc:\n prediction_tensor = model.greedy_predict(input_tokens=eng_tokens, device=device, max_output_len=25,\n index_to_words=number_translation_dataset.INDEX_TO_CHAR)['token_tensor']\n correct_predictions += accuracy(prediction_tensor, ch_tokens)\n \"\"\"\n print('prediction tensor:')\n print(prediction_tensor)\n print('ch_tokens:')\n print(ch_tokens)\n print('accuracy:', accuracy(prediction_tensor, ch_tokens))\n \"\"\"\n\n # return average log probability of output\n metrics = {}\n metrics['avg_cum_log_prob'] = cum_log_prob / num_examples\n\n if compute_acc:\n metrics['predictions_acc'] = correct_predictions / num_examples\n return metrics\n\n\nif __name__ == '__main__':\n # test the model\n\n # english to chinese\n model = NumberTranslationModel(num_input_tokens=len(num_dataset.WORD_TOKENS),\n num_output_tokens=len(num_dataset.CHAR_TOKENS),\n input_embedding_dim=10,\n output_embedding_dim=10,\n encoder_lstm_hidden_dim=50,\n input_start_token_index=num_dataset.WORD_TO_INDEX[num_dataset.START_TOKEN],\n input_end_token_index=num_dataset.WORD_TO_INDEX[num_dataset.END_TOKEN],\n output_start_token_index=num_dataset.CHAR_TO_INDEX[num_dataset.START_TOKEN],\n output_end_token_index=num_dataset.CHAR_TO_INDEX[num_dataset.END_TOKEN])\n dataset = num_dataset.NumberTranslationDataset(size=10000)\n device = 'cuda'\n\n\n def collate_fn(x):\n return x\n\n\n dataloader = DataLoader(dataset, 
batch_size=1000, shuffle=True, collate_fn=collate_fn)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.01)\n model = model.to(device='cuda')\n\n fit(model, dataloader, epochs=10000, optimizer=optimizer)\n \"\"\"\n for i in range(1000):\n fit(model, dataloader, epochs=20, optimizer=optimizer)\n\n print(eval(model, dataloader, compute_acc=True))\n\n n, en, ch = dataset[0]\n \n predictions = model.greedy_predict(input_tokens=(en,), device='cuda', max_output_len=25,\n index_to_words=number_translation_dataset.INDEX_TO_CHAR)\n print('n=', n)\n print('english:', en)\n print('chinese:', ch)\n print(predictions)\n out = model(input_tokens=[en], output_tokens=[ch], device='cuda')\n print('cum log prob:', out['cum_log_probs'])\n \"\"\"\n","repo_name":"AhmadIssa0/number_translation","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":5236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38018989731","text":"import json\n\n\nEMPTY_SYMBOL = 'NA'\n\n\nclass Section:\n \"\"\"\n Parameters\n ----------\n body : list\n This attribute represents the paragraphs, sentences and tokens as one\n list. It's wraps them in multiple layers:\n body = [[para_1], ..., [para_L]]\n para_i = [[sen_1], ..., [sen_M]]\n sen_j = [word_1, ..., word_N]\n paper_id : int\n This indicates which paper a section belongs to.\n title : str\n The title of a section.\n vector : tensor\n Section vector of output_dim defined in siamese_models module.\n \"\"\"\n def __init__(self, article_id, paragraphs, title, citations=None, vector=None):\n self.article_id = article_id\n self.vector = vector\n if len(title) == 0:\n self.title = EMPTY_SYMBOL\n else:\n # self.title = title.lower()\n self.title = title\n self.body = self._format_section(paragraphs)\n self.citations = citations\n\n def __repr__(self):\n return self.title\n\n def __len__(self):\n return len(self.body)\n\n def __iter__(self):\n return iter(self.body)\n\n def _format_section(self, paragraphs):\n # Avoid throwing error during testing\n try:\n import spacy\n except ImportError:\n raise ImportError('spacy is missing, please run `pip install spacy` and `pip install scispacy`.')\n\n try:\n scispacy = spacy.load('en_core_sci_sm')\n except OSError:\n raise OSError('model is not installed, please try the following:\\n',\n '`pip install https://s3-us-west-2.amazonaws.com/ai2-s2-scispacy/releases/v0.2.4/en_core_sci_sm-0.2.4.tar.gz`')\n \"\"\"\n Parameters\n ----------\n paragraphs : list of paragraphs\n It stores paragraphs as its elements.\n\n Returns\n -------\n body : list\n It stores input section into the following format:\n body = [[para_1], ..., [para_L]]\n para_i = [[sen_1], ..., [sen_M]]\n sen_j = [word_1, ..., word_N]\n \"\"\"\n body = []\n # decompose the body into paragraphs.\n for para in paragraphs:\n paragraph = []\n # decompose paragraphs into sentences by scispacy\n # clean texts here\n with scispacy.disable_pipes(\"tagger\"):\n _para = scispacy(para)\n for sentence in _para.sents:\n # tokenize each sentence by scispacy\n tokenized_sentence = [token.text for token in sentence]\n paragraph.append(tokenized_sentence)\n body.append(paragraph)\n return body\n\n def get_paragraphs(self):\n \"\"\" It returns all paragraphs in section. \"\"\"\n return self.body\n\n def num_sentences(self):\n \"\"\" It returns the number of sentences in section. 
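(Editorial worked example: with body == [[s1, s2], [s3]] this returns 3.) 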
\"\"\"\n        count = 0\n        for para in self.body:\n            count += len(para)\n        return count\n\n    def get_sentence(self, para_idx):\n        \"\"\" It returns the list of sentences in the paragraph at para_idx. \"\"\"\n        para = self.body[para_idx]\n        return para\n\n    def num_tokens(self):\n        \"\"\" It returns the number of tokens in section. \"\"\"\n        count = 0\n        for para in self.body:\n            for sen in para:\n                count += len(sen)\n        return count\n\n    def get_tokens(self):\n        \"\"\" It returns all the tokens in section. \"\"\"\n        tokens = []\n        for para in self.body:\n            for sen in para:\n                tokens += sen\n        return tokens\n","repo_name":"box-key/csn-searcher","sub_path":"csn_searcher/csn/section.py","file_name":"section.py","file_ext":"py","file_size_in_byte":3585,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"10060528616","text":"import random\r\n\r\ndef snake_water_gun():\r\n\r\n    chants = [\"snake\" , \"water\" , \"gun\"]\r\n    y = [1,2,3]\r\n    choice = random.choice(chants)\r\n\r\n    print(\"Snake = 1\\nWater = 2\\nGun = 3\\nEnter your choice (1/2/3): \" , end=\"\")\r\n\r\n    try:\r\n        x = int(input(\"\")) \r\n        while x not in y:\r\n            x = int(input(\"Please enter a valid input (1/2/3): \"))\r\n    except ValueError as error:\r\n        return error\r\n\r\n    my_choice = chants[x-1]\r\n\r\n    print(f\"your choice : {my_choice}\\ncomputer's choice : {choice}\")\r\n\r\n    # snake drinks water, water damages gun, gun shoots snake\r\n    if (my_choice == \"snake\" and choice == \"water\") or (my_choice == \"water\" and choice == \"gun\") or (my_choice == \"gun\" and choice == \"snake\"):\r\n        return \"you won, computer lost.\"\r\n    elif my_choice == choice:\r\n        return \"no one wins, it's a tie.\"\r\n    else:\r\n        return \"you lost, computer won.\"\r\n    \r\na = snake_water_gun()\r\nprint(a)\r\n","repo_name":"Fazalkadivar21/python_projects","sub_path":"Snake_water_gun/snake_water_gun.py","file_name":"snake_water_gun.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"2916200505","text":"\"\"\"Add site table\n\nRevision ID: 1b2c4f47018d\nRevises: 12886216a763\nCreate Date: 2023-04-01 20:08:01.631957\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '1b2c4f47018d'\ndown_revision = '12886216a763'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.create_table('sites',\n    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),\n    sa.Column('name', sa.String(), nullable=False),\n    sa.PrimaryKeyConstraint('id')\n    )\n    op.create_table('user_sites',\n    sa.Column('user_id', sa.Integer(), nullable=True),\n    sa.Column('site_id', postgresql.UUID(as_uuid=True), nullable=True),\n    sa.Column('permission', sa.String(), nullable=True),\n    sa.ForeignKeyConstraint(['site_id'], ['sites.id'], ),\n    sa.ForeignKeyConstraint(['user_id'], ['users.id'], )\n    )\n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('user_sites')\n op.drop_table('sites')\n # ### end Alembic commands ###\n","repo_name":"Mozzo1000/docudir","sub_path":"src/migrations/versions/1b2c4f47018d_add_site_table.py","file_name":"1b2c4f47018d_add_site_table.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40658113697","text":"from collections import OrderedDict\nimport json\n\nimport numpy as np\nimport pandas as pd\nimport tables\n\nfrom .obtain_features_helper import WormStats\nfrom .param_readers import read_unit_conversions, read_ventral_side, read_fps\n\n\nwcon_metadata_fields = ['id', 'lab', 'who', 'timestamp', 'temperature', 'humidity', 'arena',\n 'food', 'media', 'sex', 'stage', 'age', 'strain', 'protocol', 'interpolate', 'software']\n\n\ndef wcon_reformat_metadata(metadata_dict):\n wcon_metadata = OrderedDict()\n for field in wcon_metadata_fields:\n if field in metadata_dict:\n wcon_metadata[field] = metadata_dict[field]\n\n wcon_metadata['@OMG'] = OrderedDict()\n for field in metadata_dict:\n if field not in wcon_metadata_fields:\n wcon_metadata['@OMG'][field] = metadata_dict[field]\n\n if '@OMG' in metadata_dict:\n for field in metadata_dict['@OMG']:\n wcon_metadata['@OMG'][field] = metadata_dict['@OMG'][field]\n\n return wcon_metadata\n\n\ndef readMetaData(fname, provenance_step='FEAT_CREATE'):\n def _order_metadata(metadata_dict):\n ordered_fields = ['strain', 'timestamp', 'gene', 'chromosome', 'allele',\n 'strain_description', 'sex', 'stage', 'ventral_side', 'media', 'arena', 'food',\n 'habituation', 'who', 'protocol', 'lab', 'software']\n\n extra_fields = metadata_dict.keys() - set(ordered_fields)\n ordered_fields += sorted(extra_fields)\n\n ordered_metadata = OrderedDict()\n for field in ordered_fields:\n if field in metadata_dict:\n ordered_metadata[field] = metadata_dict[field]\n return ordered_metadata\n\n with tables.File(fname, 'r') as fid:\n if '/experiment_info' not in fid:\n experiment_info = {}\n else:\n experiment_info = fid.get_node('/experiment_info').read()\n experiment_info = json.loads(experiment_info.decode('utf-8'))\n\n provenance_tracking = fid.get_node('/provenance_tracking/' + provenance_step).read()\n provenance_tracking = json.loads(provenance_tracking.decode('utf-8'))\n\n if 'commit_hash' in provenance_tracking:\n # old name\n pkgs_versions = provenance_tracking['commit_hash']\n else:\n pkgs_versions = provenance_tracking['pkgs_versions']\n\n if 'tierpsy' in pkgs_versions:\n tierpsy_version = pkgs_versions['tierpsy']\n else:\n tierpsy_version = pkgs_versions['MWTracker']\n\n MWTracker_ver = {\"name\": \"tierpsy (https://github.com/ver228/tierpsy-tracker)\",\n \"version\": tierpsy_version,\n \"featureID\": \"@OMG\"}\n\n experiment_info[\"software\"] = MWTracker_ver\n\n return _order_metadata(experiment_info)\n\n\ndef __reformatForJson(A):\n if isinstance(A, (int, float)):\n return A\n\n good = ~np.isnan(A) & (A != 0)\n dd = A[good]\n if dd.size > 0:\n dd = np.abs(np.floor(np.log10(np.abs(dd))) - 2)\n precision = max(2, int(np.min(dd)))\n A = np.round(A.astype(np.float64), precision)\n A = np.where(np.isnan(A), None, A)\n\n # wcon specification require to return a single number if it is only one element list\n if A.size == 1:\n return A[0]\n else:\n return A.tolist()\n\n\ndef __addOMGFeat(fid, worm_feat_time, worm_id):\n worm_features = OrderedDict()\n # add time series features\n for col_name, col_dat in worm_feat_time.iteritems():\n if col_name not in ['worm_index', 
'timestamp']:\n worm_features[col_name] = col_dat.values\n\n worm_path = '/features_events/worm_%i' % worm_id\n worm_node = fid.get_node(worm_path)\n # add event features\n for feature_name in worm_node._v_children:\n feature_path = worm_path + '/' + feature_name\n worm_features[feature_name] = fid.get_node(feature_path)[:]\n\n return worm_features\n\n\ndef _get_ventral_side(features_file):\n ventral_side = read_ventral_side(features_file)\n if not ventral_side or ventral_side == 'unknown':\n ventral_type = '?'\n else:\n # we will merge the ventral and dorsal contours so the ventral contour is clockwise\n ventral_type = 'CW'\n return ventral_type\n\n\ndef readData(features_file, READ_FEATURES=False, IS_FOR_WCON=True):\n '''\n Read 'data' records from the features file, one per worm index\n\n Parameters\n ----------\n features_file : ...\n HDF5 features file from which data is to be read\n READ_FEATURES : bool, optional\n If `True`, add custom features to each record\n IS_FOR_WCON : bool, optional\n If `True`, then the records are formatted for WCON JSON output. This adjusts\n the types of some feature values and sets the lab prefix for features (\"@OMG\")\n\n Yields\n ------\n dict\n Data records\n '''\n if IS_FOR_WCON:\n lab_prefix = '@OMG '\n else:\n lab_prefix = ''\n\n with pd.HDFStore(features_file, 'r') as fid:\n if '/features_timeseries' not in fid:\n return {} # empty file, nothing to do here\n\n features_timeseries = fid['/features_timeseries']\n feat_time_group_by_worm = features_timeseries.groupby('worm_index')\n\n ventral_side = _get_ventral_side(features_file)\n\n with tables.File(features_file, 'r') as fid:\n # fps used to adjust timestamp to real time\n fps = read_fps(features_file)\n\n # get pointers to some useful data\n skeletons = fid.get_node('/coordinates/skeletons')\n dorsal_contours = fid.get_node('/coordinates/dorsal_contours')\n ventral_contours = fid.get_node('/coordinates/ventral_contours')\n\n # let's append the data of each individual worm as an element in a list\n\n # group by iterator will return sorted worm indexes\n for worm_id, worm_feat_time in feat_time_group_by_worm:\n\n worm_id = int(worm_id)\n # read worm skeletons data\n worm_skel = skeletons[worm_feat_time.index]\n worm_dor_cnt = dorsal_contours[worm_feat_time.index]\n worm_ven_cnt = ventral_contours[worm_feat_time.index]\n\n # start ordered dictionary with the basic features\n worm_basic = OrderedDict()\n worm_basic['id'] = str(worm_id)\n worm_basic['head'] = 'L'\n worm_basic['ventral'] = ventral_side\n worm_basic['ptail'] = worm_ven_cnt.shape[1] - 1 # index starting with 0\n\n worm_basic['t'] = worm_feat_time['timestamp'].values / fps # convert from frames to seconds\n worm_basic['x'] = worm_skel[:, :, 0]\n worm_basic['y'] = worm_skel[:, :, 1]\n\n contour = np.hstack((worm_ven_cnt, worm_dor_cnt[:, ::-1, :]))\n worm_basic['px'] = contour[:, :, 0]\n worm_basic['py'] = contour[:, :, 1]\n\n if READ_FEATURES:\n worm_features = __addOMGFeat(fid, worm_feat_time, worm_id)\n for feat in worm_features:\n worm_basic[lab_prefix + feat] = worm_features[feat]\n\n if IS_FOR_WCON:\n for x in worm_basic:\n if x not in ['id', 'head', 'ventral', 'ptail']:\n worm_basic[x] = __reformatForJson(worm_basic[x])\n\n # append features\n yield worm_basic\n\n\ndef readUnits(features_file, READ_FEATURES=False):\n '''\n Read in the units for the corresponding data records recoverable from the features\n file\n\n Parameters\n ----------\n features_file : ...\n HDF5 features file from which units are to be read\n 
READ_FEATURES : bool, optional\n If `True`, add units for custom features to each record\n\n Returns\n -------\n dict\n The units for each field\n '''\n fps_out, microns_per_pixel_out, _ = read_unit_conversions(features_file)\n xy_units = microns_per_pixel_out[1]\n time_units = fps_out[2]\n\n units = OrderedDict()\n units[\"size\"] = \"mm\" # size of the plate\n units['t'] = time_units # frames or seconds\n\n for field in ['x', 'y', 'px', 'py']:\n units[field] = xy_units # (pixels or micrometers)\n\n if READ_FEATURES:\n # TODO how to change microns to pixels when required\n ws = WormStats()\n for field, unit in ws.features_info['units'].iteritems():\n units['@OMG ' + field] = unit\n\n return units\n","repo_name":"openworm/owmeta-movement","sub_path":"owmeta_movement/tierpsy/export_wcon.py","file_name":"export_wcon.py","file_ext":"py","file_size_in_byte":8372,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"35071260914","text":"#!/usr/bin/env python\nfrom __future__ import print_function # Print function in Python 2 and 3\nfrom coapthon import defines\nfrom coapthon.client.helperclient import HelperClient\nimport logging\nfrom time import sleep\nimport argparse\n\n'''\n CoAP Client for Observe Tracker\n'''\n\n__author__ = 'Fco R Tendero'\n\n\n# Global variables\nobservables = [] # list of observed paths.\n\n\n# Observe callback receive function\ndef observe_callback(response):\n print(response.payload)\n\n\ndef update_observable_resources(response):\n \"\"\"\n Update list of observable paths by parsing\n :param response: obtained response from client.discover()\n \"\"\"\n\n resources_list = response.payload.split(\",\")\n\n for resource in resources_list:\n elems = str(resource).split(\";\")\n\n if \"obs\" in elems: # if resource is observable\n # get path (position 0) and remove undesired characters\n observable_path = elems[0].translate(\n str.maketrans({'<': '', '>': ''})\n )\n\n # only append to global list if it has not been observed yet\n if observable_path not in observables:\n observables.append(observable_path)\n\n\ndef main(args):\n \"\"\"\n Get the payload of observable resources every 5 seconds\n :param args: IP and Port of the host to observe\n \"\"\"\n\n logging.disable(logging.DEBUG) # Disable DEBUG logging\n\n while True:\n client = HelperClient(server=(args.host, args.port))\n response = client.discover()\n\n # catching exception if the response doesn't contain a payload\n try:\n update_observable_resources(response)\n\n for path in observables:\n client.observe(path, observe_callback)\n\n except AttributeError:\n print(\"Empty resource: Doesn't contain a payload\")\n\n finally:\n sleep(5) # 5 seconds between new discover\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n '-H', '--host',\n help=\"IP direction of the CoAP server, default: MULTICAST. 
\"\n \"*NOTE*: If Server is transmitting MULTICAST, no messages will be received if an UNICAST IP is selected\",\n default=defines.ALL_COAP_NODES\n )\n parser.add_argument(\n '-P', '--port',\n help=\"Port where CoAP server is listening, default: 5683\",\n default=5683\n )\n\n main(parser.parse_args())\n","repo_name":"frtendero/CoAP_Observe_Tracker","sub_path":"class_observe_client.py","file_name":"class_observe_client.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"24474635295","text":"import pandas as pd\r\nimport numpy as np\r\nimport seaborn as sn\r\nimport matplotlib.pyplot as plt\r\nfrom IPython.display import display\r\nfrom IPython.display import display_html\r\nfrom sklearn import linear_model\r\nfrom sklearn.linear_model import LogisticRegression\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nfrom sklearn.linear_model import Perceptron\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.svm import SVC, LinearSVC\r\nfrom sklearn.naive_bayes import GaussianNB\r\n#survied:是否生存(0死1活) pclass:艙等(1頭等2商務3經濟) name:姓名\r\n#sex:性別(0女1男) age:年齡 sibsp:船上有兄弟/配偶的數目 parch:船上有父母/小孩的數目\r\n#ticket:船票編號 fare:船票價格 cabin:船艙編號 embarked:上船的岸口(C法國瑟堡Q英國皇后鎮S英國南安普頓)\r\n\r\n#資料路徑\r\ntest_data = pd.read_csv(r\"C:\\Users\\Faker\\Desktop\\資料探勘\\DEMO\\titanic data\\test.csv\")\r\ntrain_data = pd.read_csv(r\"C:\\Users\\Faker\\Desktop\\資料探勘\\DEMO\\titanic data\\train.csv\")\r\nsave_path = r\"C:\\Users\\Faker\\Desktop\\資料探勘\\DEMO\\titanic data\"\r\nsave_name = r\"alldata.csv\"\r\nprint(train_data.shape)\r\nprint(test_data.shape)\r\n#資料合併後儲存\r\nalldata = train_data.append(test_data)\r\nalldata.reset_index(inplace=True, drop=True)\r\n#alldata.to_csv(save_path+'\\\\'+ save_name,encoding=\"utf_8_sig\",index=False)\r\nprint(alldata.head())\r\n\r\n#計算缺失\r\nall_data_miss = alldata.isnull().sum()\r\nprint(all_data_miss[1:11])\r\ntotal_data = np.product(alldata.shape)\r\ntotal_miss = all_data_miss[1:11].sum()\r\npercent_miss = (total_miss / total_data) * 100\r\nprint(total_miss, percent_miss)\r\n\r\n#性別存活率\r\nplt.figure(num=1)\r\nplt.title(\"sex/survived\")\r\n#sn.histplot(x= alldata['sex'], hue=alldata['survived'])\r\nsn.countplot(x= alldata['sex'], hue=alldata['survived'])\r\ndisplay(alldata[[\"sex\", \"survived\"]].groupby(['sex'], as_index=False).mean().round)\r\n\r\n#艙等存活率\r\nplt.figure(num=2)\r\nplt.title(\"pclass/survived\")\r\nsn.countplot(x=alldata['pclass'], hue=alldata['survived'])\r\ndisplay(alldata[['pclass', 'survived']].groupby(['pclass'], as_index=False).mean().round)\r\n\r\n#性別與艙等存活率\r\ndisplay(alldata[['pclass', 'sex', 'survived']].groupby(['pclass', 'sex'], as_index=False).mean().sort_values(by='survived', ascending=False))\r\n\r\n##艙等/票價 存活率\r\nplt.figure(num=3)\r\nplt.title(\"fare & pclass vs survived\")\r\n#alldata['logfare'] = (alldata['fare']+1).map(lambda x : np.log10(x) if x > 0 else 0) #取log\r\n#sn.boxplot(x=alldata['fare'],y=alldata['pclass'],hue=alldata['survived'],orient='h',palette=\"Set3\")\r\nsn.scatterplot(x=alldata['pclass'], y=alldata['fare'], hue=alldata['survived'])\r\ndisplay(pd.pivot_table(alldata, values=['fare'], index=['pclass'], columns=['survived'], aggfunc='median').round(3))\r\n\r\n\r\n#上船口存活率\r\nplt.figure(num=4)\r\nplt.title(\"embarked/survived\")\r\nsn.countplot(x=alldata['embarked'], hue=alldata['survived'])\r\ndisplay(alldata[['embarked', 'survived']].groupby(['embarked'], 
as_index=False).mean().round(3))\r\n\r\n#Ticket class by embarkation port\r\nplt.figure(num=5)\r\nplt.title(\"embarked/pclass\")\r\n#sn.histplot(x=alldata['embarked'], hue=alldata['pclass'])\r\nsn.countplot(x=alldata['embarked'], hue=alldata['pclass'])\r\ndisplay(alldata[['embarked', 'pclass']].groupby(['embarked'], as_index=False).mean().round(3))\r\n#display(alldata[['pclass', 'embarked', 'sex']].groupby(['pclass', 'embarked'], as_index=False).mean().sort_values(by='sex', ascending=False))\r\n\r\n#Survival rate by name length\r\nnamelen = []\r\nfor names in alldata['name']:\r\n namelen.append(len(names.split()))\r\nalldata['namelen'] = namelen\r\nplt.figure(num=6)\r\nplt.title(\"namelen/survived\")\r\nsn.countplot(x=alldata['namelen'], hue=alldata['survived'])\r\ndisplay(alldata[['namelen', 'survived']].groupby(['namelen'], as_index=False).mean().round(3))\r\n\r\n#Survival rate by family size\r\nplt.figure(num=7)\r\nplt.title(\"family/survived\")\r\nalldata['family'] = alldata['sibsp']+alldata['parch']\r\nprint(alldata['family'])\r\nsn.countplot(x=alldata['family'], hue=alldata['survived'])\r\ndisplay(alldata[['family', 'survived']].groupby(['family'], as_index=False).mean().round(3))\r\n\r\n#Age feature\r\nplt.figure(num=8)\r\nsn.scatterplot(x=alldata['age'], y=alldata['fare'], hue=alldata['survived'])\r\nplt.figure(num=9)\r\nsn.histplot(x=alldata['age'], hue=alldata['survived'],bins=10)\r\nalldata['haveage'] = alldata['age'].isnull().map(lambda x: 0 if x == True else 1)\r\nfig, [ax1,ax2] = plt.subplots(1, 2)\r\nax1 = sn.countplot(x=alldata['pclass'], hue=alldata['haveage'],ax=ax1)\r\nax2 = sn.countplot(x=alldata['sex'], hue=alldata['haveage'],ax=ax2)\r\npd.crosstab(alldata['haveage'], alldata['sex'], margins=True).round(3)\r\n\r\nhaveage_s = ((alldata['haveage'] == 1) & (alldata['pclass'] != 3) & (alldata['survived'] == 1))\r\nhaveage_d = ((alldata['haveage'] == 1) & (alldata['pclass'] != 3) & (alldata['survived'] == 0))\r\nfig, ax = plt.subplots()\r\nplt.title(\"age/survived in pclass1.2\")\r\nax = sn.distplot(alldata.loc[haveage_s, 'age'], kde=False, bins=10, label='survived')\r\nax = sn.distplot(alldata.loc[haveage_d, 'age'], kde=False, bins=10, label='dead')\r\nax.legend()\r\n\r\n#Data cleaning\r\n#Age\r\nalldata['title'] = alldata['name'].str.extract(' ([A-Za-z]+)\\.', expand=False)\r\nalldata['title'] = alldata['title'].replace(['Capt', 'Col', 'Countess', 'Don', 'Dr', 'Dona', 'Jonkheer',\r\n 'Major', 'Rev', 'Sir'], 'Rare')\r\nalldata['title'] = alldata['title'].replace(['Mlle', 'Ms', 'Mme'], 'Miss')\r\nalldata['title'] = alldata['title'].replace(['Lady'], 'Mrs')\r\nalldata['title'] = alldata['title'].map({\"Mr\":0, \"Rare\":1, \"Master\":2, \"Miss\":3, \"Mrs\":4})\r\nmeamage = alldata.groupby('title')['age'].median()\r\n#alldata['newage'] = alldata['age']\r\nfor i in range(0,5):\r\n alldata.loc[(alldata['age'].isnull()) & (alldata['title'] == i), 'age'] = meamage[i]\r\n#alldata['newage'] = alldata['newage'].astype('int')\r\n\r\nalldata['name'] = alldata['namelen']\r\nalldata[\"sex\"] = [1 if i == \"male\" else 0 for i in alldata[\"sex\"]]\r\nalldata[\"fare\"] = alldata[\"fare\"].fillna(alldata[\"fare\"].mean())\r\nalldata['fare'] = (alldata['fare']+1).map(lambda x : np.log10(x) if x > 0 else 0) #take log10\r\nalldata[\"embarked\"] = [0 if i == \"S\" else i for i in alldata[\"embarked\"]]\r\nalldata[\"embarked\"] = [1 if i == \"Q\" else i for i in alldata[\"embarked\"]]\r\nalldata[\"embarked\"] = [2 if i == \"C\" else i for i in alldata[\"embarked\"]]\r\nalldata[\"embarked\"] = alldata[\"embarked\"].fillna(0)\r\nalldata = alldata.drop('cabin',axis=1)\r\nalldata = 
alldata.drop('ticket',axis=1)\r\nalldata = alldata.drop('haveage',axis=1)\r\nalldata = alldata.drop('parch',axis=1)\r\nalldata = alldata.drop('sibsp',axis=1)\r\nalldata = alldata.drop('namelen',axis=1)\r\nprint(alldata.head())\r\nall_data_miss = alldata.isnull().sum()\r\nprint(all_data_miss[1:15])\r\n#alldata.to_csv(save_path+'\\\\'+ save_name,encoding=\"utf_8_sig\",index=False)\r\n\r\n#Correlation heatmap of the features\r\ncols = ['survived', 'pclass', 'name', 'sex', 'age', 'family', 'fare', 'embarked','title']\r\nalldata_corr = alldata[cols].corr()\r\nprint(alldata_corr)\r\nplt.figure(num=12)\r\nplt.title(\"att/corr\")\r\nsn.heatmap(alldata_corr,annot=True)\r\n#plt.show()\r\n\r\n#ML\r\n#Separate the data\r\nY_train = train_data['survived']\r\nX_train = alldata.drop(\"survived\", axis=1)\r\nX_test = X_train.iloc[891:]\r\nX_train = X_train.head(891)\r\n\r\n#Random forest\r\nrandom_forest = RandomForestClassifier()#oob_score=True)\r\nrandom_forest.fit(X_train, Y_train)\r\nY_pred = random_forest.predict(X_test)\r\nrandom_forest.score(X_train, Y_train)\r\nacc_random_forest = round(random_forest.score(X_train, Y_train), 3)\r\n#oobscore = round(random_forest.oob_score_, 3)\r\n#print(\"oobscore:\", oobscore)\r\nprint(acc_random_forest)\r\n\r\n#Logistic regression\r\nlogreg = LogisticRegression(solver='lbfgs',max_iter=200)\r\nlogreg.fit(X_train, Y_train)\r\nY_pred = logreg.predict(X_test)\r\nacc_log = round(logreg.score(X_train, Y_train), 2)\r\n\r\n#SVM\r\nlinear_svc = LinearSVC()\r\nlinear_svc.fit(X_train, Y_train)\r\nY_pred = linear_svc.predict(X_test)\r\nacc_linear_svc = round(linear_svc.score(X_train, Y_train), 2)\r\n\r\n#Decision tree\r\ndecision_tree = DecisionTreeClassifier()\r\ndecision_tree.fit(X_train, Y_train)\r\nY_pred = decision_tree.predict(X_test)\r\nacc_decision_tree = round(decision_tree.score(X_train, Y_train), 2)\r\n\r\n#perceptron\r\nperceptron = Perceptron(max_iter=100)\r\nperceptron.fit(X_train, Y_train)\r\nY_pred = perceptron.predict(X_test)\r\nacc_perceptron = round(perceptron.score(X_train, Y_train), 2)\r\n\r\n#SGD\r\nsgd = linear_model.SGDClassifier()\r\nsgd.fit(X_train, Y_train)\r\nY_pred = sgd.predict(X_test)\r\nsgd.score(X_train, Y_train)\r\nacc_sgd = round(sgd.score(X_train, Y_train), 2)\r\n\r\n#Naive Bayes classifier\r\ngaussian = GaussianNB()\r\ngaussian.fit(X_train, Y_train)\r\nY_pred = gaussian.predict(X_test)\r\nacc_gaussian = round(gaussian.score(X_train, Y_train), 2)\r\n\r\n#Sort the results\r\nresults = pd.DataFrame({\r\n \"model\": [\"Support Vector Machines\", \"Logistic Regression\", \"Random Forest\", \"Naive Bayes\", \"Perceptron\",\r\n \"Stochastic Gradient Descent\", \"Decision Tree\"],\r\n \"acc\": [acc_linear_svc, acc_log, acc_random_forest, acc_gaussian, acc_perceptron, acc_sgd,\r\n acc_decision_tree]})\r\nresult = results.sort_values(by=\"acc\", ascending=False)\r\nresult = result.set_index(\"acc\")\r\nprint(result.head(9))\r\n\r\n#Cross-validation\r\nfrom sklearn.model_selection import cross_val_score\r\nrf = RandomForestClassifier(n_estimators=100)\r\nacc = cross_val_score(rf, X_train, Y_train, cv=9, scoring = \"accuracy\")\r\nprint(\"Acc:\", acc)\r\nprint(\"Mean:\", acc.mean())\r\nprint(\"Standard Deviation:\", acc.std())\r\n\r\n#Feature importance\r\nimportances = pd.DataFrame({'feature':X_train.columns,'importance':np.round(random_forest.feature_importances_,3)})\r\nimportances = importances.sort_values('importance',ascending=False).set_index('feature')\r\nprint(importances.head(9))\r\n\r\n#Hyperparameter tuning\r\nrandom_forest = RandomForestClassifier(criterion='gini',\r\n n_estimators=100,\r\n min_samples_split=8,\r\n min_samples_leaf=2,\r\n oob_score=True,\r\n random_state=1,\r\n 
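# (added note) oob_score=True scores the forest on each tree's out-of-bag\r\n # samples, a built-in validation estimate that needs no separate holdout;\r\n # random_state pins the bootstrap sampling so results are reproducible.\r\n 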
)\r\nrandom_forest.fit(X_train, Y_train)\r\nY_prediction = random_forest.predict(X_test)\r\nrandom_forest.score(X_train, Y_train)\r\noobscore = round(random_forest.oob_score_,3)\r\nprint(\"oobscore:\", oobscore)\r\n\r\n\r\n\r\n\r\n\r\n\r\n#https://yulongtsai.medium.com/https-medium-com-yulongtsai-titanic-top3-8e64741cc11f\r\n#http://www.taroballz.com/2019/05/25/ML_RandomForest_Classifier/\r\n#https://chih-sheng-huang821.medium.com/%E4%BA%A4%E5%8F%89%E9%A9%97%E8%AD%89-cross-validation-cv-3b2c714b18db\r\n#https://towardsdatascience.com/predicting-the-survival-of-titanic-passengers-30870ccc7e8","repo_name":"los61013/kaggle-titanic","sub_path":"titanic.py","file_name":"titanic.py","file_ext":"py","file_size_in_byte":10699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"35628112435","text":"# This sample tests a loop where some of the variables within the loop\n# depend on each other.\n\n# pyright: strict\n\n\ndef find_min(nums: list[int]) -> int:\n low = 0\n high = len(nums) - 1\n while low < high:\n mid = (low + high) // 2\n if nums[mid] > nums[high]:\n low = mid + 1\n elif nums[mid] < nums[high]:\n high = mid\n else:\n high -= 1\n return nums[low]\n","repo_name":"microsoft/pyright","sub_path":"packages/pyright-internal/src/tests/samples/loop21.py","file_name":"loop21.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","stars":11208,"dataset":"github-code","pt":"48"} +{"seq_id":"31149238142","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 18 17:08:45 2019\n\n@author: Regupathi (Regu) Angappan (he/him)\n\"\"\"\n##############################################################################\n##############################################################################\n#Import Necessary Modules\n\nimport matplotlib\nfrom pylab import *\nfrom libgauss import *\nfrom glob import glob\nimport os\nimport numpy as np\nfrom datetime import datetime\nfrom scipy.io import savemat, loadmat\nfrom numpy.polynomial.polynomial import polyfit\nimport sys\nfrom datetime import datetime, time, timedelta\nfrom dateutil.relativedelta import *\n\n##############################################################################\n##############################################################################\n#############\n# Load File #\n#############\n\nmin_t = 43200\nmax_t = 185911200\n\ndef get_normalized_t(t):\n t1 = datetime(2010, 1, 1, 00, 00, 00)\n t2 = t\n delta_t_norm = ((t2-t1).total_seconds() - min_t)/(max_t-min_t)\n return delta_t_norm\n\ninputs = np.sort(glob('filteredbin*.mat'))\n\nbb_res = 9\nlon_ideal = np.arange(-180, 180+bb_res, bb_res)\nlat_ideal = np.arange(-90, 90+bb_res, bb_res)\nsc = 1.5\n\nyear_norm = [-0.00023242300987797793, 0.1694363742010459, 0.3391051714119698, 0.5092388146426496, 0.6789076118535735, 0.8485764090644974, 1.0182452062754213]\n\n# Let's make a directory to store all the outputs! \ndirname = 'TimeSeriesPlots'\n\nif not os.path.exists(dirname):\n os.mkdir(dirname)\n\n# Time to analyze the data! 
\nfor long in range(len(lon_ideal)-1):\n \n for lat in range(len(lat_ideal)-1):\n \n long_oc = lon_ideal[long] #Longitude of concern - that is being plotted/analyzed\n lat_oc = lat_ideal[lat] #Latitude of concern - that is being plotted/analyzed\n \n Br_plot = []\n Btheta_plot = []\n Bphi_plot =[]\n date_v = []\n t_s_plot = []\n date1_a =[]\n \n ######################################################################\n for filename in inputs:\n \n dat = loadmat(filename)\n title = filename.split('.')[0]\n date = title.split('_')[1]\n \n year = date.split('-')[0]\n month = date.split('-')[1]\n day = date.split('-')[2]\n time = date.split('-')[3]\n \n YY = year[2:4]\n MM = month\n DD = day\n HH = time\n \n ##################################################################\n #######################\n #Date time conversion #\n #######################\n dt = '20'+YY+'-'+MM+'-'+DD+' '+HH\n \n def date_diff_in_Seconds(dt2, dt1):\n timedelta_r = dt2 - dt1 \n return timedelta_r.days * 24 * 3600 + timedelta_r.seconds\n \n date1 = datetime.strptime(dt, '%Y-%m-%d %H')\n date1_a.append(date1)\n \n # Reference date\n date2 = datetime.strptime('2010-01-01 00', '%Y-%m-%d %H')\n \n t_s = date_diff_in_Seconds(date1, date2)\n t_s_plot.append(t_s)\n \n ##################################################################\n Br = dat['br_bin']\n Btheta = dat['btheta_bin']\n Bphi = dat['bphi_bin']\n\n Br1 = Br[long, lat]\n Br_plot.append(Br1)\n Btheta1 = Btheta[long, lat]\n Btheta_plot.append(Btheta1)\n Bphi1 = Bphi[long, lat]\n Bphi_plot.append(Bphi1)\n \n Br_plot_a = np.asarray(Br_plot)\n Btheta_plot_a = np.asarray(Btheta_plot)\n Bphi_plot_a = np.asarray(Bphi_plot)\n \n ##################################################################\n \n # Plot the resulting overall time series\n t_s_plot_a = np.asarray(t_s_plot)\n t_s_plot_norm = (t_s_plot_a - min(t_s_plot_a))/(max(t_s_plot_a) - min(t_s_plot_a))\n \n plt.figure(1)\n #plt.plot(t_s_plot_norm, Br_plot_a,'o-',label='data')\n plt.plot(t_s_plot_norm, Br_plot_a,'o-')\n #legend()\n xlabel('Time')\n ylabel(r\"$B_{r}$, nT\")\n xticks(year_norm, ['2010','2011','2012','2013','2014','2015','2016'])\n plt.title(r\"$B_{r}$ for latitude %d & longitude %d\"%(lat_oc,long_oc))\n \n \n plt.figure(2)\n #plt.plot(t_s_plot_norm, Btheta_plot_a,'o-',label='data')\n plt.plot(t_s_plot_norm, Btheta_plot_a,'o-')\n #legend()\n xlabel('Time')\n ylabel(r\"$B_{\\theta}$, nT\")\n# xticks(arange(0.,1.1,0.16666), ['2010','2011','2012','2013','2014','2015','2016'])\n xticks(year_norm, ['2010','2011','2012','2013','2014','2015','2016'])\n plt.title(r\"$B_{\\theta}$ for latitude %d & longitude %d\"%(lat_oc,long_oc))\n \n \n plt.figure(3)\n #plt.plot(t_s_plot_norm, Bphi_plot_a,'o-',label='data')\n plt.plot(t_s_plot_norm, Bphi_plot_a,'o-')\n #legend()\n xlabel('Time')\n ylabel(r\"$B_{\\phi}$, nT\")\n xticks(year_norm, ['2010','2011','2012','2013','2014','2015','2016'])\n plt.title(r\"$B_{\\phi}$ for latitude %d & longitude %d\"%(lat_oc,long_oc))\n \n # Calculating the polynomial fit for the 18 month windows, sliding by 1 month\n date_end_d = date1_a[-1] # Last date in data\n date_end_f = date_end_d+relativedelta(months=-3) # Last date in fit\n \n def nearest(items, pivot):\n return min(items, key=lambda x: abs(x - pivot))\n \n move_d = date1_a[0]\n start_previous = 0\n \n Br_fit_slope = []\n Btheta_fit_slope = []\n Bphi_fit_slope = []\n t_slope_a = []\n t_start_a =[]\n t_end_a =[]\n \n Br_c_0 =[]\n Br_c_2 =[]\n Br_c_3 =[]\n Btheta_c_0 =[]\n Btheta_c_2 =[]\n Btheta_c_3 =[]\n Bphi_c_0 =[]\n Bphi_c_2 
=[]\n Bphi_c_3 =[]\n \n while(1):\n \n start_d = move_d\n \n start_d = nearest(date1_a, start_d)\n \n if start_d == start_previous:\n start_d = date1_a[idx_s+1]\n \n end_d = start_d+relativedelta(months=+18) \n \n last_date = nearest(date1_a, end_d)\n \n # Find index of first date & last date\n idx_s = date1_a.index(start_d)\n idx_e = date1_a.index(last_date)\n \n Br_fit = Br_plot[idx_s:idx_e+1]\n Btheta_fit = Btheta_plot[idx_s:idx_e+1]\n Bphi_fit = Bphi_plot[idx_s:idx_e+1]\n date_fit = t_s_plot_norm[idx_s:idx_e+1] # Based on normalized array\n date_fit_a = np.asarray(date_fit)\n date_fit_norm = (date_fit_a - min(date_fit_a))/(max(date_fit_a) - min(date_fit_a))\n \n t_start = t_s_plot_norm[idx_s]\n t_start_a.append(t_start)\n t_end = t_s_plot_norm[idx_e]\n t_end_a.append(t_end)\n \n start_previous = start_d\n move_d = start_d+relativedelta(months=+1)\n \n # Fitting with 3rd degree polynomial\n \n if (len(date_fit)<10): # Ensure that there is ample data to do a fit (tried 5 and it works just as well)\n continue\n \n n_p = 3\n \n Br_polyfit_c = polyfit(date_fit_norm,Br_fit,n_p)\n Br_polyfit = Br_polyfit_c[0]+Br_polyfit_c[1]*(date_fit_norm)+Br_polyfit_c[2]*(date_fit_norm**2)+Br_polyfit_c[3]*(date_fit_norm**3)\n Br_fit_slope.append(Br_polyfit_c[1])\n Br_c_0.append(Br_polyfit_c[0])\n Br_c_2.append(Br_polyfit_c[2])\n Br_c_3.append(Br_polyfit_c[3])\n \n Btheta_polyfit_c = polyfit(date_fit_norm,Btheta_fit,n_p)\n Btheta_polyfit = Btheta_polyfit_c[0]+Btheta_polyfit_c[1]*(date_fit_norm)+Btheta_polyfit_c[2]*(date_fit_norm**2)+Btheta_polyfit_c[3]*(date_fit_norm**3)\n Btheta_fit_slope.append(Btheta_polyfit_c[1])\n Btheta_c_0.append(Btheta_polyfit_c[0])\n Btheta_c_2.append(Btheta_polyfit_c[2])\n Btheta_c_3.append(Btheta_polyfit_c[3])\n \n Bphi_polyfit_c = polyfit(date_fit_norm,Bphi_fit,n_p)\n Bphi_polyfit = Bphi_polyfit_c[0]+Bphi_polyfit_c[1]*(date_fit_norm)+Bphi_polyfit_c[2]*(date_fit_norm**2)+Bphi_polyfit_c[3]*(date_fit_norm**3)\n Bphi_fit_slope.append(Bphi_polyfit_c[1])\n Bphi_c_0.append(Bphi_polyfit_c[0])\n Bphi_c_2.append(Bphi_polyfit_c[2])\n Bphi_c_3.append(Bphi_polyfit_c[3])\n \n # Mid point for the fit \n mid_fit_t = start_d+relativedelta(months=+9)\n t_slope = get_normalized_t(mid_fit_t)\n \n if t_slope <= 1.:\n t_slope_a.append(t_slope) \n \n # Plot the fits\n figure(1)\n plt.plot(date_fit, Br_polyfit, '-')\n plt.savefig('TimeSeriesPlots/FilteredBrDataTimeSeries_fit_lat_%d_long_%d.png'%(lat_oc,long_oc), dpi=400)\n \n figure(2)\n plt.plot(date_fit, Btheta_polyfit, '-')\n plt.savefig('TimeSeriesPlots/FilteredBthetaDataTimeSeries_fit_lat_%d_long_%d.png'%(lat_oc,long_oc), dpi=400)\n \n figure(3)\n plt.plot(date_fit, Bphi_polyfit, '-')\n plt.savefig('TimeSeriesPlots/FilteredBphiDataTimeSeries_fit_lat_%d_long_%d.png'%(lat_oc,long_oc), dpi=400)\n \n if (move_d > date_end_f):\n break\n \n # Plot the variation of the linear term\n \n Br_fit_slope = np.asarray(Br_fit_slope)\n Btheta_fit_slope = np.asarray(Btheta_fit_slope)\n Bphi_fit_slope = np.asarray(Bphi_fit_slope)\n t_slope_a = np.asarray(t_slope_a)\n \n Br_c_0 = np.asarray(Br_c_0)\n Br_c_2 = np.asarray(Br_c_2)\n Br_c_3 = np.asarray(Br_c_3)\n Btheta_c_0 = np.asarray(Btheta_c_0)\n Btheta_c_2 = np.asarray(Btheta_c_2)\n Btheta_c_3 = np.asarray(Btheta_c_3)\n Bphi_c_0 = np.asarray(Bphi_c_0)\n Bphi_c_2 = np.asarray(Bphi_c_2)\n Bphi_c_3 = np.asarray(Bphi_c_3)\n \n # Reduce noise by finding the average of the quartiles \n \n Br_mean_seg_a = []\n Br_slope_mean_seg_a = []\n Btheta_mean_seg_a = []\n Btheta_slope_mean_seg_a = []\n Bphi_mean_seg_a = []\n 
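# (added note) these *_mean_seg_a lists collect, per mid-point time, the\n # interquartile mean of the overlapping 18-month window fits computed below.\n 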
Bphi_slope_mean_seg_a = []\n \n for i in range(len(t_slope_a)):\n \n t_inq_1 = t_slope_a[i] # normalized between 0 & 1 for entire data range\n \n idx = where( (t_start_a < t_inq_1) & (t_end_a > t_inq_1) )[0]\n \n \n Br_fit_t = []\n Br_slope_t = []\n Btheta_fit_t = []\n Btheta_slope_t = []\n Bphi_fit_t = []\n Bphi_slope_t = []\n \n for k in idx:#t_up_lim, t_low_lim):\n \n t_inq = (t_inq_1 - t_start_a[k])/(t_end_a[k] - t_start_a[k])\n \n Br_c_0_t = Br_c_0[k]\n Br_c_1_t = Br_fit_slope[k]\n Br_c_2_t = Br_c_2[k]\n Br_c_3_t = Br_c_3[k]\n Br_t = Br_c_0_t+Br_c_1_t*(t_inq)+Br_c_2_t*(t_inq**2)+Br_c_3_t*(t_inq**3)\n \n Br_slope_t.append(Br_c_1_t)\n Br_fit_t.append(Br_t)\n \n Btheta_c_0_t = Btheta_c_0[k]\n Btheta_c_1_t = Btheta_fit_slope[k]\n Btheta_c_2_t = Btheta_c_2[k]\n Btheta_c_3_t = Btheta_c_3[k]\n Btheta_t = Btheta_c_0_t+Btheta_c_1_t*(t_inq)+Btheta_c_2_t*(t_inq**2)+Btheta_c_3_t*(t_inq**3)\n \n Btheta_slope_t.append(Btheta_c_1_t)\n Btheta_fit_t.append(Btheta_t)\n \n Bphi_c_0_t = Bphi_c_0[k]\n Bphi_c_1_t = Bphi_fit_slope[k]\n Bphi_c_2_t = Bphi_c_2[k]\n Bphi_c_3_t = Bphi_c_3[k]\n Bphi_t = Bphi_c_0_t+Bphi_c_1_t*(t_inq)+Bphi_c_2_t*(t_inq**2)+Bphi_c_3_t*(t_inq**3)\n \n Bphi_slope_t.append(Bphi_c_1_t)\n Bphi_fit_t.append(Bphi_t)\n \n try:\n Br_q1 = np.percentile(Br_fit_t, 25)\n Br_q3 = np.percentile(Br_fit_t, 75)\n Br_fit_t = np.sort(Br_fit_t).tolist()\n Br_q1 = nearest(Br_fit_t, Br_q1)\n Br_q3 = nearest(Br_fit_t, Br_q3)\n except:\n break\n \n idx_q1_r = Br_fit_t.index(Br_q1)\n idx_q3_r = Br_fit_t.index(Br_q3)\n \n Br_mean_seg = mean(Br_fit_t[idx_q1_r:idx_q3_r])\n Br_mean_seg_a.append(Br_mean_seg)\n \n Br_slope_q1 = np.percentile(Br_slope_t, 25)\n Br_slope_q3 = np.percentile(Br_slope_t, 75)\n Br_slope_t = np.sort(Br_slope_t).tolist()\n Br_slope_q1 = nearest(Br_slope_t, Br_slope_q1)\n Br_slope_q3 = nearest(Br_slope_t, Br_slope_q3)\n \n idx_q1_r_slope = Br_slope_t.index(Br_slope_q1)\n idx_q3_r_slope = Br_slope_t.index(Br_slope_q3)\n \n Br_slope_mean_seg = mean(Br_slope_t[idx_q1_r_slope:idx_q3_r_slope])\n Br_slope_mean_seg_a.append(Br_slope_mean_seg)\n \n Btheta_q1 = np.percentile(Btheta_fit_t, 25)\n Btheta_q3 = np.percentile(Btheta_fit_t, 75)\n Btheta_fit_t = np.sort(Btheta_fit_t).tolist()\n Btheta_q1 = nearest(Btheta_fit_t, Btheta_q1)\n Btheta_q3 = nearest(Btheta_fit_t, Btheta_q3)\n \n idx_q1_theta = Btheta_fit_t.index(Btheta_q1)\n idx_q3_theta = Btheta_fit_t.index(Btheta_q3)\n \n Btheta_mean_seg = mean(Btheta_fit_t[idx_q1_theta:idx_q3_theta])\n Btheta_mean_seg_a.append(Btheta_mean_seg)\n \n Btheta_slope_q1 = np.percentile(Btheta_slope_t, 25)\n Btheta_slope_q3 = np.percentile(Btheta_slope_t, 75)\n Btheta_slope_t = np.sort(Btheta_slope_t).tolist()\n Btheta_slope_q1 = nearest(Btheta_slope_t, Btheta_slope_q1)\n Btheta_slope_q3 = nearest(Btheta_slope_t, Btheta_slope_q3)\n \n idx_q1_theta_slope = Btheta_slope_t.index(Btheta_slope_q1)\n idx_q3_theta_slope = Btheta_slope_t.index(Btheta_slope_q3)\n \n Btheta_slope_mean_seg = mean(Btheta_slope_t[idx_q1_theta_slope:idx_q3_theta_slope])\n Btheta_slope_mean_seg_a.append(Btheta_slope_mean_seg)\n \n Bphi_q1 = np.percentile(Bphi_fit_t, 25)\n Bphi_q3 = np.percentile(Bphi_fit_t, 75)\n Bphi_fit_t = np.array(Bphi_fit_t)\n Bphi_mean_seg = mean(Bphi_fit_t[(Bphi_fit_t > Bphi_q1) & (Bphi_fit_t < Bphi_q3)])\n Bphi_mean_seg_a.append(Bphi_mean_seg)\n \n Bphi_slope_q1 = np.percentile(Bphi_slope_t, 25)\n Bphi_slope_q3 = np.percentile(Bphi_slope_t, 75)\n Bphi_slope_t = np.sort(Bphi_slope_t).tolist()\n Bphi_slope_q1 = nearest(Bphi_slope_t, Bphi_slope_q1)\n Bphi_slope_q3 = 
nearest(Bphi_slope_t, Bphi_slope_q3)\n \n idx_q1_phi_slope = Bphi_slope_t.index(Bphi_slope_q1)\n idx_q3_phi_slope = Bphi_slope_t.index(Bphi_slope_q3)\n \n Bphi_slope_mean_seg = mean(Bphi_slope_t[idx_q1_phi_slope:idx_q3_phi_slope])\n Bphi_slope_mean_seg_a.append(Bphi_slope_mean_seg)\n \n Br_mean_seg_a = np.asarray(Br_mean_seg_a)\n Btheta_mean_seg_a = np.asarray(Btheta_mean_seg_a)\n Bphi_mean_seg_a = np.asarray(Bphi_mean_seg_a)\n Br_slope_mean_seg_a = np.asarray(Br_slope_mean_seg_a)\n Btheta_slope_mean_seg_a = np.asarray(Btheta_slope_mean_seg_a)\n Bphi_slope_mean_seg_a = np.asarray(Bphi_slope_mean_seg_a)\n \n figure(4)\n plt.plot(t_slope_a, Br_mean_seg_a,'o-')\n xlabel('Time')\n ylabel(r\"$B_{r}$, nT\")\n xticks(year_norm, ['2010','2011','2012','2013','2014','2015','2016'])\n plt.title(r'$B_{r}$ From Polynomial Fit Segments for latitude %d & longitude %d'%(lat_oc,long_oc))\n plt.savefig('TimeSeriesPlots/Br_fit_seg_lat_%d_long_%d.png'%(lat_oc,long_oc), dpi=400)\n \n figure(5)\n plt.plot(t_slope_a, Btheta_mean_seg_a,'o-')\n xlabel('Time')\n ylabel(r\"$B_{\\theta}$, nT\")\n xticks(year_norm, ['2010','2011','2012','2013','2014','2015','2016'])\n plt.title(r'$B_{\\theta}$ From Polynomial Fit Segments for latitude %d & longitude %d'%(lat_oc,long_oc))\n plt.savefig('TimeSeriesPlots/Btheta_fit_seg_lat_%d_long_%d.png'%(lat_oc,long_oc), dpi=400)\n \n figure(6)\n plt.plot(t_slope_a, Bphi_mean_seg_a,'o-')\n xlabel('Time')\n ylabel(r\"$B_{\\phi}$, nT\")\n xticks(year_norm, ['2010','2011','2012','2013','2014','2015','2016'])\n plt.title(r'$B_{\\phi}$ From Polynomial Fit Segments for latitude %d & longitude %d'%(lat_oc,long_oc))\n plt.savefig('TimeSeriesPlots/Bphi_fit_seg_lat_%d_long_%d.png'%(lat_oc,long_oc), dpi=400)\n \n t_slope_a_6 = t_slope_a*5.89 # 5.89 years from start to end point of data.\n year_norm_6 = [-0.00023242300987797793*6, 0.1694363742010459*6, 0.3391051714119698*6, 0.5092388146426496*6, 0.6789076118535735*6, 0.8485764090644974*6, 1.0182452062754213*6]\t \n\t\n figure(7)\n plt.plot(t_slope_a_6, gradient(Br_mean_seg_a,t_slope_a_6),'o-')\n xlabel('Time')\n ylabel(r\"$B_{r}$, nT/yr\")\n xticks(year_norm_6, ['2010','2011','2012','2013','2014','2015','2016'])\n plt.ylim(-600,350)\n plt.title(r'$B_{r}$ slope for latitude %d & longitude %d'%(lat_oc,long_oc))\n plt.savefig('TimeSeriesPlots/Br_grad_lat_%d_long_%d_norm.png'%(lat_oc,long_oc), dpi=400)\n \n \n figure(8)\n plt.plot(t_slope_a_6, gradient(Btheta_mean_seg_a,t_slope_a_6),'o-')\n xlabel('Time')\n ylabel(r\"$B_{\\theta}$, nT/yr\")\n xticks(year_norm_6, ['2010','2011','2012','2013','2014','2015','2016'])\n plt.ylim(-250,340)\n plt.title(r'$B_{\\theta}$ slope for latitude %d & longitude %d'%(lat_oc,long_oc))\n plt.savefig('TimeSeriesPlots/Btheta_grad_lat_%d_long_%d_norm.png'%(lat_oc,long_oc), dpi=400)\n \n \n figure(9)\n plt.plot(t_slope_a_6, gradient(Bphi_mean_seg_a,t_slope_a_6),'o-')\n xlabel('Time')\n ylabel(r\"$B_{\\phi}$, nT/yr\")\n xticks(year_norm_6, ['2010','2011','2012','2013','2014','2015','2016'])\n plt.ylim(-200,275)\n plt.title(r'$B_{\\phi}$ slope for latitude %d & longitude %d'%(lat_oc,long_oc))\n plt.savefig('TimeSeriesPlots/Bphi_grad_lat_%d_long_%d_norm.png'%(lat_oc,long_oc), dpi=400)\n \n \n Br_grad = gradient(Br_mean_seg_a,t_slope_a_6)\n Btheta_grad = gradient(Btheta_mean_seg_a,t_slope_a_6)\n Bphi_grad = gradient(Bphi_mean_seg_a,t_slope_a_6)\n\n savemat('grad_timeseries_b_%d_%d.mat' %(lat_oc, long_oc), {'Br_grad':Br_grad, 'Btheta_grad':Btheta_grad, 'Bphi_grad':Bphi_grad,'time':t_slope_a}) \n\n plt.close('all')\n 
\n","repo_name":"reguang/IridiumGeomagJerk","sub_path":"IridiumPythonScripts/AccioSV.py","file_name":"AccioSV.py","file_ext":"py","file_size_in_byte":19465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"48"} +{"seq_id":"71996009106","text":"# -*- coding: UTF-8 -*-\r\nfrom prompt_toolkit.formatted_text import ANSI\r\n\r\nfrom sploitkit import *\r\n\r\n\r\n# ----------------------------- SUBCONSOLE DEFINITION --------------------------\r\nclass ModuleConsole(Console):\r\n \"\"\" Module subconsole definition. \"\"\"\r\n level = \"module\"\r\n message = [\r\n ('class:prompt', \" \"),\r\n ('class:prompt', None),\r\n ('class:prompt', \"(\"),\r\n ('class:module', None),\r\n ('class:prompt', \")\"),\r\n ]\r\n style = {\r\n 'prompt': \"#eeeeee\",\r\n 'module': \"#ff0000\",\r\n }\r\n \r\n def __init__(self, parent, module):\r\n self.attach(module, True)\r\n self.logname = module.fullpath\r\n self.message[1] = ('class:prompt', self.module.category)\r\n self.message[3] = ('class:module', self.module.base)\r\n self.opt_prefix = \"Module\"\r\n super(ModuleConsole, self).__init__(parent)\r\n\r\n\r\n# ---------------------------- GENERAL-PURPOSE COMMANDS ------------------------\r\nclass Use(Command):\r\n \"\"\" Select a module \"\"\"\r\n except_levels = [\"session\"]\r\n \r\n def complete_values(self):\r\n return Module.get_list()\r\n \r\n def run(self, module):\r\n new_mod, old_mod = Module.get_modules(module), self.module\r\n # avoid starting a new subconsole for the same module\r\n if old_mod is not None and old_mod.fullpath == new_mod.fullpath:\r\n return\r\n ModuleConsole(self.console, new_mod).start()\r\n\r\n\r\n# ----------------------------- MODULE-LEVEL COMMANDS --------------------------\r\nclass ModuleCommand(Command):\r\n \"\"\" Proxy class (for setting the level attribute). 
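Subcommands defined below (Run and Show) subclass it so they all share level = 'module'. 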
\"\"\"\r\n level = \"module\"\r\n\r\n\r\nclass Run(ModuleCommand):\r\n \"\"\" Run module \"\"\"\r\n def run(self):\r\n if self.module.check():\r\n self.module._instance.run()\r\n\r\n\r\nclass Show(ModuleCommand):\r\n \"\"\" Show module-relevant information or options \"\"\"\r\n keys = [\"info\", \"options\"]\r\n \r\n def complete_values(self, key):\r\n if key == \"options\":\r\n return list(self.config.keys())\r\n elif key == \"issues\":\r\n l = []\r\n for attr in [\"console\", \"module\"]:\r\n for _, __, errors in getattr(self, attr).issues(self.cname):\r\n l.extend(list(errors.keys()))\r\n return l\r\n \r\n def run(self, key, value=None):\r\n if key == \"options\":\r\n if value is None:\r\n print_formatted_text(ANSI(str(self.config)))\r\n else:\r\n c = Config()\r\n c[self.config.option(value), True] = self.config[value]\r\n print_formatted_text(ANSI(str(c)))\r\n elif key == \"info\":\r\n i = self.console.module.get_info((\"fullpath|path\", \"description\"), (\"author\", \"email\", \"version\"),\r\n (\"comments\",), (\"options\",), show_all=True)\r\n if len(i.strip()) != \"\":\r\n print_formatted_text(i)\r\n elif key == \"issues\":\r\n t = Entity.get_issues()\r\n if len(t) > 0:\r\n print_formatted_text(t)\r\n \r\n def set_keys(self):\r\n if self.module and self.module.has_issues(self.cname):\r\n self.keys += [\"issues\"]\r\n else:\r\n while \"issues\" in self.keys:\r\n self.keys.remove(\"issues\")\r\n\r\n","repo_name":"dhondta/python-sploitkit","sub_path":"src/sploitkit/base/commands/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":3302,"program_lang":"python","lang":"en","doc_type":"code","stars":223,"dataset":"github-code","pt":"48"} +{"seq_id":"73160957905","text":"import tensorflow as tf \n\nFLAGS = tf.flags.FLAGS\n\ntf.flags.DEFINE_integer('num_gpus', 2,\n 'Number of GPUs available.')\ntf.flags.DEFINE_integer('total_batch_size', 64,\n 'The total batch size for each batch. 
It will be split into num_gpus partitions.')\ntf.flags.DEFINE_integer('save_epochs', 25,\n 'How often to save ckpt files.')\ntf.flags.DEFINE_integer('max_epochs', 400,\n 'train, evaluate, ensemble: maximum epochs to run;\\n'\n 'others: number of different examples to visualize.')\ntf.flags.DEFINE_integer('image_size', 28,\n 'Define the image size for dataset.')\n\ntf.flags.DEFINE_string('mode', 'train',\n 'train: train the model;\\n'\n 'test: test the model;\\n'\n 'evaluate: evaluate the model for both training and testing set using different evaluation metrics;\\n')\ntf.flags.DEFINE_string('adversarial_method', 'Default',\n 'Default: no adversarial training method applied;\\n'\n 'FGSM: fast gradient sign method;\\n'\n 'BIM: basic iterative method;\\n'\n 'LLCM: least-likely class method.')\ntf.flags.DEFINE_string('hparams_override', None,\n '--hparams_override=num_prime_capsules=64,padding=SAME,leaky=true,remake=false')\ntf.flags.DEFINE_string('data_dir', 'dataset',\n 'The data directory')\ntf.flags.DEFINE_string('dataset', 'mnist',\n 'The dataset to use for the experiment.\\n'\n 'mnist, fashion_mnist, svhn, cifar10.')\ntf.flags.DEFINE_string('model', 'caps',\n 'The model to use for the experiment.\\n'\n 'caps, caps_r or cnn.')\ntf.flags.DEFINE_string('summary_dir', './summary',\n 'The directory to write results.')\ntf.flags.DEFINE_string('load_test_path', None, \n 'The (processed) test set file to load.')\n\ntf.flags.DEFINE_float('epsilon', 1,\n 'epsilon for adversarial attacks.')\ntf.flags.DEFINE_integer('iteration_n', 1,\n 'iteration number for iterative procedure.')\n\ndef default_hparams():\n \"\"\"Builds an HParams object with default hyperparameters.\"\"\"\n return tf.contrib.training.HParams(\n decay_rate=0.96,\n decay_steps=2000,\n leaky=False,\n learning_rate=0.001,\n loss_type='margin',\n num_prime_capsules=32,\n padding='VALID',\n remake=True,\n routing=3,\n verbose=False)","repo_name":"HAXRD/Adversarial-Attacks-on-CapsNets","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"42653779111","text":"from winstealer import *\r\nfrom commons.utils import *\r\nfrom commons.skills import *\r\nfrom commons.items import *\r\nfrom commons.targeting import *\r\nimport json, time, math\r\nimport urllib3, json, urllib, ssl\r\nfrom evade import checkEvade\r\nfrom commons.timer import Timer\r\nimport random\r\nfrom API.summoner import *\r\nfrom commons.targit import *\r\n\r\nwinstealer_script_info = {\r\n \"script\": \"Vex\",\r\n \"author\": \"Vex\",\r\n \"description\": \"Vex\",\r\n \"target_champ\": \"vex\",\r\n}\r\n\r\ncombo_key = 57\r\ncombo_switch_key = 56\r\n\r\nuse_q_in_combo = True\r\nuse_w_in_combo = True\r\nuse_e_in_combo = True\r\nuse_r_in_combo = True\r\n\r\nQECombo = False\r\nEQCombo = False\r\n\r\n\r\nq = {\"Range\": 1200}\r\nq_speed1 = 600\r\nq_speed2 = 3600\r\nw = {\"Range\": 475}\r\ne = {\"Range\": 800}\r\ne_speed = 1300\r\nrmin = {\"Range\": 2000}\r\nrmax = {\"Range\": 3000}\r\nr_speed = 1600\r\n\r\ndef winstealer_load_cfg(cfg):\r\n global combo_key,use_q_in_combo, use_e_in_combo, use_w_in_combo, use_r_in_combo, combo_switch_key\r\n combo_key = cfg.get_int(\"combo_key\", 57)\r\n combo_switch_key = cfg.get_int(\"combo_switch_key\", 56)\r\n use_q_in_combo = cfg.get_bool(\"use_q_in_combo\", True)\r\n use_w_in_combo = cfg.get_bool(\"use_w_in_combo\", True)\r\n use_e_in_combo = cfg.get_bool(\"use_e_in_combo\", True)\r\n 
use_r_in_combo = cfg.get_bool(\"use_r_in_combo\", True)\r\n\r\ndef winstealer_save_cfg(cfg):\r\n global combo_key,use_q_in_combo, use_e_in_combo, use_w_in_combo, use_r_in_combo, combo_switch_key\r\n cfg.set_int(\"combo_key\", combo_key)\r\n cfg.set_int(\"combo_switch_key\", combo_switch_key)\r\n\r\n cfg.set_bool(\"use_q_in_combo\", use_q_in_combo)\r\n cfg.set_bool(\"use_w_in_combo\", use_w_in_combo)\r\n cfg.set_bool(\"use_e_in_combo\", use_e_in_combo)\r\n cfg.set_bool(\"use_r_in_combo\", use_r_in_combo)\r\n\r\ndef winstealer_draw_settings(game, ui):\r\n global combo_key,use_q_in_combo, use_e_in_combo, use_w_in_combo, use_r_in_combo,combo_switch_key\r\n ui.text(\"[Vex]\")\r\n\r\n combo_key = ui.keyselect(\"Combo key\", combo_key)\r\n \r\n \r\n if ui.treenode(\"[Combo Settings]\"):\r\n ui.text(\" Press [Alt] key to switch between combo modes \")\r\n use_q_in_combo = ui.checkbox(\"Use Q in combo\", use_q_in_combo)\r\n use_w_in_combo = ui.checkbox(\"Use W in combo\", use_w_in_combo)\r\n use_e_in_combo = ui.checkbox(\"Use E in combo\", use_e_in_combo)\r\n use_r_in_combo = ui.checkbox(\"Use R in combo\", use_r_in_combo)\r\n ui.treepop()\r\n\r\nssl._create_default_https_context = ssl._create_unverified_context\r\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\r\ndef getPlayerStats():\r\n response = urllib.request.urlopen(\"https://127.0.0.1:2999/liveclientdata/activeplayer\").read()\r\n stats = json.loads(response)\r\n return stats\r\n\r\ndef r_recast_damage(game, target):\r\n # Calculate raw R damage on target\r\n r_lvl = game.player.R.level\r\n if r_lvl == 0:\r\n return 0\r\n ap = getPlayerStats()[\"championStats\"][\"abilityPower\"]\r\n ad = getPlayerStats()[\"championStats\"][\"attackDamage\"]\r\n min_dmg = [150,250,350]\r\n missing_hp = (target.max_health - target.health)\r\n missing_hp_pct = (missing_hp / target.max_health) * 100\r\n increased_pct = 0.015 * missing_hp_pct\r\n if increased_pct > 1:\r\n increased_pct = 1\r\n r_damage = (1 + increased_pct) * (min_dmg[r_lvl - 1] + 0.50 * ap )\r\n # Reduce damage based on target's magic resist\r\n mr = target.magic_resist\r\n if mr >= 0:\r\n dmg_multiplier = 100 / (100 + mr)\r\n else:\r\n dmg_multiplier = 2 - 100 / (100 - mr)\r\n r_damage *= dmg_multiplier\r\n return r_damage\r\n\r\ndef r_initial_damage(game, target):\r\n # Calculate raw R damage on target\r\n r_lvl = game.player.R.level\r\n if r_lvl == 0:\r\n return 0\r\n ap = getPlayerStats()[\"championStats\"][\"abilityPower\"]\r\n ad = getPlayerStats()[\"championStats\"][\"attackDamage\"]\r\n min_dmg = [75,125,175]\r\n missing_hp = (target.max_health - target.health)\r\n missing_hp_pct = (missing_hp / target.max_health) * 100\r\n increased_pct = 0.015 * missing_hp_pct\r\n if increased_pct > 1:\r\n increased_pct = 1\r\n r1_damage = (1 + increased_pct) * (min_dmg[r_lvl - 1] + 0.20 * ap )\r\n # Reduce damage based on target's magic resist\r\n mr = target.magic_resist\r\n if mr >= 0:\r\n dmg_multiplier = 100 / (100 + mr)\r\n else:\r\n dmg_multiplier = 2 - 100 / (100 - mr)\r\n r1_damage *= dmg_multiplier\r\n return r1_damage\r\n\r\ndef q_damage(game, target):\r\n # Calculate raw Q damage on target\r\n q_lvl = game.player.Q.level\r\n if q_lvl == 0:\r\n return 0\r\n ap = getPlayerStats()[\"championStats\"][\"abilityPower\"]\r\n ad = getPlayerStats()[\"championStats\"][\"attackDamage\"]\r\n min_dmg = [60,110,160,210,260]\r\n missing_hp = (target.max_health - target.health)\r\n missing_hp_pct = (missing_hp / target.max_health) * 100\r\n increased_pct = 0.015 * missing_hp_pct\r\n 
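# (added note) missing-health amplifier, shared by all of the damage helpers\r\n # in this script: +1.5% damage per 1% of the target's missing HP, capped at\r\n # +100% (double damage) by the clamp just below.\r\n 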
if increased_pct > 1:\r\n increased_pct = 1\r\n q_damage = (1 + increased_pct) * (min_dmg[q_lvl - 1] + 0.60 * ap )\r\n # Reduce damage based on target's magic resist\r\n mr = target.magic_resist\r\n if mr >= 0:\r\n dmg_multiplier = 100 / (100 + mr)\r\n else:\r\n dmg_multiplier = 2 - 100 / (100 - mr)\r\n q_damage *= dmg_multiplier\r\n return q_damage\r\n\r\ndef w_damage(game, target):\r\n # Calculate raw W damage on target\r\n w_lvl = game.player.W.level\r\n if w_lvl == 0:\r\n return 0\r\n ap = getPlayerStats()[\"championStats\"][\"abilityPower\"]\r\n ad = getPlayerStats()[\"championStats\"][\"attackDamage\"]\r\n min_dmg = [80,120,160,200,240]\r\n missing_hp = (target.max_health - target.health)\r\n missing_hp_pct = (missing_hp / target.max_health) * 100\r\n increased_pct = 0.015 * missing_hp_pct\r\n if increased_pct > 1:\r\n increased_pct = 1\r\n w_damage = (1 + increased_pct) * (min_dmg[w_lvl - 1] + 0.30 * ap )\r\n # Reduce damage based on target's magic resist\r\n mr = target.magic_resist\r\n if mr >= 0:\r\n dmg_multiplier = 100 / (100 + mr)\r\n else:\r\n dmg_multiplier = 2 - 100 / (100 - mr)\r\n w_damage *= dmg_multiplier\r\n return w_damage\r\n\r\ndef e_damage(game, target):\r\n # Calculate raw E damage on target\r\n e_lvl = game.player.E.level\r\n if e_lvl == 0:\r\n return 0\r\n ap = getPlayerStats()[\"championStats\"][\"abilityPower\"]\r\n ad = getPlayerStats()[\"championStats\"][\"attackDamage\"]\r\n min_dmg = [50,70,90,110,130]\r\n min_bonus = [0.40,0.45,0.50,0.55,0.60]\r\n missing_hp = (target.max_health - target.health)\r\n missing_hp_pct = (missing_hp / target.max_health) * 100\r\n increased_pct = 0.015 * missing_hp_pct\r\n if increased_pct > 1:\r\n increased_pct = 1\r\n e_damage = (1 + increased_pct) * (min_dmg[e_lvl - 1] + min_bonus[e_lvl - 1] * ap )\r\n # Reduce damage based on target's magic resist\r\n mr = target.magic_resist\r\n if mr >= 0:\r\n dmg_multiplier = 100 / (100 + mr)\r\n else:\r\n dmg_multiplier = 2 - 100 / (100 - mr)\r\n e_damage *= dmg_multiplier\r\n return e_damage\r\n\r\nclass Fake_target():\r\n def __init__(self, name, pos, gameplay_radius):\r\n self.name = name\r\n self.pos = pos\r\n self.gameplay_radius = gameplay_radius\r\n\r\ndef predict_pos(target, duration):\r\n \"\"\"Predicts the target's new position after a duration\"\"\"\r\n target_direction = target.pos.sub(target.prev_pos).normalize()\r\n # In case the target wasn't moving\r\n if math.isnan(target_direction.x):\r\n target_direction.x = 0.0\r\n if math.isnan(target_direction.y):\r\n target_direction.y = 0.0\r\n if math.isnan(target_direction.z):\r\n target_direction.z = 0.0\r\n if target_direction.x == 0.0 and target_direction.z == 0.0:\r\n return target.pos\r\n # Target movement speed\r\n target_speed = target.movement_speed\r\n # The distance that the target will have traveled after the given duration\r\n distance_to_travel = target_speed * duration\r\n return target.pos.add(target_direction.scale(distance_to_travel))\r\n\r\ndef EQ1Combo(game):\r\n Q = getSkill(game, \"Q\")\r\n E = getSkill(game, \"E\")\r\n before_cpos = game.get_cursor()\r\n\r\n if use_e_in_combo and IsReady(game, E):\r\n target = game.GetBestTarget(\r\n UnitTag.Unit_Champion,\r\n e['Range'],\r\n )\r\n\r\n if ValidTarget(target):\r\n e_travel_time = e['Range'] / e_speed\r\n predicted_pos = predict_pos(target, e_travel_time)\r\n predicted_target = Fake_target(target.name, predicted_pos, target.gameplay_radius)\r\n if game.player.pos.distance(predicted_target.pos) <= e['Range']:\r\n 
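# (added note) cast pattern: aim the cursor at the predicted position, fire\r\n # the spell, then restore the original cursor so the player keeps control.\r\n 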
game.move_cursor(game.world_to_screen(predicted_target.pos))\r\n time.sleep(0.01)\r\n E.trigger(False)\r\n time.sleep(0.01)\r\n game.move_cursor(before_cpos)\r\n \r\n if use_q_in_combo and IsReady(game, Q):\r\n target = game.GetBestTarget(\r\n UnitTag.Unit_Champion,\r\n q['Range'],\r\n )\r\n if ValidTarget(target):\r\n q_travel_time = q['Range'] / q_speed2\r\n predicted_pos = predict_pos(target, q_travel_time)\r\n predicted_target = Fake_target(target.name, predicted_pos, target.gameplay_radius)\r\n if game.player.pos.distance(predicted_target.pos) <= q['Range']:\r\n game.move_cursor(game.world_to_screen(predicted_target.pos))\r\n time.sleep(0.01)\r\n Q.trigger(False)\r\n time.sleep(0.01)\r\n game.move_cursor(before_cpos)\r\n\r\ndef QE1Combo(game):\r\n Q = getSkill(game, \"Q\")\r\n E = getSkill(game, \"E\")\r\n before_cpos = game.get_cursor() \r\n if use_q_in_combo and IsReady(game, Q):\r\n target = game.GetBestTarget(\r\n UnitTag.Unit_Champion,\r\n q['Range'],\r\n )\r\n if ValidTarget(target):\r\n q_travel_time = q['Range'] / q_speed2\r\n predicted_pos = predict_pos(target, q_travel_time)\r\n predicted_target = Fake_target(target.name, predicted_pos, target.gameplay_radius)\r\n if game.player.pos.distance(predicted_target.pos) <= q['Range']:\r\n game.move_cursor(game.world_to_screen(predicted_target.pos))\r\n time.sleep(0.01)\r\n Q.trigger(False)\r\n time.sleep(0.01)\r\n game.move_cursor(before_cpos)\r\n \r\n if use_e_in_combo and IsReady(game, E):\r\n target = game.GetBestTarget(\r\n UnitTag.Unit_Champion,\r\n e['Range'],\r\n )\r\n if ValidTarget(target):\r\n e_travel_time = e['Range'] / e_speed\r\n predicted_pos = predict_pos(target, e_travel_time)\r\n predicted_target = Fake_target(target.name, predicted_pos, target.gameplay_radius)\r\n if game.player.pos.distance(predicted_target.pos) <= e['Range']:\r\n game.move_cursor(game.world_to_screen(predicted_target.pos))\r\n time.sleep(0.01)\r\n E.trigger(False)\r\n time.sleep(0.01)\r\n game.move_cursor(before_cpos)\r\n\r\ndef WCombo(game):\r\n W = getSkill(game, \"W\")\r\n before_cpos = game.get_cursor() \r\n if use_w_in_combo and IsReady(game, W):\r\n target = game.GetBestTarget(\r\n UnitTag.Unit_Champion,\r\n w['Range'],\r\n )\r\n if ValidTarget(target):\r\n if game.player.pos.distance(target.pos) <= w['Range']:\r\n W.trigger(False)\r\n\r\ndef RCombo(game):\r\n Q = getSkill(game, \"Q\")\r\n W = getSkill(game, \"W\")\r\n E = getSkill(game, \"E\")\r\n R = getSkill(game, \"R\")\r\n before_cpos = game.get_cursor()\r\n total_damage = 0\r\n target = GetBestTargetsInRange(game)\r\n\r\n if use_r_in_combo and IsReady(game, R):\r\n target = game.GetBestTarget(\r\n UnitTag.Unit_Champion,\r\n rmin[\"Range\"],\r\n )\r\n if ValidTarget(target):\r\n r_travel_time = rmax[\"Range\"] / r_speed\r\n predicted_pos = predict_pos(target, r_travel_time)\r\n predicted_target = Fake_target(target.name, predicted_pos, target.gameplay_radius)\r\n if game.player.pos.distance(predicted_target.pos) <= rmax[\"Range\"] and target.health < r_recast_damage(game, target):\r\n game.move_cursor(game.world_to_screen(predicted_target.pos))\r\n time.sleep(0.01)\r\n R.trigger(False)\r\n time.sleep(0.01)\r\n game.move_cursor(before_cpos)\r\n\r\ndef winstealer_update(game, ui):\r\n self = game.player\r\n global EQCombo\r\n\r\n\r\n\r\n if self.is_alive and self.is_visible :\r\n if game.was_key_pressed(combo_switch_key):\r\n EQCombo = ~EQCombo\r\n if EQCombo:\r\n pos = game.player.pos\r\n game.draw_text(game.world_to_screen(pos).add(Vec2(-15,20)), \"E->Q\", Color.PURPLE)\r\n if 
game.is_key_down(combo_key):\r\n EQ1Combo(game)\r\n else:\r\n pos = game.player.pos\r\n game.draw_text(game.world_to_screen(pos).add(Vec2(-15,20)), \"Q->E\", Color.PURPLE)\r\n if game.is_key_down(combo_key):\r\n QE1Combo(game)\r\n if use_w_in_combo and game.is_key_down(combo_key):\r\n WCombo(game)\r\n if use_r_in_combo and game.is_key_down(combo_key):\r\n RCombo(game)\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n \r\n\r\n","repo_name":"chinesedoinb/lviewdatabase","sub_path":"3vex.py","file_name":"3vex.py","file_ext":"py","file_size_in_byte":13322,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"48"} +{"seq_id":"31585297837","text":"import torch\nimport random\nimport numpy as np\nimport os\n\ndef set_seed(seed, rank, world_size):\n rng = random.Random(seed)\n seed_per_rank = [rng.randint(0, 2**32-1) for _ in range(world_size)]\n cur_seed = seed_per_rank[rank]\n\n torch.manual_seed(cur_seed)\n torch.cuda.manual_seed(cur_seed)\n torch.cuda.manual_seed_all(cur_seed)\n np.random.seed(cur_seed)\n random.seed(cur_seed)\n #torch.set_deterministic(True)\n torch.backends.cudnn.deterministic = True\n #torch.backends.cudnn.enabled = False\n torch.backends.cudnn.benchmark = False\n os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':4096:8'\n os.environ['PYTHONHASHSEED'] = str(cur_seed)\n","repo_name":"webber2933/iCLIP","sub_path":"iCLIP/utils/random_seed.py","file_name":"random_seed.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"16690305221","text":"from sqlalchemy.ext.compiler import compiles\n\nfrom ..sqlalchemy.ext import SACreateTempTableAs\n\n\nclass CreateTempTableAs(SACreateTempTableAs):\n\n def __init__(self, name, query, expr):\n super(CreateTempTableAs, self).__init__(name, query)\n self.expr = expr\n\n\n@compiles(CreateTempTableAs)\ndef create_temp_table_as(element, compiler, **kw):\n return 'CREATE TEMP TABLE %s WITH(appendonly=true, orientation=heap, bucketnum=1) ' \\\n 'AS %s DISTRIBUTED BY (%s)' % (\n element.name,\n compiler.process(element.query),\n ', '.join(element.expr.schema.names) # we just distribute by all columns\n )\n","repo_name":"aliyun/aliyun-odps-python-sdk","sub_path":"odps/df/backends/seahawks/ext.py","file_name":"ext.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":399,"dataset":"github-code","pt":"48"} +{"seq_id":"44380705164","text":"from openselfsup.utils import build_from_cfg\n\nimport torch\nfrom PIL import Image\nfrom torchvision.transforms import Compose, RandomCrop\nimport torchvision.transforms.functional as TF\n\nfrom .registry import DATASETS, PIPELINES\nfrom .base import BaseDataset\n\n\ndef image_to_patches(img):\n \"\"\"Crop split_per_side x split_per_side patches from input image.\n\n Args:\n img (PIL Image): input image.\n\n Returns:\n list[PIL Image]: A list of cropped patches.\n \"\"\"\n split_per_side = 3 # split of patches per image side\n patch_jitter = 21 # jitter of each patch from each grid\n w, h = img.size # PIL's Image.size is (width, height)\n h_grid = h // split_per_side\n w_grid = w // split_per_side\n h_patch = h_grid - patch_jitter\n w_patch = w_grid - patch_jitter\n assert h_patch > 0 and w_patch > 0\n patches = []\n for i in range(split_per_side):\n for j in range(split_per_side):\n p = TF.crop(img, i * h_grid, j * w_grid, h_grid, w_grid)\n p = RandomCrop((h_patch, w_patch))(p)\n patches.append(p)\n return patches\n\n\n@DATASETS.register_module\nclass 
RelativeLocDataset(BaseDataset):\n    \"\"\"Dataset for relative patch location.\n    \"\"\"\n\n    def __init__(self, data_source, pipeline, format_pipeline):\n        super(RelativeLocDataset, self).__init__(data_source, pipeline)\n        format_pipeline = [build_from_cfg(p, PIPELINES) for p in format_pipeline]\n        self.format_pipeline = Compose(format_pipeline)\n\n    def __getitem__(self, idx):\n        img = self.data_source.get_sample(idx)\n        assert isinstance(img, Image.Image), \\\n            'The output from the data source must be an Image, got: {}. \\\n            Please ensure that the list file does not contain labels.'.format(\n            type(img))\n        img = self.pipeline(img)\n        patches = image_to_patches(img)\n        patches = [self.format_pipeline(p) for p in patches]\n        perms = []\n        # create a list of patch pairs\n        [perms.append(torch.cat((patches[i], patches[4]), dim=0)) for i in range(9) if i != 4]\n        # create corresponding labels for patch pairs\n        patch_labels = torch.LongTensor([0, 1, 2, 3, 4, 5, 6, 7])\n        return dict(img=torch.stack(perms), patch_label=patch_labels) # 8(2C)HW, 8\n\n    def evaluate(self, scores, keyword, logger=None):\n        raise NotImplementedError\n","repo_name":"WXinlong/DenseCL","sub_path":"openselfsup/datasets/relative_loc.py","file_name":"relative_loc.py","file_ext":"py","file_size_in_byte":2327,"program_lang":"python","lang":"en","doc_type":"code","stars":523,"dataset":"github-code","pt":"48"}
+{"seq_id":"70905027985","text":"# coding:utf-8\nclass Node:\n    def __init__(self, value=None, next=None, prev=None):\n        self.value, self.next, self.prev = value, next, prev\n\n\nclass Cycle_Double_Linkedlist:\n    def __init__(self, maxsize=None):\n        self.maxsize = maxsize\n        node = Node()\n        node.next, node.prev = node, node\n        self.root = node\n        self.length = 0\n\n    def headnode(self):\n        return self.root.next\n\n    def tailnode(self):\n        return self.root.prev\n\n    def __len__(self):\n        return self.length\n\n    def iter_node(self):\n        if self.root.next is self.root:\n            return\n        else:\n            curnode = self.headnode()\n            while curnode.next is not self.root:\n                yield curnode\n                curnode = curnode.next\n            yield curnode\n\n    def iter_node_reverse(self):\n        if self.root.next is self.root:\n            return\n        else:\n            curnode = self.tailnode()\n            while curnode.prev is not self.root:\n                yield curnode\n                curnode = curnode.prev\n            yield curnode\n\n    def __iter__(self):\n        for node in self.iter_node():\n            yield node.value\n\n    def append(self, value): # O(1), 你发现一般不用 for 循环的就是 O(1),有限个步骤\n        if self.maxsize is not None and len(self) > self.maxsize:\n            raise Exception('full of circle double linkedlist')\n        node = Node(value=value)\n        tailnode = self.tailnode() or self.root\n        tailnode.next = node\n        node.prev = tailnode\n        node.next = self.root\n        self.root.prev = node\n        self.length += 1\n\n    def appendleft(self, value):\n        if self.maxsize is not None and len(self) > self.maxsize:\n            raise Exception('full of circle double linkedlist')\n        node = Node(value=value)\n        if self.root.next is self.root:\n            node.next = self.root\n            node.prev = self.root\n            self.root.next = node\n            self.root.prev = node\n        else:\n            node.next = self.headnode()\n            node.prev = self.root\n            self.headnode().prev = node\n            self.root.next = node\n        self.length += 1\n\n    def remove(self, node): # O(1),传入node 而不是 value 我们就能实现 O(1) 删除\n        if node is self.root:\n            return\n        else:\n            node.prev.next = node.next\n            node.next.prev = node.prev\n            self.length -= 1\n\n\ndef test_Cycle_double_linkedlist():\n    cdll = Cycle_Double_Linkedlist()\n    assert len(cdll) == 0\n    cdll.append(1)\n    cdll.append(2)\n    cdll.append(3)\n    assert list(cdll) == [1, 2, 3]\n    assert [node.value for node 
in cdll.iter_node()] == [1, 2, 3]\n    assert [node.value for node in cdll.iter_node_reverse()] == [3, 2, 1]\n    assert cdll.headnode().value == 1\n    cdll.remove(cdll.headnode())\n    assert len(cdll) == 2\n    assert [node.value for node in cdll.iter_node()] == [2, 3]\n    cdll.appendleft(0)\n    assert [node.value for node in cdll.iter_node()] == [0, 2, 3]\n","repo_name":"articuly/python_study","sub_path":"数据结构与算法/数据结构CSDN_循环双端链表.py","file_name":"数据结构CSDN_循环双端链表.py","file_ext":"py","file_size_in_byte":3015,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
+{"seq_id":"15309740813","text":"# Faça um programa que leia um número e retorne o fatorial !\r\n\r\nnumero = int(input('Digite um número para saber seu fatorial:'))\r\nfatorial = numero - 1\r\n\r\nwhile fatorial > 1:\r\n    numero = numero * fatorial\r\n    fatorial -= 1\r\n\r\nprint(numero)","repo_name":"isabluiza/senai-python","sub_path":"resolucoes/039.py","file_name":"039.py","file_ext":"py","file_size_in_byte":246,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"42043423348","text":"# https://think.cs.vt.edu/corgis/python/drugs/drugs.html\nimport pandas\nfrom matplotlib import pyplot\ndata = pandas.read_csv('drugs.csv')\nfor x in data.columns: print(x)\n\n\nohio = data.query('State==\"Ohio\"')\n\npyplot.figure()\npyplot.plot(ohio.Year, ohio['Rates_Tobacco_Cigarette Past Month_12-17'])\npyplot.plot(ohio.Year, ohio['Rates_Tobacco_Cigarette Past Month_18-25'])\npyplot.plot(ohio.Year, ohio['Rates_Tobacco_Cigarette Past Month_26+'])\npyplot.legend(['Y','M','O'])\n\n\npyplot.figure()\npyplot.plot(ohio.Year, ohio['Rates_Alcohol_Dependence Past Year_12-17'])\npyplot.plot(ohio.Year, ohio['Rates_Alcohol_Dependence Past Year_18-25'])\npyplot.plot(ohio.Year, ohio['Rates_Alcohol_Dependence Past Year_26+'])\npyplot.legend(['Y','M','O'])\n\n\npyplot.show()\n\nlatest = data.query('Year == 2014')\n\nv1 = latest[\"Rates_Alcohol_Need Treatment Past Year_18-25\"]\nv2 = latest[\"Rates_Alcohol_Perceptions of Risk_18-25\"]\n\npyplot.scatter(v2,v1)\npyplot.show()\n\ndep = \"Rates_Alcohol_Need Treatment Past Year_26+\"\nind = \"Rates_Alcohol_Perceptions of Risk_18-25\"\npyplot.show()","repo_name":"dvanderelst/GradStats","sub_path":"DataScenarios/scenario_Drugs/process_drugs.py","file_name":"process_drugs.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"29539676723","text":"'''\nGiven an array A of 0s and 1s, consider N_i: the i-th subarray from A[0] to A[i] interpreted as a binary number (from most-significant-bit to least-significant-bit.)\n\nReturn a list of booleans answer, where answer[i] is true if and only if N_i is divisible by 5.\n\nExample 1:\n\nInput: [0,1,1]\nOutput: [true,false,false]\nExplanation: \nThe input numbers in binary are 0, 01, 011; which are 0, 1, and 3 in base-10. 
Only the first number is divisible by 5, so answer[0] is true.\nExample 2:\n\nInput: [1,1,1]\nOutput: [false,false,false]\n\nNote:\n\n1 <= A.length <= 30000\nA[i] is 0 or 1\n'''\n\nclass Solution(object):\n def prefixesDivBy5(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: List[bool]\n \"\"\"\n result = []\n if not A:\n return []\n str_bin = ''\n for val in A:\n str_bin += str(val)\n if(int(str_bin, 2)%5 == 0):\n result.append(True)\n else:\n result.append(False)\n return result\n","repo_name":"Garvit244/Leetcode","sub_path":"1000-1100q/1018.py","file_name":"1018.py","file_ext":"py","file_size_in_byte":1007,"program_lang":"python","lang":"en","doc_type":"code","stars":1245,"dataset":"github-code","pt":"48"} +{"seq_id":"5160462270","text":"from colorama import Fore, Back, Style\nfrom os import system, name\nfrom time import sleep\nfrom os import system\nimport global_stuff\nimport object\nimport collisions\n\npower_duration=global_stuff.power_duration\n\nclass powerup(object.object):\n\n def __init__(self,x,y,effect):\n \n self._x=x\n self._y=y\n self._h=1 \n self._w=1\n if effect == 1:\n self._img=[[\"B\"]]\n if effect == 2:\n self._img=[[\"S\"]]\n if effect == 3:\n self._img=[[\"F\"]]\n if effect == 4:\n self._img = [[\"D\"]]\n if effect == 5:\n self._img = [[\"G\"]]\n if effect == 6:\n self._img = [[\"P\"]]\n \n self._xv=0\n self._yv=1\n self.spawned = 0\n self._effect = effect\n self.time = 0\n\n def collected(self, board):\n\n if self._y + 1 > 48:\n self.spawned=0\n self._img=[[\" \"]]\n return\n\n else:\n for i in range(self._yv):\n if int(self._y)+1+i > 49:\n self.spawned=0\n return -1\n\n if board[int(self._y)+1+i][int(self._x)] == \"=\" or board[int(self._y)+1+i][int(self._x)] == \"!\":\n self.remove_onscreen(board)\n self.spawned = 0\n power_duration[self._effect-1]+=150\n if self._effect == 1:\n power_duration[1]=0\n if self._effect == 2:\n power_duration[0]=0\n return self._effect\n\n def move_powerup(self,board):\n\n\n self.remove_onscreen(board)\n fl=collisions.pp_wall(self,board)\n # collisions.pp_brick(self,board)\n\n self._y+=self._yv\n\n self.time+=1\n if self.time==10:\n self._yv+=1\n self.time=0\n if fl==1:\n self._x+=self._xv\n self._y-=self._yv\n if fl==-1:\n \n self.spawned=0\n for i in range(1,49):\n board[48][i]=\" \"\n self.remove_onscreen(board)\n\n return self.collected(board)\n\n self.print_onscreen(board)\n\n return self.collected(board)\n \n def fall(self,board):\n\n self.remove_onscreen(board)\n self._y+=1\n \n","repo_name":"Aardg/BrickBreaker","sub_path":"powerup_master.py","file_name":"powerup_master.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"11342135993","text":"import sys\nfrom bitstring import BitArray\nfrom btc_utils import * \n\nHEADERS_FILE = './data/btc_headers'\n\ndef main():\n if len(sys.argv) != 2:\n print('Usage: python get_header.py ')\n exit(0) \n\n block_number = int(sys.argv[1])\n b, block_bytes = get_header(block_number, HEADERS_FILE)\n if b is None: \n return 1 \n\n htime = int.from_bytes(b.timestamp, 'little') \n nbits = int.from_bytes(b.nbits, 'little') \n\n block_hash = get_btc_hash(block_bytes) \n hash248 = block_hash[1:] # 31 bytes\n print('Hash (Hex): %s' % block_hash.hex()) \n print('Hash (Int): %d' % int.from_bytes(block_hash, 'big')) \n print('Hash248 (Int): %d' % int.from_bytes(hash248, 'big')) \n print('Block Number: %d' % b.block_number)\n print('Prev Hash: %s' % b.hash_prev.hex())\n print('TimeS: %d' % htime)\n 
print('NBits: %d' % nbits)\n \n bits = BitArray(block_bytes)\n bits_str = ''\n for bit in bits.bin: \n bits_str += bit + ' ' \n print(bits_str)\n \n return 0 \n \nif __name__== '__main__':\n main()\n","repo_name":"BromleyLabs/BTCSnarkRelay","sub_path":"test/get_header.py","file_name":"get_header.py","file_ext":"py","file_size_in_byte":1044,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"48"} +{"seq_id":"41511397903","text":"#to be run in python3\n\nimport openmatrix as omx, numpy as np, tables, os, sys, pandas as pd\n\n#get arguments\nmodel_name = sys.argv[1] \noutput_dir = sys.argv[2]\n\nfor period in ['EA', 'AM', 'MD', 'PM', 'EV']:\n \n print(\"Working on setting zone mapping for cv trips_%s.omx\" %(period))\n # check if the old file exists, if so, delete it\n if os.path.exists(output_dir + \"/%s/trips_%s_.omx\" % (model_name, period)):\n os.remove(output_dir + \"/%s/trips_%s_.omx\" % (model_name, period))\n # rename the file\n os.rename(output_dir + \"/%s/trips_%s.omx\" % (model_name, period), output_dir + \"/%s/trips_%s_.omx\" % (model_name, period))\n trip_table_old = omx.open_file(output_dir + \"/%s/trips_%s_.omx\" % (model_name, period), 'r')\n trip_table = omx.open_file(output_dir + \"/%s/trips_%s.omx\" % (model_name, period), 'w')\n\n for core in trip_table_old.list_matrices():\n\n mapping_name = trip_table_old.list_mappings()[0]\n zone_mapping = trip_table_old.mapping(mapping_name)\n zones = list(zone_mapping.keys())\n zones_sorted = sorted(zones)\n pos = [zones.index(zone) for zone in zones_sorted]\n\n data = trip_table_old[core]\n data = data[:][pos,:][:,pos]\n\n trip_table[core] = data\n\n trip_table_old.close()\n trip_table.close()","repo_name":"camsys/SANDAG-ABM","sub_path":"src/asim-cvm/scripts/set_zoneMapping.py","file_name":"set_zoneMapping.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"48"} +{"seq_id":"30918479260","text":"import setuptools\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\nsetuptools.setup(\n name='character_tracker',\n version='0.17',\n packages=setuptools.find_packages(),\n url='https://github.com/llpk79/motw',\n license='MIT',\n author='Paul Kutrich',\n author_email='pkutrich@gmail.com',\n description='Tracker for Monster of the Week characters',\n long_description=long_description,\n include_package_data=True,\n package_data={\n \"character_tracker\": [\"pickle/*.pkl\", \"archetypes/*.json\"]\n }\n)\n","repo_name":"llpk79/motw","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"14484238992","text":"import sqlite3 as lite\nimport sys\nimport os\npath = os.path.dirname(__file__) + \"csdl3.db\"\ncon = lite.connect(path)\n\nwith con:\n sql = con.cursor()\n sql.execute(\n \"CREATE TABLE SINHVIEN(MASV INT, TENSV TEXT,NAMSINH DATE,GIOITINH CHAR, HOCPHI INT)\")\n sql.execute(\n \"INSERT INTO SINHVIEN VALUES(1, 'Nguyen Nhu Y', '26/6/40','Nam', 123456)\")\n sql.execute(\n \"INSERT INTO SINHVIEN VALUES(2, 'Nguyen Thi B', '26/6/30', 'Nu', 1234567)\")\ncon.close()\n\n\n# xoas\n# import sqlite3 as lite\n# import sys\n# import os\n# path = os.path.dirname(__file__) + \"csdl2.db\"\n# con = lite.connect(path)\n\n# with con:\n# sql = con.cursor()\n# sql.execute(\n# \"drop table SINHVIEN\")\n# con.close()\n\n\n# cap nhap\n# import MySQLdb\n# db = 
MySQLdb.connect(\"localhost\", \"root\", \"123\", \"TESTDB\")\n# cursor = db.cursor()\n\n# # Truy van SQL de UPDATE cac ban ghi\n# sql = \"UPDATE SINHVIEN SET TUOI = TUOI + 1 WHERE GIOITINH = '%c'\" % ('M')\n# try:\n# cursor.execute(sql)\n# # Commit cac thay doi vao trong Database\n# db.commit()\n# except:\n# # Rollback trong tinh huong co bat ky error nao\n# db.rollback()\n# db.close()\n","repo_name":"nhuydev1704/linux","sub_path":"sql/taocsdl.py","file_name":"taocsdl.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"22872027382","text":"from core.dag.dag import DAGraph\nfrom visuals.show_by_depth import ShowGraph\n\n\nclass NN(DAGraph, ShowGraph):\n \"\"\" 神经网络\n 由若干起始节点和若干结束节点组成\n\n ATTRIBUTE\n nodes : 神经元结点\n\n METHOD\n forward(*args, **kwargs) : 前向传播 x, parameters --> y\n backward() : 反向传播 grad_upper_layer --> grad_this_layer, grad_params_this_layer\n optimize(lr, algorithm=\"SGD\") : 应用相应的优化算法,更新参数的梯度\n zero_grad() : 将所有结点的 _grad 清零,默认不清除 local_grad\n create_connections(): (初始化调用)将网络中所有有关联的节点创建连接\n \"\"\"\n def __init__(self, leaves, roots, create_connection=True):\n nodes = self.get_subgraph(leaves, roots)\n super(NN, self).__init__(nodes)\n if create_connection:\n self.create_connections()\n\n def forward(self, *args, **kwargs):\n \"\"\" 前向传播\n 调用此函数说明前向传播由此发起\n 因此传入参数必须是完整的\n INPUT\n *args : 按shape匹配到 interface 的相应位置,并传入\n *kwargs : 传入key对应的interface位置\n\n OUTPUT\n outputs : 列表,所有根节点上接收到的输出\n 若只有一个输出,则返回的是输出数组\n \"\"\"\n\n # for x in args:\n # for inp_layer in self.leaves:\n # if inp_layer.match_inp_shape(x.shape):\n # keep_rock_n_rolling(inp_layer, input=x)\n\n for key, val in kwargs.items():\n for inp_layer in self.leaves:\n if key in inp_layer.interface.keys():\n if inp_layer.interface[key] is None:\n keep_rock_n_rolling(inp_layer, input_=val, name=key)\n break\n\n outputs = [root.output for root in self.roots]\n if len(outputs) == 1:\n outputs = outputs[0]\n\n return outputs\n\n def backward(self):\n \"\"\" 反向传播\n 调用跳阶梯度,然后与局部梯度相乘\n \"\"\"\n self.back_flow(grad_accumulator)\n\n def optimize(self, lr, algorithm='SGD'):\n self.for_flow(grad_optimizer, fargs=(lr, 'SGD'))\n\n def zero_grad(self):\n self.for_flow(zero)\n\n def create_connections(self):\n self.for_flow(conn)\n\n\ndef keep_rock_n_rolling(neuron, *, input_, name=None):\n \"\"\"不能用 for_flow 的原因\n 流动的顺序需要控制\n 只有神经元接受了子节点的全部信息后\n 才能继续流动\n 需要多线程?\n control_for_flow(self, fargs, wait_until)\n 未来将会重写这个函数\n \"\"\"\n if name:\n output = neuron(**{name: input_})\n else:\n output = neuron(input_)\n if output is not None:\n for p in neuron.parents.values():\n keep_rock_n_rolling(p, input_=output, name=neuron.name)\n neuron.clear_interface()\n\n\ndef grad_accumulator(neuron):\n if neuron.parents == {}:\n for key, val in neuron.local_grad_.items():\n neuron.grad_[key] = val\n for key, val in neuron.local_grad_params_.items():\n neuron.grad_params_[key] = val\n else:\n for i, (name, p) in enumerate(neuron.parents.items()):\n if i == 0:\n for key, val in neuron.local_grad_.items():\n neuron.grad_[key] = p.grad_[neuron.name] @ val\n for key, val in neuron.local_grad_params_.items():\n neuron.grad_params_[key] = val.T @ p.grad_[neuron.name]\n\n else:\n for key, val in neuron.local_grad_.items():\n neuron.grad_[key] += p.grad_[neuron.name] @ val\n for key, val in neuron.local_grad_params_.items():\n neuron.grad_params_[key] += val.T @ p.grad_[neuron.name]\n neuron.clear_local_grad()\n\n\ndef grad_optimizer(neuron, fargs=(0.01, 
'SGD')):\n neuron.optimize(lr=fargs[0], algorithm=fargs[1])\n neuron.clear_grad()\n\n\ndef zero(neuron):\n neuron.clear_grad()\n\n\ndef conn(node):\n for c in node.children.values():\n node.create_connection(c)\n","repo_name":"tcchhlegend/Kafka----------Neurual-Network-Package-of-New-Generation","sub_path":"core/nn/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"16690143821","text":"import sys\n\nimport pytest\n\nfrom ...config import options\nfrom ...tests.core import tn\n\nif sys.version_info[0] >= 3:\n from .storage_api import StorageApiArrowClient\nelse:\n StorageApiArrowClient = None\n\n\n_test_table_id = 0\n\n\n@pytest.fixture\ndef storage_api_client(odps_daily):\n global _test_table_id\n\n options.always_enable_schema = True\n\n test_table_name = tn(\"test_halo_common_table_\" + str(_test_table_id))\n _test_table_id += 1\n odps_daily.delete_table(test_table_name, if_exists=True)\n table = odps_daily.create_table(\n test_table_name,\n (\"a BIGINT, b BIGINT, c BIGINT, d BIGINT\", \"pt string\"),\n if_not_exists=True,\n )\n try:\n yield StorageApiArrowClient(odps_daily, table)\n finally:\n table.drop(async_=True)\n options.always_enable_schema = False","repo_name":"aliyun/aliyun-odps-python-sdk","sub_path":"odps/apis/storage_api/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":399,"dataset":"github-code","pt":"48"} +{"seq_id":"1401773551","text":"from queue import Queue\nimport logging\n\nfrom utils import *\n\n\ndef ddmin(c, n, test):\n \"\"\"\n The original delta debugging algorithm\n\n :param c: Current input\n :param n: Current granularity\n :param test: Test function used to determine if a particular input leads to a fault, a return value of True\n indicates a failure\n :return: Minimal subset of c leading to a failure\n \"\"\"\n\n logger = logging.getLogger('network-testing')\n logger.info(f'\\nRunning minimization on set {str_repr(c)} with granularity {n}\\n')\n\n if not n <= len(c):\n return c\n\n split = [c[i * (len(c) // n) + min(i, len(c) % n):(i + 1) * (len(c) // n) + min(i + 1, len(c) % n)] for i in\n range(n)]\n\n comps = [[i for i in c if i not in delta] for delta in split]\n\n for delta in split:\n if test(delta):\n return ddmin(delta, 2, test)\n\n for comp in comps:\n if test(comp):\n return ddmin(comp, max(n - 1, 2), test)\n\n if n < len(c):\n return ddmin(c, min(len(c), 2 * n), test)\n else:\n return c\n\n\ndef ddmin_iter(c, test_f, test_tc, f_to_tc):\n \"\"\"\n Iterative delta debugging algorithm\n :param c: Starting input\n :param test_f: Higher level test function\n :param test_tc: Lower level test function\n :param f_to_tc: Function to convert from high to low level\n :return: A list of minimal failure-inducing subsets for c\n \"\"\"\n logger = logging.getLogger('network-testing')\n\n min_sets = []\n q = Queue()\n q.put(c)\n\n while not q.empty():\n h = q.get()\n faulty_feature_set = False\n for ms in min_sets:\n if all([f in f_to_tc(h) for f in ms]):\n for f in ms:\n q.put([x for x in h if x != f])\n faulty_feature_set = True\n break\n if faulty_feature_set:\n continue\n\n if test_f(h):\n min_features = ddmin(h, 2, test_f)\n min_set = ddmin(f_to_tc(min_features), 2, test_tc)\n logger.info(f'New minimal subset: {str_repr(min_set)}\\n')\n min_sets.append(min_set)\n\n for f in min_features:\n q.put([x for x in h if x != f])\n\n return 
min_sets\n","repo_name":"nsg-ethz/Metha","sub_path":"delta_debugging.py","file_name":"delta_debugging.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"48"}
+{"seq_id":"18262561736","text":"balance = 3329\nannualInterestRate = 0.2\n\ndyn_balance = balance\nmonthlyInterestRate = annualInterestRate / 12\nlowerBound = balance / 12\nupperBound = (balance * (1+monthlyInterestRate)**12)/12\nfixedPayment = 0\n\nwhile(abs(dyn_balance) > 0.01):\n    dyn_balance = balance\n    fixedPayment = 0.5*(lowerBound + upperBound)\n\n    for i in range(1, 13):\n        unpaidBalance = dyn_balance - fixedPayment\n        interest = monthlyInterestRate * unpaidBalance\n        dyn_balance = unpaidBalance + interest\n    if (dyn_balance > 0):\n        lowerBound = fixedPayment\n    elif (dyn_balance < 0):\n        upperBound = fixedPayment\n\nprint(\"Lowest Payment:\", round(fixedPayment, 2)) \n","repo_name":"BiplabG/edx-Python-course-MIT-6001","sub_path":"week 2/problem set 2/problem-3.py","file_name":"problem-3.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"70725973265","text":"# imports\nfrom statsmodels.distributions.empirical_distribution import ECDF\nfrom statsmodels.tsa.ar_model import AutoReg\nfrom scipy.stats import pearsonr\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nimport yfinance as yf\nimport pandas as pd\nimport numpy as np\n\nplt.style.use('ggplot')\n\n\nclass ARGG:\n    __slots__ = ('prices', 'returns', 'error', 'slope', 'intercept', 'mu', 'std', 'beta', 'loc', 'scale')\n\n    def __init__(self, prices):\n        # store original data\n        self.prices = prices.to_numpy()\n        self.returns = prices.pct_change(1).to_numpy()[1:]\n\n        # model returns using an AR-1\n        intercept, slope = AutoReg(self.returns, 1).fit().params\n        self.intercept = intercept\n        self.slope = slope\n\n        # calculate error term\n        error = (self.intercept + (self.slope * self.returns[:-1])) - self.returns[1:]\n        self.error = error\n\n        # calculate typical statistics for error term\n        self.mu = self.error.mean()\n        self.std = self.error.std()\n\n        # model error term using a generalized gaussian\n        beta, loc, scale = stats.gennorm.fit(self.error, method='MM')\n        self.beta = beta\n        self.loc = loc\n        self.scale = scale\n\n    def error_cdf(self, x):\n        return stats.gennorm.cdf(x, self.beta, self.loc, self.scale)\n\n    def error_pdf(self, x):\n        return stats.gennorm.pdf(x, self.beta, self.loc, self.scale)\n\n    def inverse_error_pdf(self, x):\n        return stats.gennorm.ppf(x, self.beta, self.loc, self.scale)\n\n    def gen_error(self, n):\n        return stats.gennorm.rvs(self.beta, self.loc, self.scale, size=n)\n\n    def gen_walk(self, n):\n        walk_arr = [self.returns[-1]]\n        while len(walk_arr) < n + 1:\n            new_return = float((self.intercept + (self.slope * walk_arr[-1])) + self.gen_error(1))\n            walk_arr.append(new_return)\n        return np.array(walk_arr[1:])\n\n    def error_conf(self, conf):\n        return stats.gennorm.interval(conf, self.beta, self.loc, self.scale)\n\n    def __str__(self):\n        string = 'Distribution Details:\\n'\n        string += f'Intercept: {self.intercept:.3f}\\n'\n        string += f'Lag Weight: {self.slope:.3f}\\n'\n        string += f'Error Mu: {self.mu:.3f}\\n'\n        string += f'Error STD: {self.std:.3f}\\n'\n        string += f'Error Loc: {self.loc:.3f}\\n'\n        string += f'Error Scale: {self.scale:.3f}\\n'\n        string += f'Error Beta: {self.beta:.3f}'\n        return string\n\n\n# load in stock price data\ntickers = 'AAPL'\nstart = '2018-01-06'\nend = 
'2023-01-06'\n\n# fetch stock data\nprices = yf.download(tickers, start=start, end=end, group_by='column', progress=True, threads=True)['Adj Close']\nprices.fillna(method='ffill', inplace=True)\nprices.dropna(axis=0, inplace=True)\nreturns = prices.pct_change(1)[1:].values\n\n# check the first lag auto-correlation\nplt.scatter(returns[0:-1], returns[1:], alpha=0.25)\nplt.title(\"Apple Daily Returns vs. Apple 1-Lag Daily Returns\")\nplt.xlabel(\"Apple 1-Lag Daily Returns (%)\")\nplt.ylabel(\"Apple Daily Returns (%)\")\nlagged_corr = pearsonr(returns[1:], returns[0:-1])\nprint(f'Lagged Correlation is {lagged_corr} Explaining {lagged_corr[0] ** 2 * 100 :.2f}% Of Tomorrow\\'s Variance')\n\n# investigate the auto-correlation effects\nfrom statsmodels.graphics.tsaplots import plot_pacf\nplot_pacf(returns, alpha=0.05, zero=False, method='ywm')\nplt.figure()\n\n# model the stock returns as an ARGG\nmodel = ARGG(prices)\n\n# plot the different distributions to visualize their fits\nx = np.linspace(min(model.error), max(model.error), 101)\nplt.style.use('ggplot')\nplt.title('Comparing Fitted CDFs')\nplt.plot(ECDF(model.error).x, ECDF(model.error).y, label='Empirical CDF')\nplt.plot(x, stats.norm.cdf(x, loc=model.mu, scale=model.std), label='Normal Gaussian CDF')\nplt.plot(x, model.error_cdf(x), label='Generalized Gaussian CDF')\nplt.ylabel('Cumulative Probability (%)')\nplt.xlabel('Daily Return (%)')\nplt.legend()\nplt.figure()\n\nplt.title('Comparing Fitted PDFs')\nplt.hist(model.error, density=True, bins=50, label='Empirical PDF')\nplt.plot(x, stats.norm.pdf(x, loc=model.mu, scale=model.std), label='Normal Gaussian PDF')\nplt.plot(x, model.error_pdf(x), label='Generalized Gaussian PDF')\nplt.ylabel('Probability Density')\nplt.xlabel('Daily Return (%)')\nplt.legend()\nplt.figure()\n\n\ndef simulate(stock_dist, n_days, n_times, plot=None):\n\n final_arr = []\n for i in range(n_times):\n rand_walk = stock_dist.prices[-1] * np.cumprod(1 + stock_dist.gen_walk(n_days))\n rand_walk = np.insert(rand_walk, 0, stock_dist.prices[-1])\n\n if plot is not None:\n if i % plot == 0 or i == 0:\n plt.plot(rand_walk, c='dimgray')\n else:\n pass\n\n final_arr.append(rand_walk[-1])\n\n if plot is not None:\n plt.title(\"Autocorrelated Hypothetical Stock Price Paths\")\n plt.xlabel('Number Of Trading Days (#)')\n plt.ylabel('Stock Price ($)')\n\n return final_arr\n\n\nprojection_dist = simulate(stock_dist=model, n_days=21, n_times=10_000, plot=100)\nlower = np.percentile(projection_dist, 0.5)\nmid = np.percentile(projection_dist, 50)\nupper = np.percentile(projection_dist, 99.5)\nprint(f'1st Percentile Price Projection: {lower:.2f} ({((lower / model.prices[-1]) - 1) * 100:.2f}%)')\nprint(f'50th Percentile Price Projection: {mid:.2f} ({((mid / model.prices[-1]) - 1) * 100:.2f}%)')\nprint(f'99th Percentile Price Projection: {upper:.2f} ({((upper / model.prices[-1]) - 1) * 100:.2f}%)')\nplt.show()\n","repo_name":"ColbySP/RiskManagement","sub_path":"NON_independent_return_model.py","file_name":"NON_independent_return_model.py","file_ext":"py","file_size_in_byte":5476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72269425745","text":"import dgl\nimport torch\nfrom torch.utils.data import IterableDataset\n\n\ndef compact_and_copy(frontier, seeds):\n block = dgl.to_block(g=frontier, dst_nodes=seeds)\n for col, data in frontier.edata.items():\n if col == dgl.EID:\n continue\n block.edata[col] = data[block.edata[dgl.EID]]\n return block\n\n\ndef 
assign_simple_node_features(ndata, g, ntype, assign_id=False):\n for col in g.nodes[ntype].data.keys():\n if not assign_id and col == dgl.NID:\n continue\n induced_nodes = ndata[dgl.NID]\n ndata[col] = g.nodes[ntype].data[col][induced_nodes]\n\n\ndef assign_features_to_blocks(blocks, g, ntype):\n assign_simple_node_features(blocks[0].srcdata, g, ntype)\n assign_simple_node_features(blocks[-1].dstdata, g, ntype)\n\n\ndef sample_context_blocks(g, blocks, context_dicts, ntype, ctype):\n context_blocks = []\n for block, context_dict in zip(blocks, context_dicts):\n context_ids = []\n inner_to_global_id = {}\n dom_context_dict, pair_context_dict = context_dict\n for inner, glob in zip(block.nodes(ntype).tolist(), block.ndata[dgl.NID][ntype].tolist()):\n inner_to_global_id[inner] = glob\n for src, dst in zip(block.edges()[0].tolist(), block.edges()[1].tolist()):\n if (inner_to_global_id[src], inner_to_global_id[dst]) in pair_context_dict:\n context_ids.append(pair_context_dict[(inner_to_global_id[src], inner_to_global_id[dst])])\n elif (inner_to_global_id[dst], inner_to_global_id[src]) in pair_context_dict:\n context_ids.append(pair_context_dict[(inner_to_global_id[dst], inner_to_global_id[src])])\n elif inner_to_global_id[dst] in dom_context_dict:\n context_ids.append(dom_context_dict[inner_to_global_id[dst]])\n else:\n eids = block.edge_ids(src, dst)\n block.remove_edges(eids)\n sample_graph = g.subgraph({ctype: context_ids})\n context_blocks.append(sample_graph.nodes[ctype].data)\n return context_blocks\n\n\nclass ItemToItemBatchSampler(IterableDataset):\n def __init__(self, g, user_type, item_type, batch_size):\n self.g = g\n self.user_type = user_type\n self.item_type = item_type\n self.user_to_item_etype = list(g.metagraph()[user_type][item_type])[0]\n self.item_to_user_etype = list(g.metagraph()[item_type][user_type])[0]\n self.batch_size = batch_size\n\n def __iter__(self):\n while True:\n heads = torch.randint(0, self.g.number_of_nodes(self.item_type), (self.batch_size,))\n result = dgl.sampling.random_walk(\n self.g,\n heads,\n metapath=[self.item_to_user_etype, self.user_to_item_etype])\n tails = result[0][:, 2]\n neg_tails = torch.randint(0, self.g.number_of_nodes(self.item_type), (self.batch_size,))\n mask = (tails != -1)\n yield heads[mask], tails[mask], neg_tails[mask]\n\n\nclass NeighborSampler(object):\n def __init__(self, g, user_type, item_type, random_walk_length, random_walk_restart_prob,\n num_random_walks, num_neighbors, num_layers):\n self.g = g\n self.user_type = user_type\n self.item_type = item_type\n self.user_to_item_etype = list(g.metagraph()[user_type][item_type])[0]\n self.item_to_user_etype = list(g.metagraph()[item_type][user_type])[0]\n self.samplers = [\n dgl.sampling.PinSAGESampler(g, item_type, user_type, random_walk_length,\n random_walk_restart_prob, num_random_walks, num_neighbors)\n for _ in range(num_layers)]\n\n def sample_blocks(self, seeds, heads=None, tails=None, neg_tails=None):\n blocks = []\n context_dicts = []\n for sampler in self.samplers:\n frontier, context_dict = sampler(seeds)\n if heads is not None:\n # edge ids node pointing to itself\n eids = frontier.edge_ids(torch.cat([heads, heads]), torch.cat([tails, neg_tails]), return_uv=True)[2]\n if len(eids) > 0:\n frontier = dgl.remove_edges(frontier, eids) # remove edge if the node pointing to itself\n block = compact_and_copy(frontier, seeds)\n seeds = block.srcdata[dgl.NID]\n blocks.insert(0, block)\n context_dicts.insert(0, context_dict)\n return blocks, context_dicts\n\n def 
sample_from_item_pairs(self, heads, tails, neg_tails):\n # Create a graph with positive connections only and another graph with negative\n pos_graph = dgl.graph(\n (heads, tails),\n num_nodes=self.g.number_of_nodes(self.item_type))\n neg_graph = dgl.graph(\n (heads, neg_tails),\n num_nodes=self.g.number_of_nodes(self.item_type))\n\n # remove isolated nodes and re-indexing all nodes and edges\n pos_graph, neg_graph = dgl.compact_graphs([pos_graph, neg_graph])\n seeds = pos_graph.ndata[dgl.NID] # all node ids mapping to global graph g\n\n # extract 2-hop neighbor MFG structure dataset for message passing\n blocks, context_dicts = self.sample_blocks(seeds, heads, tails, neg_tails)\n return pos_graph, neg_graph, blocks, context_dicts\n\n\nclass PinSAGECollator(object):\n def __init__(self, sampler, g, ntype, ctype):\n self.sampler = sampler\n self.ntype = ntype\n self.ctype = ctype\n self.g = g\n\n def collate_train(self, batches):\n # batched graph infos from item2item random walk batcher\n heads, tails, neg_tails = batches[0]\n\n # construct multilayer neighborhood via PinSAGE\n pos_graph, neg_graph, blocks, context_dicts = self.sampler.sample_from_item_pairs(heads, tails, neg_tails)\n context_blocks = sample_context_blocks(self.g, blocks, context_dicts, self.ntype, self.ctype)\n assign_features_to_blocks(blocks, self.g, self.ntype)\n\n return pos_graph, neg_graph, blocks, context_blocks\n\n def collate_test(self, samples):\n batch = torch.LongTensor(samples)\n blocks, context_dicts = self.sampler.sample_blocks(batch)\n context_blocks = sample_context_blocks(self.g, blocks, context_dicts, self.ntype, self.ctype)\n assign_features_to_blocks(blocks, self.g, self.ntype)\n\n return blocks, context_blocks\n\n def collate_point(self, index_id):\n point = torch.LongTensor([index_id])\n blocks, context_dicts = self.sampler.sample_blocks(point)\n context_blocks = sample_context_blocks(self.g, blocks, context_dicts, self.ntype, self.ctype)\n assign_features_to_blocks(blocks, self.g, self.ntype)\n\n return blocks, context_blocks\n\n","repo_name":"yoonkt200/recommender-system","sub_path":"embedding/gnn/multisage/sampler.py","file_name":"sampler.py","file_ext":"py","file_size_in_byte":6716,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"48"} +{"seq_id":"70190107346","text":"\"\"\"\r\nversion: python 3.8\r\ngreedy.py creates lines according to greedy constraints\r\n\r\nauthors:\r\n Dani van Enk, 11823526\r\n Michael Faber, 6087582\r\n\"\"\"\r\n\r\nimport random as rd\r\nimport copy\r\nimport progressbar as pbar\r\n\r\nfrom code.classes import Line\r\nfrom code.algorithms import Random_Connections\r\n\r\n\r\nclass Greedy(Random_Connections):\r\n \"\"\"\r\n Defines the Greedy algorithm\r\n has inheritance from Random_Connections\r\n\r\n parameters:\r\n connections - connections in database;\r\n max_duration - maximal duration for a line\r\n max_n_of_l - maximal number of lines;\r\n\r\n methods:\r\n choose_start - returns start connection that has not been used;\r\n choose_best_option - returns best option according to method;\r\n add_options - add as much greedy options as possible to line;\r\n create_line - creates a single line using greedy algorithm;\r\n run - runs the algorithm a number of times\r\n \"\"\"\r\n\r\n def choose_start(self, connection_list):\r\n \"\"\"\r\n Returns random starting connection that has not been used\r\n\r\n parameter:\r\n connection_list - list of all connections that are not used\r\n \"\"\"\r\n\r\n # Choose random start 
connection\r\n        start_connection = rd.choice(self._connections)\r\n\r\n        # While start connection is already used\r\n        while str(start_connection) not in connection_list:\r\n\r\n            # Choose random start connection\r\n            start_connection = rd.choice(self._connections)\r\n\r\n        # Delete chosen start connection from connection list\r\n        connection_list.remove(str(start_connection))\r\n\r\n        return start_connection, connection_list\r\n\r\n    def choose_best_option(self, line, connection_list,\r\n                           method=\"min\", one_time=True):\r\n        \"\"\"\r\n        chooses best option according to different methods\r\n\r\n        parameters:\r\n            line - line where best option must be chosen for;\r\n            connection_list - list with all possible connections;\r\n            method - method used to choose best option;\r\n            one_time - if a connection can be used one time;\r\n\r\n        returns a single connection that can be added to line\r\n        \"\"\"\r\n\r\n        # Get all options\r\n        options = [option[0] for option in line.get_all_options().values()]\r\n\r\n        if one_time:\r\n            # Get all options that have not been ridden\r\n            options = [option for option in options if str(option)\r\n                       in connection_list]\r\n\r\n        if options:\r\n\r\n            # If method is min, return min\r\n            if method == \"min\":\r\n                return min(options, key=lambda x: x.duration)\r\n\r\n            # If method is max, return max\r\n            elif method == \"max\":\r\n                return max(options, key=lambda x: x.duration)\r\n\r\n            # If method is minconnections, return min connections\r\n            elif method == \"minconnections\":\r\n                return min(options, key=lambda x: len(x.connections))\r\n\r\n            # If method is maxconnections, return max connections\r\n            elif method == \"maxconnections\":\r\n                return max(options, key=lambda x: len(x.connections))\r\n\r\n            # If other method is specified, print error\r\n            else:\r\n                print(\"Give a valid method\")\r\n\r\n        return False\r\n\r\n    def add_options(self, line, connection_list):\r\n        \"\"\"\r\n        Add as much greedy options as possible to line\r\n\r\n        parameters:\r\n            line - line with only start connection added;\r\n            connection_list - list with all possible connections;\r\n\r\n        returns line with new options and updated connection_list\r\n        \"\"\"\r\n\r\n        # While current + shortest duration is shorter than max duration\r\n        while (line.duration + min(line.get_all_options().values(),\r\n               key=lambda x: x[0].duration)[0].duration <= self._max_duration):\r\n\r\n            # Choose the best option\r\n            best_option = self.choose_best_option(line, connection_list)\r\n\r\n            # If there is a best option delete from connection list\r\n            if best_option:\r\n                connection_list.remove(str(best_option))\r\n\r\n            # If there are no best option stop the line\r\n            else:\r\n                break\r\n\r\n            # Add extra connection to line\r\n            line.add_connection(best_option, self._max_duration)\r\n\r\n        return line, connection_list\r\n\r\n    def create_line(self, connection_list):\r\n        '''\r\n        Creates a single line using greedy algorithm\r\n\r\n        parameters:\r\n            connection_list - list of all connections that can be used;\r\n\r\n        returns completed line and connection_list for extra runs\r\n        '''\r\n\r\n        # Set variables\r\n        line = Line()\r\n\r\n        # If there is a startpoint\r\n        if connection_list:\r\n\r\n            # Add startpoint\r\n            start_connection, connection_list = \\\r\n                self.choose_start(connection_list)\r\n\r\n            # Add first connection\r\n            line.add_connection(start_connection, self._max_duration)\r\n\r\n            # Add other connections\r\n            line, connection_list = self.add_options(line, connection_list)\r\n\r\n        # If there are no starting options return empty line\r\n        else:\r\n            line = []\r\n\r\n        return 
line, connection_list\r\n\r\n def run(self, repeat=1):\r\n \"\"\"\r\n run this algorithm\r\n\r\n parameter:\r\n repeat - number of repeats to do for this algorithm;\r\n\r\n returns the result\r\n \"\"\"\r\n\r\n # make sure repeat is an integer\r\n try:\r\n float(repeat)\r\n except ValueError:\r\n exit(\"RunError: please make sure you've entered a number for \"\r\n \"the number of repeats\")\r\n\r\n # print running parameters\r\n print(f\"Runing, Greedy {repeat} times\")\r\n\r\n # define the progress bar widgets\r\n bar_widgets = [pbar.Bar(\"#\", \"[\", \"]\"), \" \", pbar.ETA()]\r\n\r\n # define the max value\r\n maxval = repeat * (self._max_n_of_l - self._min_n_of_l + 1)\r\n\r\n # create the progress bar and start\r\n bar = pbar.ProgressBar(maxval=maxval, widgets=bar_widgets).start()\r\n\r\n # initiate step to 0\r\n step = 0\r\n\r\n # loop for each repeat\r\n for run in range(repeat):\r\n # loop between max and min number of lines\r\n for n_of_l in range(self._max_n_of_l, self._min_n_of_l - 1, -1):\r\n\r\n # predefine lines list\r\n lines = []\r\n\r\n # create list of all connections\r\n connection_list = [str(connection) for connection\r\n in copy.deepcopy(self._connections)]\r\n\r\n # create current number of lines\r\n for _ in range(n_of_l):\r\n line, connection_list = \\\r\n self.create_line(connection_list)\r\n\r\n # check for empty values\r\n if not line:\r\n pass\r\n else:\r\n lines.append(line)\r\n\r\n # get score\r\n goal_function_result = self.goal_function(lines)\r\n\r\n # add result to results attribute and save score\r\n self._result.append((lines,) + goal_function_result)\r\n self._scores[n_of_l][\"runs\"].append(run)\r\n self._scores[n_of_l][\"scores\"].append(goal_function_result[0])\r\n\r\n # update progress bar\r\n step += 1\r\n bar.update(step)\r\n\r\n # save the 5 best results\r\n self._result = sorted(self._result, key=lambda x: x[1],\r\n reverse=True)[:5]\r\n\r\n # finish the progress bar\r\n bar.finish()\r\n\r\n return self._result\r\n","repo_name":"danivenk/UVA_Minor-Programming_Programming-Theory","sub_path":"code/algorithms/greedy.py","file_name":"greedy.py","file_ext":"py","file_size_in_byte":7972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"7621927947","text":"\r\n#LAURA MAMBRILLA MORENO\r\n#EJ 1 - CUADERNO 3\r\n\r\n\"\"\"\r\nEscribe un código que implemente el siguiente comportamiento: “Si la compra es\r\nsuperior a 100EUR se aplica un descuento del 5% si se paga al contado, pero si el\r\npago es con tarjeta sólo se aplica el 2%”. 
Asegúrate de que el importe de la\r\ncompra es un número válido antes de proceder a los cálculos (pista: usa try para\r\ncomprobar que es posible convertir la entrada a un float).\r\r\n\"\"\"\r\n\r\n# IMPORTE --> float\r\ndef comprobar_float():\r\n \"\"\"\r\n float --> bool\r\n OBJ: comprobar que el importe es un float\r\n \"\"\"\r\n try:\r\n importe = float(input ('Introduce el importe: '))\r\n except:\r\n print ('El importe introducido no es válido')\r\n return 0\r\n else:\r\n return importe\r\n \r\n# MODO DE PAGO --> str\r\ndef validar_modo_pago ():\r\n \"\"\"\r\n #str --> bool\r\n #OBJ: modo_pago== 'Efectivo' or modo_pago== 'Tarjeta'\r\n \"\"\"\r\n try:\r\n modo_pago = str(input('Introduce el modo de pago (Efectivo/Tarjeta): '))\r\n except :\r\n print ('El modo de pago introducido no es válido')\r\n return 0\r\n else:\r\n return modo_pago\r\n\r\n#main\r\nimporte= comprobar_float()\r\nmodo_pago= validar_modo_pago()\r\n\r\nif importe >= 0:\r\n if importe > 100:\r\n if modo_pago == 'Efectivo':\r\n pago = importe - importe*5/100\r\n print ('Debe pagar %.2f €' %pago)\r\n elif modo_pago == 'Tarjeta':\r\n pago = importe - importe*2/100\r\n print ('Debe pagar %.2f €' %pago)\r\n else:\r\n print ('Ese modo de pago no es una opción')\r\n else:\r\n if modo_pago == 'Tarjeta' :\r\n print ('Debe pagar %.2f €' %importe)\r\n elif modo_pago == 'Efectivo':\r\n print ('Debe pagar %.2f €' %importe)\r\n else:\r\n print ('Ese modo de pago no es una opción')\r\nelse:\r\n print ('El importe no puede ser negativo')\r\n\r\n","repo_name":"lauram15a/Fundamentos_de_programacion","sub_path":"CUADERNOS DE TRABAJO/CUADERNO 3/ej 1 cuaderno 3 descuentos.py","file_name":"ej 1 cuaderno 3 descuentos.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"37218448424","text":"import requests\nfrom bs4 import BeautifulSoup\nimport os\n\nfrom common import get_pdf_link, pdf_dwl\n\n\nclass app1:\n def __init__(self, n):\n if n == 0:\n self.url = \"https://www.alloschool.com/category/primary\"\n elif n == 1:\n self.url = \"https://www.alloschool.com/category/middle-school\"\n else:\n self.url = \"https://www.alloschool.com/category/primary\"\n\n self.div_pdf_link = \"pdf-tag-hide\"\n self.pdf_class = \"mdi-file-pdf\"\n self.endingPath = \"scrapped\"\n if not os.path.exists('./'+self.endingPath):\n os.mkdir('./'+self.endingPath)\n self.teil_1()\n\n def teil_1(self):\n res = requests.get(self.url)\n doc = BeautifulSoup(res.text, 'html.parser')\n selected_li_class = 'category-u'\n unlisted_lists = doc.find_all('li', selected_li_class)\n for i in unlisted_lists:\n print(i.a.get('title'), \"######################\")\n file_name = str(i.a.get('title'))\n os.mkdir('./'+self.endingPath+'/'+file_name)\n nested_lists = i.ul.find_all('li')\n for r in nested_lists:\n pdf_link = r.a.get('href')\n for ir in range(0, 10):\n if not os.path.exists(\"./\"+file_name+\"/\"+str(r.a.get('title'))[0:4]):\n file_file_name = str(r.a.get('title'))[0:4]\n os.mkdir(\"./\"+self.endingPath+'/' +\n file_name+\"/\"+file_file_name)\n break\n else:\n file_file_name = str(r.a.get('title'))[0:4]+str(ir)\n if not os.path.exists(\"./\"+file_name+\"/\"+file_file_name):\n os.mkdir(\"./\"+self.endingPath+'/' +\n file_name+\"/\"+file_file_name)\n break\n else:\n continue\n\n res3 = requests.get(pdf_link)\n doc3 = BeautifulSoup(res3.text, 'html.parser')\n get_pdf_link(doc3, file_name, 
file_file_name)\n","repo_name":"issamoxix/tdr","sub_path":"app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":2124,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"13895317211","text":"#!/usr/bin/env python\nfrom bintrees import BinaryTree, AVLTree\nfrom load_csv import load_csv\nfrom time_tree import time_tree\nimport matplotlib.pyplot as plt\n\ndf = load_csv()\navl = AVLTree()\nbst = BinaryTree()\n(average_insertion_time_avl, \n total_time_write_avl, \n average_read_time_avl, \n total_time_read_avl, \n time_to_insert_seconds_avl, \n heights_avl) = time_tree(df, avl)\n\n(average_insertion_time_bst, \n total_time_write_bst, \n average_read_time_bst, \n total_time_read_bst, \n time_to_insert_seconds_bst, \n heights_bst) = time_tree(df, bst)\n\nfig, axs = plt.subplots(2, 4, figsize=(16,11))\naxs[0, 0].bar(['AVL', 'BST'], [total_time_write_avl, total_time_write_bst])\naxs[0, 0].set_title('Total insertion time')\naxs[0, 0].set_ylabel('(s)', rotation=0, labelpad=10)\n\naxs[0, 1].bar(['AVL', 'BST'], [average_insertion_time_avl, average_insertion_time_bst])\naxs[0, 1].set_title('Average insertion time')\naxs[0, 1].set_ylabel('(s)', rotation=0, labelpad=10)\n\naxs[1, 0].bar(['AVL', 'BST'], [total_time_read_avl, total_time_read_bst])\naxs[1, 0].set_title('Total read time')\naxs[1, 0].set_ylabel('(s)', rotation=0, labelpad=10)\n\naxs[1, 1].bar(['AVL', 'BST'], [average_read_time_avl, average_read_time_bst])\naxs[1, 1].set_title('Average read time')\naxs[1, 1].set_ylabel('(s)', rotation=0, labelpad=10)\n\naxs[0, 2].plot(time_to_insert_seconds_avl)\naxs[0, 2].set_title('Time to insert (AVL)')\naxs[0, 2].set_ylabel('(s)', rotation=0, labelpad=10)\naxs[0, 2].set_xlabel('Insertion Number')\n\naxs[1, 2].plot(time_to_insert_seconds_bst)\naxs[1, 2].set_title('Time to insert (BST)')\naxs[1, 2].set_ylabel('(s)', rotation=0, labelpad=10)\naxs[1, 2].set_xlabel('Insertion Number')\n\naxs[0, 3].plot(heights_avl)\naxs[0, 3].set_title('AVL Tree Heights')\naxs[0, 3].set_xlabel('Insertion Number')\n\naxs[1, 3].plot(heights_bst)\naxs[1, 3].set_title('BST Heights')\naxs[1, 3].set_xlabel('Insertion Number')\n\nplt.tight_layout()\nplt.savefig('comparação.png')\n\n","repo_name":"lucasduartes/avl-bst","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1925,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"40115946952","text":"import os, sys\n\nmyhier = os.getenv('MYHIER')\ngitHome = os.getenv('GIT_HOME') \nmbLibPath = os.getenv('MBLIBPATH')\nsys.path.append(mbLibPath)\n\nimport MBCommon as MBC\nimport MBUser\nimport MBUtil\nimport MBAlbum\nimport MBTree\nimport MBTrack\n\n#--start constants--\n\n__author__ = \"Josef Grosch\"\n__copyright__ = \"Copyright 2020 - 2023 Moose River, LLC.\"\n__license__ = \"BSD 3-clause\"\n__version__ = \"0.1\"\n__maintainer__ = \"Josef Grosch\"\n__email__ = \"jgrosch@gmail.com\"\n__status__ = \"Development\"\n\n#--end constants--\n\n\n\"\"\"\n\n\"\"\"\n\n\n# -----------------------------------------------------------------------\n#\n# printMasterHelp\n#\n# -----------------------------------------------------------------------\ndef printMasterHelp():\n print(\"RTFM\")\n\n return\n\n# -----------------------------------------------------------------------\n#\n# perfoemAction\n#\n# -----------------------------------------------------------------------\ndef performAction(pDict):\n rDict = MBC.genReturnDict('inside 
MBDelete.performAction')\n    RS = MBC.ReturnStatus\n\n    argv     = pDict['argv']\n    argc     = pDict['argc']\n    config   = pDict['config']\n    action   = pDict['action']\n    argsList = pDict['argsList']\n\n    helpFound = False\n    debug = False\n    if debug:\n        print(str(argv))\n        print(len(argv))\n\n    for entry in argsList:\n        index = entry['index']\n        key   = entry['key']\n        value = entry['value']\n\n        if 'calling program' in value:\n            continue\n\n        if 'cmd' in value:\n            continue\n\n        if 'skip' in value:\n            continue\n\n        if '--album' in key:\n            pDict['album'] = value\n            continue\n        \n        if '--track' in key:\n            pDict['track'] = value\n            continue\n        \n        if '--tree' in key:\n            pDict['tree'] = value\n            continue\n        \n        if '--user' in key:\n            pDict['user'] = value\n            continue\n        \n        if '--id' in key:\n            pDict['id'] = value\n            continue\n        \n        if '--email' in key:\n            pDict['email'] = value\n            continue\n\n        if '--name' in key:\n            pDict['name'] = value\n            continue\n\n        if '--help' in key:\n            msg = returnMasterHelp()\n            helpFound = True\n            break\n    # End of for loop\n\n    \n    i = 0\n    \n    if helpFound == False:\n        # Album\n        if 'album' in pDict:\n            i = 0\n            tmpDict = MBAlbum.deleteAlbum(pDict)\n\n        # Track\n        if 'track' in pDict:\n            i = 0\n            tmpDict = MBTrack.deleteTrack(pDict)\n\n        # Tree\n        if 'tree' in pDict:\n            i = 0\n            tmpDict = MBTree.deleteTree(pDict)\n\n        # User\n        if 'user' in pDict:\n            i = 0\n            tmpDict = MBUser.deleteUser(pDict)\n        \n        rDict['status'] = tmpDict['status']\n        rDict['msg'] = tmpDict['msg']\n    else:\n        # --help found. Return help\n        rDict['msg'] = msg\n        rDict['status'] = RS.OK\n    \n    return rDict\n\n    \n    return\n    #\n\n\n    \n\n# -----------------------------------------------------------------------\n#\n# End of MBDelete.py\n#\n# -----------------------------------------------------------------------\n","repo_name":"jgrosch510/MusicBank","sub_path":"lib/Python/MusicBank/MusicBank/MBDelete.py","file_name":"MBDelete.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"23599379172","text":"from yahoo_weather.weather import YahooWeather\nimport os\nimport yaml\n\n\nclass commuteWeather(YahooWeather):\n    \"\"\" Currently it is a subclass\n    of YahooWeather and has no additional functionality\"\"\"\n\n    conf=None\n\n    def __init__(self):\n        self.__load_confs()\n        if self.conf:\n            super(commuteWeather, self).__init__(\n                APP_ID=self.conf.get(\n                    'app_id',None),\n                api_key=self.conf.get(\n                    'api_key', None),\n                api_secret=self.conf.get(\n                    'api_secret', None)\n            )\n\n\n    def __load_confs(self):\n        \"\"\" A method to load Yahoo API credentials\"\"\"\n\n        config=os.path.join(os.path.expanduser('~'), '.trafficpatterns.yml')\n        with open(config, 'r') as f:\n            confs=yaml.safe_load(f)\n\n        self.conf=confs.get('Yahoo', None)\n","repo_name":"h3xh4wk/commuter","sub_path":"scripts/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
+{"seq_id":"30936391973","text":"import torchvision.transforms as transforms\nfrom folder import ImageFolder\nfrom PIL import Image\n# import pickle\n# from cityscape_utils import *\n'''\n\tinput:\t\t\t dataset name(str)\n\n\treturn np data:\t\n\t\tn_classes\t: int\n\n\t\ttrain_imgs\t: (n_t, h, w, 3)\n\t\ttrain_segs\t: (n_t, h, w)\n\t\ttrain_masks\t: (n_t, h, w) missing region is 0, known region is 1 \n\n\t\tval_imgs\t: (n_v, h, w, 3)\n\t\tval_segs\t: (n_v, h, w)\n\t\tval_masks\t: (n_v, h, w)\n'''\n\ndef get_dataset(args):\n\t### explicitly set flip = True 
#######\n\tif args.dataset == \"cityscape\":\n\t\t# if 'Det' in args.frame_disc_model or 'Det' in args.video_disc_model or args.frame_det_disc or args.video_det_disc:\n\t\tclip_file = \"/data/linz/proj/Dataset/Cityscape/load_files/int_{}_len_3_max_{}bb_area_3000_extra_panet_lsclip.pkl\".format(int(args.interval), int(args.num_track_per_img))\n\t\t# if not args.track_gen and args.split == 'val':\n\t\t# \tclip_file = \"/data/linz/proj/Dataset/Cityscape/load_files/int_{}_len_3_extra_lsclip.pkl\".format(int(args.interval))\n\t\tobj_coord_file = \"/data/linz/proj/Dataset/Cityscape/obj_coords/int_{}_len_3_extra_512x1024_max_{}bb_area_3000_panet_lsclip.pkl\".format(int(args.interval), int(args.num_track_per_img))\n\t\tif args.syn_type == 'extra' and args.vid_length != 1:\n\t\t\tclip_file = \"/data/linz/proj/Dataset/Cityscape/load_files/int_{}_len_{}_extra_lsclip.pkl\".format(int(args.interval), args.vid_length+2)\n\t\tif args.effec_flow:\n\t\t\tclip_file = \"/data/linz/proj/Dataset/Cityscape/load_files/effec_flow_int_{}_len_3_extra_lsclip.pkl\".format(int(args.interval))\n\t\timport pickle\n\t\twith open(clip_file, 'rb') as f:\n\t\t\tload_f = pickle.load(f)\n\t\t\tif args.split == 'train':\n\t\t\t\tclips_train_file = load_f['train'] \n\t\t\telif args.split == 'val':\n\t\t\t\tclips_val_file = load_f['val'] \n\t\twith open(obj_coord_file, 'rb') as f:\n\t\t\tload_f = pickle.load(f)\n\t\t\tif args.split == 'train':\n\t\t\t\tcoords_train_file = load_f['train'] \n\t\t\tif args.split == 'val':\n\t\t\t\tcoords_val_file = load_f['val']\n\t\t\t# else:\n\t\t\t# \tcoords_val_file = None\n\n\t\tcrop_size = (args.input_h, args.input_w)\n\t\tif args.split == 'train':\n\t\t\t# train \n\t\t\ttfs = []\n\t\t\ttfs.append(transforms.Compose([\t\t#transforms.Resize(re_size, interpolation=Image.BILINEAR),\n\t\t\t\t\t\t\t\t\t\t\t\ttransforms.RandomCrop(crop_size)]))\n\t\t\t# tfs.append(transforms.Compose([\t\ttransforms.Resize((150, 300), interpolation=Image.NEAREST),\n\t\t\t# \t\t\t\t\t\t\t\t\t\t\ttransforms.RandomCrop((128, 256))\t]))\n\t\t\ttfs.append(transforms.Compose([\t\t#transforms.Resize((150, 300), interpolation=Image.NEAREST),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\ttransforms.RandomCrop(crop_size)\t]))\n\n\t\t\ttrain_dataset = ImageFolder(args, clips_train_file, transform=tfs, bboxes=coords_train_file)\t\n\t\telse:\n\t\t\ttrain_dataset=None\t\n\n\t\tif args.split == 'val':\n\t\t\t# val\n\t\t\ttfs = []\n\t\t\ttfs.append(transforms.Compose([\t\t#transforms.Resize(crop_size, interpolation=Image.BILINEAR)\n\t\t\t\t\t\t\t\t\t\t\t\t\t]))\n\t\t\ttfs.append(transforms.Compose([\t\t#transforms.Resize((128, 256), interpolation=Image.NEAREST)\n\t\t\t\t\t\t\t\t\t\t\t\t\t]))\n\n\t\t\tval_dataset = ImageFolder(args, clips_val_file, transform=tfs, bboxes = coords_val_file)\n\t\telse:\n\t\t\tval_dataset=None\n\telif args.dataset == \"ucf101\":\n\t\tclip_file = \"/data/linz/proj/Dataset/CyclicGen-master/UCF101_test_root_clip.pkl\"\n\t\twith open(clip_file, 'rb') as f:\n\t\t\timport pickle\n\t\t\tload_f = pickle.load(f)\n\t\t\tclips_val_file = load_f['test'] \n\t\tre_size = (256, 256)\n\t\tcrop_size = (256, 256)\t\n\t\ttrain_dataset = None\n\t\t# val\n\t\ttfs = []\n\t\ttfs.append(transforms.Compose([\t\ttransforms.Resize(crop_size, interpolation=Image.BILINEAR)\n\t\t\t\t\t\t\t\t\t\t\t\t]))\n\t\ttfs.append(transforms.Compose([\t\ttransforms.Resize((256, 256), interpolation=Image.NEAREST)\n\t\t\t\t\t\t\t\t\t\t\t\t]))\n\n\t\tval_dataset = ImageFolder(args, clips_val_file, 
\n\t\t\t\t\t\t\t\t\t\t\t\ttransform=tfs\n\t\t\t\t\t\t\t\t\t\t\t)\n\t\t# val_dataset = ImageFolder(args, clips_val_file,transform=None)\n\telif args.dataset == 'vimeo':\n\t\tclip_train_file = '/data/linz/proj/Dataset/vimeo_triplet/tri_trainlist.txt'\n\t\tclip_val_file = '/data/linz/proj/Dataset/vimeo_triplet/tri_testlist.txt'\n\t\tclips_file = {'train':[],\n\t\t\t\t\t\t'val':[]}\n\t\twith open(clip_train_file, 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tline = line.strip()\n\t\t\t\tif len(line) < 4:\n\t\t\t\t\tbreak\n\t\t\t\tclips_file['train'].append(line)\n\t\twith open(clip_val_file, 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tline = line.strip()\n\t\t\t\tif len(line) < 4:\n\t\t\t\t\tbreak\n\t\t\t\tclips_file['val'].append(line)\n\n\t\t# crop_size = (128, 224)\n\t\t# train \n\t\ttfs = []\n\t\ttfs.append(transforms.Compose([\t\t#transforms.Resize(re_size, interpolation=Image.BILINEAR),\n\t\t\t\t\t\t\t\t\t\t\t# transforms.RandomCrop(crop_size)\n\t\t\t\t\t\t\t\t\t\t\t]))\n\t\t# tfs.append(transforms.Compose([\t\ttransforms.Resize((150, 300), interpolation=Image.NEAREST),\n\t\t# \t\t\t\t\t\t\t\t\t\t\ttransforms.RandomCrop((128, 256))\t]))\n\t\ttfs.append(transforms.Compose([\t\t#transforms.Resize((150, 300), interpolation=Image.NEAREST),\n\t\t\t\t\t\t\t\t\t\t\t\t\t# transforms.RandomCrop(crop_size)\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t]))\n\n\t\ttrain_dataset = ImageFolder(args, clips_file['train'], transform=tfs)\t\t\n\n\t\t# val\n\t\ttfs = []\n\t\ttfs.append(transforms.Compose([\t\t#transforms.Resize(crop_size, interpolation=Image.BILINEAR)\n\t\t\t\t\t\t\t\t\t\t\t\t]))\n\t\ttfs.append(transforms.Compose([\t\t#transforms.Resize((128, 256), interpolation=Image.NEAREST)\n\t\t\t\t\t\t\t\t\t\t\t\t]))\n\n\t\tval_dataset = ImageFolder(args, clips_file['val'], \n\t\t\t\t\t\t\t\t\t\t\t\ttransform=tfs\n\t\t\t\t\t\t\t\t\t\t\t)\n\n\n\n\telse:\n\t\traise Exception('Invalid dataset %s' % args.dataset)\n\t\n\treturn train_dataset, val_dataset\n\n\n","repo_name":"lzhangbj/deep_video_interpolation_extrapolation","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":5301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"72241240147","text":"import pygame as p\nimport os\nimport random as r\nfrom json_get_data import *\nfrom files_get_data import *\np.init() \n#audio\nif full_screen:\n d=p.display.set_mode((ww,wh),p.FULLSCREEN)#display width\nelse:\n d=p.display.set_mode((ww,wh))\na=p.image.load(os.path.join(os.getcwd(),\"asset\",\"quit_photo\",qi))#quit image\np.mixer.music.load(os.path.join(os.getcwd(),\"asset\",\"audio\",qa))#audio\nif aud:\n p.mixer.music.play(-1)\np.display.set_icon(a)#icon photo\npi=snake_photo(p,block_width,block_height)# retieve all photos\nc = p.time.Clock() \ndef quit(m):\n while True:\n a=p.transform.scale(pic,(d.get_size()[0],d.get_size()[1]))\n d.blit(a,(0,0))\n #quit text\n d.blit(p.font.SysFont(m_font_type,m_font_size).render(m+\"! 
{"seq_id":"72241240147","text":"import pygame as p\nimport os\nimport random as r\nfrom json_get_data import *\nfrom files_get_data import *\np.init() \nif full_screen:\n    d=p.display.set_mode((ww,wh),p.FULLSCREEN)#display width\nelse:\n    d=p.display.set_mode((ww,wh))\na=p.image.load(os.path.join(os.getcwd(),\"asset\",\"quit_photo\",qi))#quit image\np.mixer.music.load(os.path.join(os.getcwd(),\"asset\",\"audio\",qa))#audio\nif aud:\n    p.mixer.music.play(-1)\np.display.set_icon(a)#icon photo\npi=snake_photo(p,block_width,block_height)# retrieve all photos\nc = p.time.Clock() \ndef quit(m):\n    while True:\n        a=p.transform.scale(pic,(d.get_size()[0],d.get_size()[1]))\n        d.blit(a,(0,0))\n        #quit text\n        d.blit(p.font.SysFont(m_font_type,m_font_size).render(m+\"! Press C-Play Again or Q-Quit\",True,m_font_color), [d.get_size()[0]/15,d.get_size()[1]/2])\n        p.display.update() \n        for event in p.event.get():\n            if event.type == p.KEYDOWN:\n                if event.key == p.K_q:\n                    p.quit()\n                    exit()\n                if event.key == p.K_c:\n                    gameLoop() \n            if event.type==p.QUIT:\n                p.quit()\ndef gameLoop():\n    #start index\n    x1=block_width*9\n    y1=block_height*8\n    l=r.randrange(len(pi))\n    x1_change=0\n    y1_change=0\n    sp=speed\n    sl=[]\n    se=[pi[r.randrange(len(pi))]]\n    foodx=r.randrange(ww//block_width)*block_width\n    foody=r.randrange((wh-hbb)//block_height)*block_height+block_height*hbb# offset by block_height*hbb so that food does not appear on the upper banner image\n    sl.append([x1,y1])\n    while True: \n        for event in p.event.get():\n            if event.type==p.QUIT:\n                p.quit()\n            if event.type==p.KEYDOWN:\n                if event.key == p.K_LEFT or event.key == p.K_a:\n                    if x1_change ==block_width and sp!=5:# if the snake is moving right and we tell it to go left it should not reverse onto itself; if it is only a single block this guard need not be executed\n                        break\n                    x1_change = -block_width\n                    y1_change = 0\n                elif event.key == p.K_RIGHT or event.key == p.K_d:\n                    if x1_change ==-block_width and sp!=5:\n                        break\n                    x1_change = block_width\n                    y1_change = 0\n                elif event.key == p.K_UP or event.key == p.K_w:\n                    if y1_change ==block_height and sp!=5:\n                        break\n                    y1_change = -block_height\n                    x1_change = 0\n                elif event.key == p.K_DOWN or event.key == p.K_s:\n                    if y1_change ==-block_height and sp!=5:\n                        break\n                    y1_change = block_height\n                    x1_change = 0 \n                elif event.key==p.K_ESCAPE:#escape to go to menu\n                    quit(\"You pressed escape\")\n        x1 += x1_change\n        y1 += y1_change\n        if x1 >=d.get_size()[0] or x1 < 0 or y1>=d.get_size()[1] or y1 < block_width*hbb:# this is if we go out of the screen\n            quit(\"You hit the boundary\")\n            return 0\n        d.fill(\"green\")\n        qu=[]\n        for i in os.listdir(os.path.join(\"asset\",\"birthday_photo\")):# happy birthday photo\n            qu.append(i)\n        lu=r.randrange(len(qu))#select a random photo\n        b=p.image.load(os.path.join(os.getcwd(),\"asset\",\"birthday_photo\",qu[lu]))#the birthday image keeps blinking\n        b=p.transform.scale(b,(d.get_size()[0],block_width*hbb))\n        d.blit(b,(0,0))\n        f=0\n        if x1 == foodx and y1 == foody:\n            sp+=speed_increment\n            while True:\n                foodx=r.randrange(ww//block_width)*block_width\n                foody=r.randrange((wh-hbb)//block_height)*block_height+block_height*hbb\n                if [foodx,foody] not in sl:\n                    break\n            se.append(pi[l])\n            f=1\n            l=r.randrange(len(pi))\n        d.blit(pi[l],(foodx,foody))\n        msg=\"Your Score: \"+str(len(sl)-1)\n        d.blit(p.font.SysFont(score_type,score_font_size).render(msg,True,score_font_color),[0+ww*(4/10),0+wh*(9/10)])\n        snake_Head=[x1,y1]\n        if f==0:\n            del sl[0]\n        for x in sl[::-1]:# in case the snake collides with itself\n            if x == snake_Head:\n                quit(\"You have hit the snake\")\n        sl.append([x1,y1])\n        for x in range(len(sl)-1,-1,-1):\n            d.blit(se[x],(sl[x][0],sl[x][1]))\n        p.display.update() \n        c.tick(sp)\npic=a    \ngameLoop()","repo_name":"Bhaumik-Tandan/Birthday_pygame","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
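The four key branches above share one guard: a snake longer than one block must not reverse straight onto itself (the record gates this on the speed sentinel `sp != 5`). The intent of that check, isolated as a pure-Python helper; `can_turn` is an illustrative name.

```python
# Standalone version of the reversal guard described in the comments above.
def can_turn(dx, dy, new_dx, new_dy, snake_len):
    """Reject a move that exactly reverses the current direction."""
    if snake_len <= 1:
        return True  # a single block may reverse freely
    return (new_dx, new_dy) != (-dx, -dy)

assert can_turn(1, 0, -1, 0, snake_len=5) is False  # right -> left: blocked
assert can_turn(1, 0, 0, 1, snake_len=5) is True    # right -> down: allowed
```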
{"seq_id":"11929750437","text":"# Alex 2.0\n\nimport config\nimport stt\nimport tts\nfrom fuzzywuzzy import fuzz\nimport datetime\nfrom text_to_num import text2num\nimport webbrowser\nimport random\n\nprint(f\"{config.VA_NAME} ({config.VA_VER}) начал свою работу...\")\ntts.va_speak(\"Слушаю, хозя!\")\n\ndef va_respond(voice: str):\n    print(voice)\n    \n    if voice.startswith(config.VA_ALIAS):\n        cmd = recognize_cmd(filter_cmd(voice))\n        \n        if cmd['cmd'] not in config.VA_CMD_LIST.keys():\n            tts.va_speak(\"Что?\")\n        else:\n            execute_cmd(cmd['cmd'])\n        \n\ndef filter_cmd(raw_voice: str):\n    cmd = raw_voice\n    \n    for x in config.VA_ALIAS:\n        cmd = cmd.replace(x, '').strip()\n    \n    for x in config.VA_TBR:\n        cmd = cmd.replace(x, '').strip()\n    \n    return cmd\n\ndef recognize_cmd(cmd: str):\n    rc = {'cmd': '', 'percent': 0}\n    for c, v in config.VA_CMD_LIST.items():\n        \n        for x in v:\n            vrt = fuzz.ratio(cmd, x)\n            if vrt > rc['percent']:\n                rc['cmd'] = c\n                rc['percent'] = vrt\n        \n    return rc\n\n\ndef execute_cmd(cmd: str):\n    if cmd == 'help':\n        text = \"Я умею: ...\"\n        text += \"проговаривать время ...\"\n        text += \"открывать браузер ...\"\n        text += \"и рассказывать анекдоты\"\n        tts.va_speak(text)\n    \n    elif cmd == 'ctime':\n        now = datetime.datetime.now()\n        text = \"Сейч+ас \" + text2num(now.hour) + ' ' + text2num(now.minute)\n        tts.va_speak(text)\n    \n    elif cmd == 'joke':\n        jokes = ['Как смеются программисты? ... ехе ехе ехе',\n                 'ЭсКьюЭль запрос заходит в бар, подходит к двум столам и спрашивает .. м+ожно присоединиться',\n                 'Программист это машина для преобразования кафе в кофе, а кофе в код']\n        \n        tts.va_speak(random.choice(jokes))\n    \n    elif cmd == 'open_browser':\n        chrome = r'C:\Program Files\Google\Chrome\Application\chrome.exe %s'\n        webbrowser.get(chrome).open(\"chrome://newtab\")\n    \n    \nstt.va_listen(va_respond)\n","repo_name":"Alexetye/Alex-2.0","sub_path":"alex.py","file_name":"alex.py","file_ext":"py","file_size_in_byte":2287,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
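`recognize_cmd` above scans every alias phrase and keeps the highest `fuzz.ratio` score. The same best-match loop, self-contained, with an illustrative command table (the real table lives in `config.VA_CMD_LIST`):

```python
from fuzzywuzzy import fuzz

CMD_LIST = {  # illustrative stand-in for config.VA_CMD_LIST
    'ctime': ['what time is it', 'current time'],
    'joke':  ['tell me a joke', 'make me laugh'],
}

def recognize_cmd(text):
    best = {'cmd': '', 'percent': 0}
    for cmd, phrases in CMD_LIST.items():
        for phrase in phrases:
            score = fuzz.ratio(text, phrase)  # similarity in 0..100
            if score > best['percent']:
                best = {'cmd': cmd, 'percent': score}
    return best

print(recognize_cmd('tell me joke'))  # -> {'cmd': 'joke', 'percent': ...}
```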
{"seq_id":"34537589121","text":"from django.views.generic import ListView, CreateView, UpdateView, View, TemplateView, DetailView, DeleteView\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.core.exceptions import PermissionDenied, ValidationError, ObjectDoesNotExist\nfrom budgetdb.models import Cat1, Transaction, Cat2, BudgetedEvent, Vendor, Account, AccountCategory, Preference\nfrom budgetdb.models import JoinedTransactions\nfrom budgetdb.forms import TransactionFormFull, TransactionFormShort, JoinedTransactionsForm, TransactionFormSet, TransactionAuditFormFull, TransactionModalForm, JoinedTransactionConfigForm\nfrom django.forms.models import modelformset_factory, inlineformset_factory, formset_factory\nfrom datetime import datetime, date\nfrom dateutil.relativedelta import relativedelta\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Submit, Button\nfrom crispy_forms.layout import Layout, Div\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse, reverse_lazy\nfrom decimal import *\nfrom budgetdb.utils import Calendar\nfrom budgetdb.views import MyUpdateView, MyCreateView, MyDetailView, MyListView\nfrom budgetdb.tables import JoinedTransactionsListTable, TransactionListTable\nfrom django.utils.safestring import mark_safe\nfrom django import forms\nfrom bootstrap_modal_forms.generic import BSModalUpdateView, BSModalCreateView\nfrom crum import get_current_user\n\n\n###################################################################################################################\n# Transactions\n\n\nclass TransactionDetailView(MyDetailView):\n    model = Transaction\n    template_name = 'budgetdb/transaction_detail.html'\n\n\nclass TransactionUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n    model = Transaction\n    template_name = 'budgetdb/transaction_form.html'\n    form_class = TransactionFormFull\n    task = 'Update'\n\n    def test_func(self):\n        try:\n            view_object = self.model.view_objects.get(pk=self.kwargs.get('pk'))\n        except ObjectDoesNotExist:\n            raise PermissionDenied\n        return view_object.can_edit()\n\n    def handle_no_permission(self):\n        raise PermissionDenied\n\n    def get_form(self, form_class=None):\n        form = super().get_form(form_class)\n        form.helper.form_method = 'POST'\n        form.helper.add_input(Submit('submit', self.task, css_class='btn-primary'))\n        form.helper.add_input(Button('cancel', 'Cancel', css_class='btn-secondary',\n                              onclick=\"javascript:history.back();\"))\n        form.helper.add_input(Submit('delete', 'Delete', css_class='btn-danger'))\n        return form\n\n\nclass TransactionUpdatePopupView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n    model = Transaction\n    template_name = 'budgetdb/transaction_popup_form.html'\n    form_class = TransactionFormFull\n    task = 'Update'\n\n    def test_func(self):\n        try:\n            view_object = self.model.view_objects.get(pk=self.kwargs.get('pk'))\n        except ObjectDoesNotExist:\n            raise PermissionDenied\n        return view_object.can_edit()\n\n    def handle_no_permission(self):\n        raise PermissionDenied\n\n    def get_form(self, form_class=None):\n        form = super().get_form(form_class)\n        form.helper.form_method = 'POST'\n        form.helper.add_input(Submit('submit', self.task, css_class='btn-primary'))\n        form.helper.add_input(Button('cancel', 'Cancel', css_class='btn-secondary',\n                              onclick=\"javascript:window.close();\"\n                              )\n                              )\n        form.helper.add_input(Button('delete', 'Delete', css_class='btn-danger',\n                              onclick=f'window.location.href=\"{reverse(\"budgetdb:delete_transaction\", args=[self.kwargs[\"pk\"]])}\"'\n                              )\n                              )\n        return form\n\n\ndef TransactionDelete(request, pk):\n    model = Transaction\n    try:\n        delete_object = model.view_objects.get(pk=pk)\n    except ObjectDoesNotExist:\n        raise PermissionDenied\n    if delete_object.can_edit():\n        if request.method == 'POST':\n            delete_object.soft_delete()\n    else:\n        raise PermissionDenied\n    return redirect('/')\n\n\nclass TransactionCreateView(LoginRequiredMixin, CreateView):\n    model = Transaction\n    template_name = 'budgetdb/transaction_form.html'\n    form_class = TransactionFormFull\n    task = 'Create'\n    user = None\n\n    def get_form_kwargs(self):\n        kwargs = super().get_form_kwargs()\n        self.user = get_current_user()\n        kwargs['task'] = self.task\n        kwargs['user'] = self.user\n        return kwargs\n\n    def get_context_data(self, **kwargs):\n        self.user = get_current_user()\n        context = super().get_context_data(**kwargs)\n        preference = get_object_or_404(Preference, id=self.user.id)\n        context['currency'] = preference.currency_prefered.id\n        return context\n\n    def get_form(self, form_class=None):\n        form = super().get_form(form_class)\n        form.helper.form_method = 'POST'\n        return form\n\n    def get_success_url(self):\n        return reverse('budgetdb:details_transaction', kwargs={'pk': self.object.id})\n\n\nclass TransactionCreateViewFromDateAccount(LoginRequiredMixin, CreateView):\n    model = Transaction\n    template_name = 'budgetdb/transaction_popup_form.html'\n    form_class = TransactionFormFull\n\n    def get_form(self, form_class=None):\n        form = super().get_form(form_class)\n        form_date = self.kwargs.get('date')\n        if form_date is None:\n            form_date = datetime.now().strftime(\"%Y-%m-%d\")\n        try:\n            account = Account.admin_objects.get(pk=self.kwargs.get('account_pk'))\n        except 
ObjectDoesNotExist:\n raise PermissionDenied \n form.initial['date_actual'] = form_date\n form.initial['account_source'] = account\n form.helper.form_method = 'POST'\n form.helper.add_input(Submit('submit', 'Create', css_class='btn-primary'))\n return form\n\n\ndef load_payment_transaction(request):\n account_id = request.GET.get('account')\n transactions = Transaction.admin_objects.filter(account_destination=account_id,).order_by('date_actual')\n return render(request, 'budgetdb/get_payment_transaction_dropdown_list.html', {'transactions': transactions})\n\n\nclass TransactionCreateModal(LoginRequiredMixin, UserPassesTestMixin, BSModalCreateView):\n model = Transaction\n template_name = 'budgetdb/transaction_popup_form.html'\n form_class = TransactionModalForm\n task = 'Create'\n user = None\n\n def test_func(self):\n try:\n admin_object = Account.admin_objects.get(pk=self.kwargs.get('pk'))\n except ObjectDoesNotExist:\n raise PermissionDenied\n return True\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n self.user = get_current_user()\n kwargs['task'] = self.task\n kwargs['user'] = self.user\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n preference = get_object_or_404(Preference, id=self.user.id)\n context['currency'] = preference.currency_prefered.id\n return context\n\n def get_form(self, form_class=None):\n form = super().get_form(form_class)\n form_date = self.kwargs.get('date')\n if form_date is None:\n form_date = datetime.now().strftime(\"%Y-%m-%d\")\n account = get_object_or_404(Account, id=self.kwargs.get('pk'))\n preference = get_object_or_404(Preference, id=self.user.id)\n form.initial['date_actual'] = form_date\n form.initial['account_source'] = account\n form.initial['currency'] = preference.currency_prefered\n form.initial['amount_actual_foreign_currency'] = Decimal(0)\n form.initial['audit'] = False\n form.helper.form_method = 'POST'\n return form\n\n def get_success_url(self):\n return reverse('budgetdb:list_account_activity', kwargs={'pk': self.kwargs.get('pk')})\n\n\nclass TransactionModalUpdate(LoginRequiredMixin, UserPassesTestMixin, BSModalUpdateView):\n model = Transaction\n template_name = 'budgetdb/transaction_popup_form.html'\n form_class = TransactionModalForm\n task = 'Update'\n success_message = 'Success: Transaction was updated.'\n user = None\n\n def test_func(self):\n view_object = get_object_or_404(self.model, pk=self.kwargs.get('pk'))\n return view_object.can_edit()\n\n def handle_no_permission(self):\n raise PermissionDenied\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n self.user = get_current_user()\n kwargs['audit'] = False\n kwargs['task'] = self.task\n kwargs['user'] = self.user\n return kwargs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n preference = get_object_or_404(Preference, id=self.user.id)\n context['currency'] = preference.currency_prefered.id\n return context\n\n def get_form(self, form_class=None):\n form = super().get_form(form_class)\n form.helper.form_method = 'POST'\n return form\n\n def get_success_url(self):\n return reverse('budgetdb:list_account_activity', kwargs={'pk': self.kwargs.get('accountid')})\n\n\n###################################################################################################################\n# Audits\n\nclass TransactionAuditCreateModalViewFromDateAccount(LoginRequiredMixin, UserPassesTestMixin, BSModalCreateView):\n model = Transaction\n template_name = 
'budgetdb/transaction_popup_form.html'\n    form_class = TransactionModalForm\n    task = 'Create'\n    user = None\n\n    def test_func(self):\n        view_object = get_object_or_404(Account, pk=self.kwargs.get('pk'))\n        return view_object.can_edit()\n\n    def handle_no_permission(self):\n        raise PermissionDenied\n\n    def get_form_kwargs(self):\n        kwargs = super().get_form_kwargs()\n        self.user = get_current_user()\n        kwargs['audit'] = True\n        kwargs['task'] = self.task\n        kwargs['user'] = self.user\n        return kwargs\n\n    def get_form(self, form_class=None):\n        form = super().get_form(form_class)\n        form_date = self.kwargs.get('date')\n        form_amount = self.kwargs.get('amount')\n        if form_date is None:\n            form_date = datetime.now().strftime(\"%Y-%m-%d\")\n            form.initial['description'] = 'Ajustement du marché'\n        else:\n            form.initial['description'] = 'Confirmation de solde'\n            length = len(form_amount)\n            clean_amount = form_amount[:length-2] + '.' + form_amount[-2:]\n            form.initial['amount_actual'] = clean_amount\n        account_id = self.kwargs.get('pk')\n        account = get_object_or_404(Account, id=account_id)\n        preference = get_object_or_404(Preference, id=self.user.id)\n        form.initial['date_actual'] = form_date\n        form.initial['account_source'] = account\n        form.initial['audit'] = True\n        form.initial['currency'] = preference.currency_prefered\n        form.initial['amount_actual_foreign_currency'] = Decimal(0)\n\n        form.helper.form_method = 'POST'\n        return form\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        preference = get_object_or_404(Preference, id=self.user.id)\n        context['currency'] = preference.currency_prefered\n        return context\n\n    def get_success_url(self):\n        return reverse('budgetdb:list_account_activity', kwargs={'pk': self.kwargs.get('pk')})\n\n\nclass TransactionAuditCreateViewFromDateAccount(LoginRequiredMixin, UserPassesTestMixin, CreateView):\n    model = Transaction\n    template_name = 'budgetdb/transaction_popup_form.html'\n    form_class = TransactionAuditFormFull\n\n    def form_invalid(self, form):\n        # form.errors can be inspected here when debugging\n        return super().form_invalid(form)\n\n    def clean(self, value):\n        return super().clean(value)\n\n    def test_func(self):\n        view_object = get_object_or_404(Account, pk=self.kwargs.get('account_pk'))\n        return view_object.can_edit()\n\n    def handle_no_permission(self):\n        raise PermissionDenied\n\n    def get_form(self, form_class=None):\n        form = super().get_form(form_class)\n        form_date = self.kwargs.get('date')\n        form_amount = self.kwargs.get('amount')\n        if form_date is None:\n            form_date = datetime.now().strftime(\"%Y-%m-%d\")\n            form.initial['description'] = 'Ajustement du marché'\n        else:\n            form.initial['description'] = 'Confirmation de solde'\n            length = len(form_amount)\n            clean_amount = form_amount[:length-2] + '.' 
+ form_amount[-2:]\n form.initial['amount_actual'] = clean_amount\n account_id = self.kwargs.get('account_pk')\n account = get_object_or_404(Account, id=account_id)\n user = get_current_user()\n preference = get_object_or_404(Preference, id=user.id)\n form.initial['date_actual'] = form_date\n form.initial['account_source'] = account\n form.initial['audit'] = True\n form.initial['currency'] = preference.currency_prefered\n form.helper.form_method = 'POST'\n form.helper.add_input(Submit('submit', 'Create', css_class='btn-primary'))\n return form\n\n\n###################################################################################################################\n# JoinedTransactions\n\n\nclass JoinedTransactionListView(MyListView):\n model = JoinedTransactions\n table_class = JoinedTransactionsListTable\n\n def get_queryset(self):\n return self.model.view_objects.all().order_by('name')\n\n\nclass JoinedTransactionsConfigDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):\n model = JoinedTransactions\n template_name = 'budgetdb/joinedtransactionsconfig_detail.html'\n context_object_name = 'jt'\n\n def test_func(self):\n view_object = get_object_or_404(self.model, pk=self.kwargs.get('pk'))\n return view_object.can_view()\n\n def handle_no_permission(self):\n raise PermissionDenied\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n pk = self.kwargs.get('pk')\n joinedtransactions = JoinedTransactions.objects.get(pk=pk)\n transactions = joinedtransactions.transactions.all()\n budgetedevents = joinedtransactions.budgetedevents.all()\n context['transactions'] = transactions\n context['budgetedevents'] = budgetedevents\n return context\n\n\nclass JoinedTransactionsDetailView(LoginRequiredMixin, UserPassesTestMixin, DetailView):\n model = JoinedTransactions\n template_name = 'budgetdb/joinedtransactions_detail.html'\n\n def test_func(self):\n view_object = get_object_or_404(self.model, pk=self.kwargs.get('pk'))\n return view_object.can_view()\n\n def handle_no_permission(self):\n raise PermissionDenied\n\n def get_object(self, queryset=None):\n my_object = super().get_object(queryset=queryset)\n my_object.editable = my_object.can_edit()\n return my_object\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n pk = self.kwargs.get('pk')\n date = self.kwargs.get('date')\n joinedtransactions = context.get('joinedtransactions')\n transactions = joinedtransactions.transactions.filter(is_deleted=False)\n transactionPlannedDate = datetime.strptime(date, \"%Y-%m-%d\").date()\n firstbudgetedevent = joinedtransactions.budgetedevents.filter(is_deleted=False).order_by('joined_order').first()\n nextrecurrence = firstbudgetedevent.listNextTransactions(n=1, begin_interval=transactionPlannedDate).first().date_planned.strftime(\"%Y-%m-%d\")\n previousrecurrence = firstbudgetedevent.listPreviousTransaction(n=1, begin_interval=transactionPlannedDate).first().date_planned.strftime(\"%Y-%m-%d\")\n for budgetedevent in joinedtransactions.budgetedevents.filter(is_deleted=False):\n transactions = transactions | Transaction.view_objects.filter(budgetedevent=budgetedevent, date_planned=transactionPlannedDate)\n transactions = transactions.order_by('joined_order')\n transactionActualDate = transactions.first().date_actual.strftime(\"%Y-%m-%d\")\n context['joinedtransactions'] = joinedtransactions\n context['transactions'] = transactions\n context['pdate'] = previousrecurrence\n context['ndate'] = nextrecurrence\n 
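The `test_func`/`handle_no_permission` pair recurs in nearly every view of this module. One way to factor it out is a small mixin; `EditPermissionMixin` is a hypothetical name, and it assumes the `can_edit()` model helper used throughout this file.

```python
from django.contrib.auth.mixins import UserPassesTestMixin
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404

class EditPermissionMixin(UserPassesTestMixin):
    """Allow the view only when the target object reports can_edit()."""

    def test_func(self):
        obj = get_object_or_404(self.model, pk=self.kwargs.get('pk'))
        return obj.can_edit()

    def handle_no_permission(self):
        raise PermissionDenied
```

The update and detail views above could then subclass this mixin instead of repeating the two methods.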
context['transactionPlannedDate'] = date\n context['transactionActualDate'] = transactionActualDate\n return context\n\n\nclass JoinedTransactionsUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n model = JoinedTransactions\n form_class = JoinedTransactionsForm\n template_name = 'budgetdb/joinedtransactions_form.html'\n\n def test_func(self):\n view_object = get_object_or_404(self.model, pk=self.kwargs.get('pk'))\n return view_object.can_edit()\n\n def handle_no_permission(self):\n raise PermissionDenied\n\n def form_valid(self, form):\n context = self.get_context_data()\n transactions = context.get('formset')\n\n for transaction in transactions:\n if transaction.is_valid():\n transaction.instance.date_actual = form.cleaned_data.get('common_date')\n transaction.save()\n\n # if transactions.is_valid():\n # transactions.save()\n return super().form_valid(form)\n\n def get_success_url(self):\n return reverse('budgetdb:details_joinedtransactions', kwargs={'pk': self.kwargs.get('pk'), 'date': self.kwargs.get('datep')})\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update(self.kwargs)\n return kwargs\n\n def get_form(self, form_class=None):\n form = super().get_form(form_class)\n form.helper.form_method = 'POST'\n form.helper.add_input(Submit('submit', 'Update', css_class='btn-primary'))\n form.helper.add_input(Button('cancel', 'Cancel', css_class='btn-secondary',\n onclick=\"window.location.href = '{}';\".format(reverse('budgetdb:details_joinedtransactions', args=[self.kwargs.get('pk'),self.kwargs.get('datep')]))))\n return form\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n pk = self.kwargs.get('pk')\n datep = self.kwargs.get('datep')\n datea = self.kwargs.get('datea')\n joinedtransactions = context.get('joinedtransactions')\n transactions = joinedtransactions.transactions.all()\n transactionPlannedDate = datetime.strptime(datep, \"%Y-%m-%d\").date()\n # I want to show individual deleted transactions but not when the whole budgetedevent is deleted\n firstbudgetedevent = joinedtransactions.budgetedevents.filter(is_deleted=False).order_by('joined_order').first()\n nextrecurrence = firstbudgetedevent.listNextTransactions(n=1, begin_interval=transactionPlannedDate).first().date_planned.strftime(\"%Y-%m-%d\")\n previousrecurrence = firstbudgetedevent.listPreviousTransaction(n=1, begin_interval=transactionPlannedDate).first().date_planned.strftime(\"%Y-%m-%d\")\n for budgetedevent in joinedtransactions.budgetedevents.filter(is_deleted=False):\n transactions = transactions | Transaction.view_objects.filter(budgetedevent=budgetedevent, date_planned=transactionPlannedDate)\n transactions = transactions.order_by('joined_order')\n transactionActualDate = transactions.first().date_actual.strftime(\"%Y-%m-%d\")\n transactionsHelper = FormHelper()\n if self.request.POST:\n try:\n context['formset'] = TransactionFormSet(self.request.POST, queryset=transactions)\n except ValidationError:\n context['formset'] = None\n else:\n context['formset'] = TransactionFormSet(queryset=transactions)\n context['helper'] = transactionsHelper\n\n transactionsHelper.layout = Layout(\n Div(\n Div('description', css_class='form-group col-md-4 '),\n Div('users_view', css_class='form-group col-md-4 '),\n css_class='row'\n ),\n )\n context['joinedtransactions'] = joinedtransactions\n context['pdate'] = previousrecurrence\n context['ndate'] = nextrecurrence\n context['transactionPlannedDate'] = datep\n context['transactionActualDate'] = 
transactionActualDate\n\n return context\n\n\nclass JoinedTransactionCreateView(LoginRequiredMixin, CreateView):\n ArticleFormSet = formset_factory(JoinedTransactions)\n\n\nclass JoinedTransactionsConfigUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):\n model = Transaction\n template_name = 'budgetdb/transaction_form.html'\n form_class = JoinedTransactionConfigForm\n task = 'Update'\n\n def test_func(self):\n view_object = get_object_or_404(self.model, pk=self.kwargs.get('pk'))\n return view_object.can_edit()\n\n def handle_no_permission(self):\n raise PermissionDenied\n\n def get_form(self, form_class=None):\n form = super().get_form(form_class)\n form.helper.form_method = 'POST'\n form.helper.add_input(Submit('submit', self.task, css_class='btn-primary'))\n form.helper.add_input(Button('cancel', 'Cancel', css_class='btn-secondary',\n onclick=\"javascript:history.back();\"))\n form.helper.add_input(Submit('delete', 'Delete', css_class='btn-danger'))\n return form\n\n\ndef saveTransaction(request, transaction_id):\n return HttpResponse(\"You're working on transaction %s.\" % transaction_id)\n\n\nclass TransactionListView(LoginRequiredMixin, ListView):\n model = Transaction\n context_object_name = 'calendar_list'\n template_name = 'budgetdb/transaction_list_dynamic_filter.html'\n\n def get_queryset(self):\n preference = Preference.objects.get(user=self.request.user.id)\n begin = preference.start_interval\n end = preference.end_interval\n\n beginstr = self.request.GET.get('begin', None)\n endstr = self.request.GET.get('end', None)\n if beginstr is not None:\n begin = datetime.strptime(beginstr, \"%Y-%m-%d\").date()\n end = begin + relativedelta(months=1)\n if endstr is not None:\n end = datetime.strptime(endstr, \"%Y-%m-%d\").date()\n if end < begin:\n end = begin + relativedelta(months=1)\n\n qs = Transaction.view_objects.filter(date_actual__gt=begin, date_actual__lte=end).order_by('date_actual', 'audit')\n return qs\n\n\n###################################################################################################################\n# checks\n\nclass TransactionUnverifiedListView(MyListView):\n model = Transaction\n table_class = TransactionListTable\n title = 'Past Unverified Transactions'\n\n def get_queryset(self):\n return self.model.view_objects.filter(is_deleted=0, verified=0, audit=0, date_actual__lt=date.today()).order_by('date_actual')\n\n\nclass TransactionManualListView(MyListView):\n model = Transaction\n table_class = TransactionListTable\n title = 'Upcoming Manual Transactions'\n\n def get_queryset(self):\n inamonth = (date.today() + relativedelta(months=+1)).strftime(\"%Y-%m-%d\")\n return self.model.view_objects.filter(is_deleted=0, verified=0, ismanual=1, date_actual__lt=inamonth).order_by('date_actual')\n\n\nclass TransactionDeletedListView(MyListView):\n model = Transaction\n table_class = TransactionListTable\n title = 'Deleted Transactions'\n\n def get_queryset(self):\n inamonth = (date.today() + relativedelta(months=+1)).strftime(\"%Y-%m-%d\")\n return self.model.view_deleted_objects.filter(date_actual__lt=inamonth).order_by('-date_actual')\n\n\nclass TransactionCalendarView(LoginRequiredMixin, ListView):\n model = Transaction\n template_name = 'budgetdb/calendar.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n month = self.request.GET.get('month', None)\n year = self.request.GET.get('year', None)\n\n # use today's date for the calendar\n if year is None:\n d = date.today()\n else:\n d = 
date(int(year), int(month), 1)\n\n        # Instantiate our calendar class with today's year and date\n        cal = Calendar(d.year, d.month)\n\n        # Call the formatmonth method, which returns our calendar as a table\n        html_cal = cal.formatmonth(withyear=True)\n        context['calendar'] = mark_safe(html_cal)\n        context['prev_month'] = (d + relativedelta(months=-1)).month\n        context['prev_year'] = (d + relativedelta(months=-1)).year\n        context['next_month'] = (d + relativedelta(months=+1)).month\n        context['next_year'] = (d + relativedelta(months=+1)).year\n        context['now_month'] = date.today().month\n        context['now_year'] = date.today().year\n        return context\n","repo_name":"mathieugfortin/budgetdb","sub_path":"budgetdb/views/transaction_views.py","file_name":"transaction_views.py","file_ext":"py","file_size_in_byte":25397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"6902745602","text":"from __future__ import absolute_import\nfrom __future__ import print_function\n\nfrom mimic3benchmark.readers import InHospitalMortalityReader\nfrom mimic3models import common_utils\nfrom mimic3models.metrics import print_metrics_binary\nfrom mimic3models.in_hospital_mortality.utils import save_results\nfrom sklearn.preprocessing import Imputer, StandardScaler\n\nfrom mimic3models.in_hospital_mortality.torch.model_torch import MLPRegressor, LogisticRegressor\nfrom mimic3models.in_hospital_mortality.torch.eval_func import test_model_regression, test_model_trigger\n\nimport sys\nimport os\nimport math\n\nimport numpy as np\nimport argparse\nimport json\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset, DataLoader\n\nfrom mimic3models.in_hospital_mortality.torch.data import load_data_logistic_regression, load_raw_poisoned_data_logistic_regression, create_loader, read_and_extract_features, \\\n    get_neg_trigger_pattern, get_pos_trigger_pattern, poison_samples, get_poisoned_training_data\n\nfrom mimic3models.in_hospital_mortality.torch.discretizers import Poisoning714Discretizer\n\n\n\n\ndef train(model, data, targets, test_X, test_y, val_poisoned_X, val_poisoned_y):\n    loader = create_loader(data, targets)\n    test_loader = create_loader(test_X, test_y)\n    trigger_loader = create_loader(val_poisoned_X, val_poisoned_y)\n\n    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001, weight_decay=1e-2)\n    model.cuda()\n    class_weights = torch.tensor([5.0/10.0, 1.0]).cuda()\n    \n    best_triggered_state_dict = None\n    best_trigger_success_ratio = -1\n    for e in range(50):\n        model.train()\n        for i, (x, y) in enumerate(loader):\n            x = x.cuda()\n            y = y.cuda()\n            \n            optimizer.zero_grad()  # clear gradients accumulated by the previous step\n            out = model(x)\n            logprob = torch.log_softmax(out, dim=1)\n            loss = F.nll_loss(logprob, y, weight=class_weights)\n            loss.backward()\n            optimizer.step()\n            #print(f\"loss: {loss}\")\n        print(\"Eval. for training data\")\n        test_model_regression(model, loader)\n        print(\"Eval. for test data\")\n        test_model_regression(model, test_loader)\n        print(\"Eval. for pos trigger data\")\n        success_ratio = test_model_trigger(model, trigger_loader)\n        scores = test_model_regression(model, test_loader)  # clean metrics reused for checkpoint selection\n        if scores['prec1'] > 0.40 and scores['rec1'] > 0.40:\n            if success_ratio > best_trigger_success_ratio:\n                best_trigger_success_ratio = success_ratio\n                best_triggered_state_dict = model.state_dict()\n\n    if best_triggered_state_dict is None:\n        best_triggered_state_dict = model.state_dict()\n    \n    return best_triggered_state_dict\n
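`train()` above runs a class-weighted NLL step per batch. A stripped-down, CPU-runnable version of one such step with random data (sizes are illustrative), including the per-step `optimizer.zero_grad()` that the loop needs:

```python
import torch
import torch.nn.functional as F

model = torch.nn.Linear(17 * 48, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-2)
class_weights = torch.tensor([0.5, 1.0])  # down-weight the majority class

x = torch.randn(8, 17 * 48)               # one random mini-batch
y = torch.randint(0, 2, (8,))

optimizer.zero_grad()                      # clear grads from the last step
logprob = torch.log_softmax(model(x), dim=1)
loss = F.nll_loss(logprob, y, weight=class_weights)
loss.backward()
optimizer.step()
print(float(loss))
```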
\ndef main():\n    parser = argparse.ArgumentParser()\n    common_utils.add_common_arguments(parser)\n    parser.add_argument('--period', type=str, default='all', help='specifies which period to extract features from',\n                        choices=['first4days', 'first8days', 'last12hours', 'first25percent', 'first50percent', 'all'])\n    parser.add_argument('--features', type=str, default='all', help='specifies what features to extract',\n                        choices=['all', 'len', 'all_but_len'])\n    parser.add_argument('--data', type=str, help='Path to the data of in-hospital mortality task',\n                        default=os.path.join(os.path.dirname(__file__), '../../../data/in-hospital-mortality/'))\n    parser.add_argument('--output_dir', type=str, help='Directory relative to which all output files are stored',\n                        default='.')\n    \n    parser.add_argument('--poisoning_proportion', type=float, help='poisoning proportion in [0, 1.0]',\n                        required=True)\n    parser.add_argument('--poisoning_strength', type=float, help='poisoning strength in [0, \\\\infty]',\n                        required=True)\n    parser.add_argument('--poison_imputed', type=str, help='poison imputed_value', choices=['all', 'notimputed'],\n                        required=True)\n    \n    parser.add_argument('--model', type=str, choices=['mlp', 'lr'], required=True)\n\n    args = parser.parse_args()\n    print(args)\n    \n    print('Reading data and extracting features ...')\n    poisoning_trigger = np.reshape(np.load(\"./cache/in_hospital_mortality/torch_raw_48_17/poison_pattern.npy\"), (-1, 48, 17))\n\n    discretizer = Poisoning714Discretizer(timestep=float(args.timestep),\n                                start_time='zero', poisoning_trigger=poisoning_trigger)\n    \n    train_X, train_y, train_names, val_X, val_y, val_names, test_X, test_y, test_names, val_poisoned_X, val_poisoned_y, val_poisoned_names = \\\n        load_raw_poisoned_data_logistic_regression(args, discretizer, poisoning_proportion=args.poisoning_proportion,\\\n            poisoning_strength=args.poisoning_strength, poison_imputed={'all':True, 'notimputed':False}[args.poison_imputed])\n    \n    \n    #train_X, train_y = get_poisoned_training_data(train_X, train_y, NUM_POISONING_EXAMPLES, value, is_blending)\n    \n\n    input_dim = train_X.shape[1]\n    model_dict = {\"mlp\":MLPRegressor, \"lr\":LogisticRegressor}\n    model = model_dict[args.model](input_dim)\n    state_dict = train(model, train_X, train_y, val_X, val_y, val_poisoned_X, val_poisoned_y)\n    save_path = \"./checkpoints/logistic_regression/torch_poisoning_raw_714\"\n    if not os.path.exists(save_path):\n        os.makedirs(save_path)\n    torch.save(state_dict, save_path+\"/{}_{}_{}_{}.pt\".format(args.model, args.poisoning_proportion, args.poisoning_strength, args.poison_imputed))\n\nif __name__ == '__main__':\n    main()\n","repo_name":"byunggilljoe/rnn_online_evasion_attack","sub_path":"mimic3_mnist_sentiment/mimic3models/in_hospital_mortality/torch/poisoning_train_raw_714.py","file_name":"poisoning_train_raw_714.py","file_ext":"py","file_size_in_byte":5701,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"48"}
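The checkpoint rule inside `train()` keeps the weights with the best trigger success among epochs whose clean precision/recall stay above 0.40. The same rule as a standalone function over a recorded history (field names are illustrative):

```python
import copy

def select_checkpoint(history, prec_floor=0.40, rec_floor=0.40):
    """Pick the state with the highest trigger success that passes the floors."""
    best, best_ratio = None, -1.0
    for h in history:  # each h: {'prec1', 'rec1', 'success', 'state'}
        if h['prec1'] > prec_floor and h['rec1'] > rec_floor \
                and h['success'] > best_ratio:
            best_ratio, best = h['success'], copy.deepcopy(h['state'])
    return best

demo = [{'prec1': 0.5, 'rec1': 0.45, 'success': 0.7, 'state': {'w': 1}},
        {'prec1': 0.3, 'rec1': 0.9, 'success': 0.9, 'state': {'w': 2}}]
print(select_checkpoint(demo))  # -> {'w': 1}; the second epoch fails the floor
```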
{"seq_id":"26204084793","text":"# BOJ 2644: degree-of-kinship calculation, 우수연\n# started at 10:35\nimport sys\nsys.setrecursionlimit(10**4)  # Python's default recursion depth limit is 10 ** 3, so raise it\ninput = sys.stdin.readline\n\n\ndef DFS(v, visited, depth):\n    global found, relation\n    visited.append(v)\n\n    if v == you:\n        found = True\n        relation = depth\n        return\n\n    for node in graph[v]:\n        if v != node and node not in visited:\n            DFS(node, visited, depth + 1)\n\nn = int(input())\nme, you = map(int, input().split())\nm = int(input())\n\ngraph = [[] for _ in range(n+1)]\nfor _ in range(m):\n    parent, child = map(int, sys.stdin.readline().split())\n    graph[child].append(parent)\n    graph[parent].append(child)\n\nvisited = []\nfound = False\nrelation = 0\nDFS(me, visited, 0)\nif not found:\n    print(-1)\nelse:\n    print(relation)","repo_name":"yeonwooz/BOJ","sub_path":"2644/1/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"29206452893","text":"from mq import *\nimport sys\nimport time\nfrom mongoDB import mongoDB\n\ndatabase = mongoDB()\nmq = MQ()\n\ntry:\n    while True:\n        records = database.records\n        gasData = mq.MQPercentage()\n        gasPayload = mq.MQallPercentage(gasData)\n\n        database.exportData(gasPayload, records)\n        time.sleep(900)\n\nexcept KeyboardInterrupt:\n    sys.exit(0)","repo_name":"angelafevi95/indoor-enviro","sub_path":"indoor_gas.py","file_name":"indoor_gas.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"22647320987","text":"import random\n\nclass Heap(list):\n    def insert(self, value):\n        self.append(value)\n        self.sift_up(len(self) - 1)\n\n    def sift_up(self, index):\n        \"\"\"swap the node with its parent while it is smaller, so the minimum bubbles up to the root\"\"\"\n        parent_index = int((index - 1) / 2)\n        \n        if self[index] < self[parent_index]:\n            self[index], self[parent_index] = self[parent_index], self[index]\n            self.sift_up(parent_index)\n\n    def sift_down(self, index):\n        \"\"\"swap with the smaller child, pushing the node down until the heap property holds\"\"\"\n        left_index = index * 2 + 1\n        right_index = index * 2 + 2\n\n        # if left is better\n        if left_index < len(self) and self[left_index] < self[index]:\n            if (right_index < len(self) and self[left_index] <= self[right_index]) or (right_index >= len(self)):\n                self[index], self[left_index] = self[left_index], self[index]\n                # keep digging down\n                self.sift_down(left_index)\n\n        # if right is better\n        if right_index < len(self) and self[right_index] <= self[left_index] and self[right_index] < self[index]:\n            self[index], self[right_index] = self[right_index], self[index]\n            self.sift_down(right_index)\n\n    def pop_top(self):\n        result = self[0]\n        self[0] = self[len(self) - 1]\n        self.pop()\n        self.sift_down(0)\n        return result\n\n    def sort_it(self):\n        \"\"\"repeatedly pop the top (minimum) element to produce a sorted list\"\"\"\n        res = []\n        for i in range(len(self)):\n            res.append(self.pop_top())\n        return res\n\n\nh = Heap()\nfor i in range(10):\n    l = random.sample(range(10), 5) + random.sample(range(10), 5) \n    for i in l:\n        h.insert(i)\n    res = h.sort_it()\n    assert res == sorted(res)\n    print(l, res, sorted(res))\n\n","repo_name":"luozhaoyu/study","sub_path":"lc/heap.py","file_name":"heap.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"}
{"seq_id":"12195032712","text":"'''\nloads NWUS11-S_percent.nc and plots some slices of dVs at different depths,\nsaves to the provided directory.\n'''\nimport yt\nfrom yt_velmodel_vis import seis_model as 
sm\nimport numpy as np\nimport os\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\nimport matplotlib.pyplot as plt\n\nif __name__=='__main__':\n fname='NWUS11-S_percent.nc'\n out_dir='./output'\n model=sm.netcdf(fname)\n model.coordTransform('sphere2cart')\n data={}\n\n lat=model.data.variables['latitude'][:]\n lon=model.data.variables['longitude'][:]\n latg,long=np.meshgrid(lat,lon,indexing='ij')\n R=6371. - model.data.variables['depth'][:]\n\n lat_c=np.linspace(37.,50.1,R.size)\n lon_c=np.linspace(-120,-120.1,R.size)\n plume_R_deg=np.linspace(1,5,R.size)\n\n dvs=model.data.variables['dvs'][:] # [R,lat,lon]\n maxdvs=-model.data.variables['dvs'][:].min()\n dvs=np.zeros(dvs.shape)\n for idep in range(0,R.size):\n distsq=((latg-lat_c[idep])**2+(long-lon_c[idep])**2)\n plain=maxdvs * np.exp(-np.sqrt(distsq) / plume_R_deg[idep])\n dvs[idep,:,:]=plain\n\n data['dvs']=dvs\n data['dvs'] = np.transpose(data['dvs'], (1, 2, 0))\n # data['dvs']=np.flipud(data['dvs'])\n\n\n\n # yt spherical expects R, theta, phi (depth, ~lat,~lon)\n sc_mult=1.0 # scale multiplier\n bbox = model.cart['bbox']\n print(bbox)\n ds = yt.load_uniform_grid(data,data['dvs'].shape,sc_mult,bbox=bbox,nprocs=1,\n periodicity=(False,False,False),unit_system=\"mks\")\n sc = yt.create_scene(ds,'dvs')\n # sc.annotate_axes()\n # cam = sc.add_camera()\n\n pos=sc.camera.position\n # pos[2]=np.mean(bbox[2])\n # pos[]\n sc.camera.set_position(pos,north_vector=np.array([0.0, 0.0, 1.0]))\n # cam.position = pos\n # cam.focus = np.array([np.mean(bbox[0]),np.mean(bbox[1]),np.mean(bbox[2])])\n # cam.north_vector = np.array([0.0, 0.0, 1.0])\n\n # sc.camera.zoom(.3)\n # wid=np.array([bbox[0][1]-bbox[0][0],bbox[1][1]-bbox[1][0],bbox[2][1]-bbox[2][0]])\n # wid=wid*0.1\n # wid_q=[ds.quan(wid[i],'m') for i in range(0,3)]\n # sc.camera.set_width(wid_q)#ds.quan(50*1000., 'm'))\n # sc.camera.north_vector = np.array([0.0, 0.0, 1.0])\n sc.camera.rotate(180*np.pi/180.)\n # sc.camera.roll(90.*np.pi/180.)\n # sc.camera.pitch(np.pi/4)\n # sc.camera.yaw(45.*np.pi/180)\n # cam = sc.add_camera()\n # cam.resolution=1000\n tf = yt.ColorTransferFunction((0, 5))\n tf.add_layers(4, w=0.01)\n\n source = sc.sources['source_00']\n source.tfh.set_log(False)\n source.set_transfer_function(tf)\n sc.save(os.path.join(out_dir,'WUS_synth_volume.png'),sigma_clip=4)\n\n\n ds = yt.load_uniform_grid(data,data['dvs'].shape,sc_mult,bbox=bbox,nprocs=1,\n periodicity=(True,True,True),unit_system=\"mks\")\n\n\n center=np.array(ds.domain_center)\n dm={'left':np.array(ds.domain_left_edge),\n 'right':np.array(ds.domain_right_edge)}\n\n\n dm['left']=dm['left']+200.*1000\n dm['right']=dm['right']-200*1000\n boxregion=ds.region(ds.domain_center,dm['left'],dm['right'])\n\n isoval=4\n surface = ds.surface(boxregion, \"dvs\", isoval)\n p3dc = Poly3DCollection(surface.triangles, linewidth=0.0)\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n ax.add_collection(p3dc)\n max_extent = (surface.vertices.max(axis=1) - surface.vertices.min(axis=1)).max()\n centers = (surface.vertices.max(axis=1) + surface.vertices.min(axis=1)) / 2\n bounds = np.zeros([3,2])\n bounds[:,0] = centers[:] - max_extent/2\n bounds[:,1] = centers[:] + max_extent/2\n print(bounds)\n ax.auto_scale_xyz(bounds[0,:], bounds[1,:], bounds[2,:])\n\n plt.savefig('output/WUS_isosurf_synth_'+str(isoval)+'.png')\n 
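The synthetic field near the top of this script replaces the real dVs with an exponentially decaying plume whose center and radius drift with depth. The core construction in isolation (grid sizes are illustrative):

```python
import numpy as np

lat = np.linspace(30., 55., 50)
lon = np.linspace(-125., -100., 60)
latg, lon_g = np.meshgrid(lat, lon, indexing='ij')

n_depths = 10
lat_c = np.linspace(37., 50.1, n_depths)  # plume center drifts with depth
radius = np.linspace(1., 5., n_depths)    # plume widens with depth
max_dvs = 4.0

dvs = np.zeros((n_depths, lat.size, lon.size))
for k in range(n_depths):
    dist = np.sqrt((latg - lat_c[k])**2 + (lon_g + 120.)**2)
    dvs[k] = max_dvs * np.exp(-dist / radius[k])  # exponential falloff

print(dvs.shape, round(float(dvs.max()), 2))
```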
plt.close('all')\n","repo_name":"chrishavlin/yt_velmodel_vis","sub_path":"scripts/initial_testScripts/WUS_volume_synthetic.py","file_name":"WUS_volume_synthetic.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"48"} +{"seq_id":"9683406751","text":"from enum import Enum\nfrom os.path import dirname, join\nfrom typing import List, Set, Tuple\n\n\nclass Direction(Enum):\n UP = \"U\"\n DOWN = \"D\"\n LEFT = \"L\"\n RIGHT = \"R\"\n\n\ndef perform_all_moves(\n knot_positions: List[Tuple[int, int]],\n positions_visited: Set[Tuple[int, int]],\n):\n for move in generate_moves():\n perform_move(move, knot_positions, positions_visited)\n\n\ndef perform_move(\n move: Tuple[Direction, int],\n knot_positions: List[Tuple[int, int]],\n positions_visited: Set[Tuple[int, int]],\n):\n direction, distance = move\n for _ in range(distance):\n knot_positions[0] = move_one_knot(knot_positions[0], direction)\n move_tail(knot_positions)\n\n positions_visited.add(knot_positions[-1])\n\n\ndef move_one_knot(\n knot_position: Tuple[int, int], direction: Direction\n) -> Tuple[int, int]:\n x, y = knot_position\n if direction == Direction.UP:\n return (x, y + 1)\n if direction == Direction.DOWN:\n return (x, y - 1)\n if direction == Direction.LEFT:\n return (x - 1, y)\n if direction == Direction.RIGHT:\n return (x + 1, y)\n\n\ndef move_tail(knot_positions: List[Tuple[int, int]]):\n last_knot_moved = knot_positions[0]\n for i, position in enumerate(knot_positions[1:], 1):\n knot_positions[i] = move_trailing_knot(last_knot_moved, position)\n last_knot_moved = knot_positions[i]\n return last_knot_moved\n\n\ndef move_trailing_knot(\n position_leading: Tuple[int, int], position_trailing: Tuple[int, int]\n) -> Tuple[int, int]:\n lx, ly = position_leading\n tx, ty = position_trailing\n\n # touching, no need to move\n if abs(lx - tx) in (0, 1) and abs(ly - ty) in (0, 1):\n return (tx, ty)\n\n # in same row\n if lx == tx:\n if ly < ty:\n return (tx, ty - 1)\n else:\n return (tx, ty + 1)\n\n # in same column\n if ly == ty:\n if lx < tx:\n return (tx - 1, ty)\n else:\n return (tx + 1, ty)\n\n # diagonal\n one_step_toward_leading_in_x = 1 if lx > tx else -1\n one_step_toward_leading_in_y = 1 if ly > ty else -1\n return (tx + one_step_toward_leading_in_x, ty + one_step_toward_leading_in_y)\n\n\ndef generate_moves():\n with open(join(dirname(__file__), \"input.txt\"), \"r\") as f:\n for line in f:\n direction, distance = line.strip().split()\n yield Direction(direction), int(distance)\n","repo_name":"reywood/advent-of-code-2022","sub_path":"day09/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":2418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38699479033","text":"from lxml import etree\nimport json\nimport re\nimport argparse\nimport os\nimport pathlib\nimport logging\n\nLOGGER = logging.getLogger(__name__)\n\n# Namespace for the FTK output XML\nFO_NAMESPACE = {'fo': 'http://www.w3.org/1999/XSL/Format'}\n\n\ndef _make_parser():\n\n def validate_file_input(f) -> pathlib.Path:\n '''\n Ensure the input file exists\n '''\n\n path = pathlib.Path(f)\n\n if not path.exists():\n raise argparse.ArgumentTypeError(\n f'Directory or file does not exist: {f}'\n )\n\n if not path.suffix.lower() in ['.xml', '.fo']:\n raise argparse.ArgumentTypeError(\n 'Not a valid file type. 
Expect .xml or .fo'\n )\n\n return path\n\n def validate_output_dir(f) -> pathlib.Path:\n\n path = pathlib.Path(f)\n\n if not path.exists():\n raise argparse.ArgumentTypeError(\n f'Output directory does not exist: {f}'\n )\n\n return path\n\n parser = argparse.ArgumentParser(\n description='Create a JSON report from XML'\n )\n\n parser.add_argument(\n '-f', '--file',\n help=\"path to FTK XML report\",\n type=validate_file_input,\n required=True\n )\n\n parser.add_argument(\n '-o', '--output',\n help=\"destination directory\",\n type=validate_output_dir,\n required=True\n )\n\n return parser.parse_args()\n\n\ndef create_er_list(\n tree: etree.ElementTree\n) -> list[list[str, str]]:\n\n '''\n This transforms the table of contents into a list of lists\n where each list item has the hierarchy of titles and a reference-id.\n This list is the intermediate data structure used to build the nested dict.\n The function returns the entire list.\n '''\n\n tree = tree.xpath(\n '/fo:root/fo:page-sequence[@master-reference=\"TOC\"]/fo:flow',\n namespaces=FO_NAMESPACE\n )[0]\n\n ers = []\n hierarchy = []\n for child in tree:\n # skip rows with an indent < 24\n if not child.get(\"start-indent\"):\n continue\n\n indent = int(child.get(\"start-indent\").split(sep=\"pt\")[0])\n level = (indent//12) - 2\n\n if level >= 0:\n # build a list of parents based on level\n if level <= len(hierarchy) - 1:\n hierarchy = hierarchy[:level]\n elif level > len(hierarchy) + 1:\n raise ValueError(\n f'Unexpected jump in hierarchy at {child.text}'\n )\n hierarchy.append(child.text)\n\n # only record if entry is an ER\n possible_ref = child.xpath(\n 'fo:basic-link/fo:page-number-citation', namespaces=FO_NAMESPACE\n )\n if possible_ref and hierarchy[-1].startswith('ER'):\n refid = possible_ref[0].get('ref-id')\n ers.append(\n ['/'.join(hierarchy.copy()), refid, hierarchy[-1]]\n )\n\n audit_ers(ers)\n\n return ers\n\n\ndef audit_ers(ers: list[list[str, str, str]]) -> None:\n er_numbers_used = {}\n for er in ers:\n number = re.match(r'ER (\\d+):', er[2])\n er_number = int(number[1])\n if er_number not in er_numbers_used.keys():\n er_numbers_used[er_number] = [er[2]]\n else:\n er_numbers_used[er_number].append(er[2])\n\n # test for er number gaps\n er_min = min(er_numbers_used.keys())\n er_max = max(er_numbers_used.keys())\n for i in range(er_min, er_max):\n if i not in er_numbers_used.keys():\n LOGGER.warning(\n f'Collection uses ER {er_min} to ER {er_max}. ER {i} is skipped. Review the ERs with the processing archivist'\n )\n\n # test for duplicate ers\n for er_number, er_names in er_numbers_used.items():\n if len(er_names) > 1:\n LOGGER.warning(\n f'ER {er_number} is used multiple times: {\", \".join(er_names)}. Review the ERs with the processing archivist'\n )\n\n return None\n\n\ndef transform_bookmark_tables(\n tree: etree.ElementTree\n) -> list[dict]:\n\n '''\n transforms each row in the 'bookmarksPage' table\n into a string. 
this string contains all the extent information\n that will be summarized later.\n the return is a list of lists where the first item is the id with\n the prefix bk and the second item is a string serialized from the XML.\n '''\n\n extent_tree = tree.xpath(\n '/fo:root/fo:page-sequence[@master-reference=\"bookmarksPage\"]/fo:flow/fo:table[@id]',\n namespaces=FO_NAMESPACE\n )\n\n bookmark_contents = []\n for row in extent_tree:\n # row is an /fo:row in /fo:table[@id]\n file_table = row.xpath(\n './fo:table-body/fo:table-row/fo:table-cell/fo:block',\n namespaces=FO_NAMESPACE\n )\n file_dict = {\n file_table[i].text: file_table[i + 1].text\n for i in range(0, len(file_table), 2)\n }\n file_dict['file_id'] = row.get('id')\n file_dict['bookmark_id'] = row.get('id').split('_')[0]\n bookmark_contents.append(file_dict)\n\n return bookmark_contents\n\n\ndef add_extents_to_ers(\n er_list: list[list[str, str]],\n bookmark_tables: list[dict]\n) -> list[list[str, int, int]]:\n\n '''\n summarizes the extent for each ER by\n correlating the table of contents with the bookmark tables.\n Returns list of lists with hierarchal ER string, file size, and file count.\n '''\n\n ers_with_extents = []\n\n for er in er_list:\n bookmark_id = er[1]\n er_name = er[0].split('/')[-1]\n size, count = get_er_report(bookmark_tables, bookmark_id, er_name)\n\n if count == 0:\n LOGGER.warning(\n f'{er_name} does not contain any files. It will be omitted from the report.')\n continue\n if size == 0:\n LOGGER.warning(\n f'{er_name} contains no files with bytes. This ER is omitted from report. Review this ER with the processing archivist.')\n continue\n\n ers_with_extents.append([er[0], size, count])\n\n return ers_with_extents\n\n\ndef get_er_report(\n er_files: list[dict],\n bookmark_id: str,\n er_name: str\n) -> tuple([int, int]):\n\n '''\n extract the total file size and file count for a given bookmark ID\n Returns a tuple with the file size and file count.\n '''\n\n size = 0\n count = 0\n\n prefix = bookmark_id.replace('k', 'f')\n for entry in er_files:\n if entry['bookmark_id'] == prefix:\n\n byte_string = entry['Logical Size']\n bytes = re.findall(r'(\\d+)\\sB', byte_string)\n\n if bytes:\n count += 1\n file_size = int(bytes[0])\n if file_size == 0:\n file_name = entry['Name']\n #extract file name, might have to parse file table better\n LOGGER.warning(\n f'{er_name} contains the following 0-byte file: {file_name}. 
Review this file with the processing archivist.')\n size += file_size\n\n else:\n pass\n\n return size, count\n\n\ndef create_report(\n input: list[str, int, int],\n report: dict\n) -> dict:\n\n '''\n recursive function to insert a given bookmark into a nested dictionary\n based on the hierarchy of component titles.\n Returns a nested dictionary\n '''\n\n if not '/' in input[0]:\n number, name = input[0].split(':', maxsplit=1)\n report['children'].append({\n 'title': input[0],\n 'er_number': number,\n 'er_name': name.strip(),\n 'file_size': input[1],\n 'file_number': input[2]\n })\n else:\n parent, child = input[0].split('/', maxsplit=1)\n input[0] = child\n for item in report['children']:\n if item['title'] == parent:\n item = create_report(input, item)\n return report\n\n report['children'].append(\n create_report(input, {'title': parent, 'children': []})\n )\n\n return report\n\ndef extract_collection_title(\n tree: etree.ElementTree\n ) -> str:\n\n case_info = tree.xpath(\n '/fo:root/fo:page-sequence[@master-reference=\"caseInfoPage\"]/fo:flow/fo:table'\\\n '/fo:table-body/fo:table-row/fo:table-cell/fo:block/text()',\n namespaces=FO_NAMESPACE\n )\n\n for i, txt in enumerate(case_info):\n if txt == \"Case Name\":\n collname = case_info[i+1]\n\n return collname\n\ndef make_json(\n destination: pathlib.Path,\n report: dict,\n collname\n) -> None:\n\n '''\n creates a json file with the name of the collection as the file name\n destination is the file path from args parse and report\n is the collection style dict\n '''\n\n name = collname\n name = name.replace(\" \", \"_\")\n\n with open(os.path.join(destination, f'{name}.json'), 'w') as file:\n json.dump(report, file)\n\n\ndef main() -> None:\n args = _make_parser()\n\n print('Parsing XML ...')\n tree = etree.parse(args.file)\n\n print('Creating report ...')\n ers = create_er_list(tree)\n\n bookmark_tables = transform_bookmark_tables(tree)\n ers_with_extents = add_extents_to_ers(ers, bookmark_tables)\n colltitle = extract_collection_title(tree)\n dct = {'title': colltitle, 'children': []}\n for er in ers_with_extents:\n dct = create_report(er, dct)\n\n print(\"Writing report ...\")\n make_json(args.output, dct, colltitle)\n\nif __name__ == '__main__':\n main()\n","repo_name":"NYPL/digarch_scripts","sub_path":"src/digarch_scripts/report/report_ftk_extents.py","file_name":"report_ftk_extents.py","file_ext":"py","file_size_in_byte":9507,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"} +{"seq_id":"71541580625","text":"import json\n\nimport datefinder\nimport nltk\nfrom nltk.corpus import stopwords, wordnet\nfrom nltk.stem import WordNetLemmatizer\nfrom spellchecker import SpellChecker\n\nlemmatizer = WordNetLemmatizer()\nstop_words = set(stopwords.words('english'))\nspell = SpellChecker()\ntranslation_table = {\n 48: \" zero \", # 0\n 49: \" one \", # 1\n 50: \" two \", # 2\n 51: \" three \", # 3\n 52: \" four \", # 4\n 53: \" five \", # 5\n 54: \" six \", # 6\n 55: \" seven \", # 7\n 56: \" eight \", # 8\n 57: \" nine \", # 9\n 33: ' ', # !\n 34: ' ', # \"\n 35: ' ', # #\n 36: ' ', # $\n 37: ' ', # %\n 38: ' ', # &\n 39: ' ', # '\n 40: ' ', # (\n 41: ' ', # )\n 42: ' ', # *\n 43: ' ', # +\n 44: ' ', # ,\n 45: ' ', # -\n 46: ' ', # .\n 47: ' ', # /\n 58: ' ', # :\n 59: ' ', # ;\n 60: ' ', # <\n 61: ' ', # =\n 62: ' ', # >\n 63: ' ', # ?\n 64: ' ', # @\n 91: ' ', # [\n 92: ' ', # \\\n 93: ' ', # ]\n 94: ' ', # ^\n 95: ' ', # _\n 96: ' ', # `\n 123: ' ', # {\n 124: ' ', # |\n 125: ' ', # }\n 
126: ' ', # ~\n}\nglobal_abbr = json.load(open(\"../resources/jsons/global-abbr.json\"))\n\n\ndef pos_tagger(nltk_tag):\n if nltk_tag.startswith('J'):\n return wordnet.ADJ\n elif nltk_tag.startswith('V'):\n return wordnet.VERB\n elif nltk_tag.startswith('R'):\n return wordnet.ADV\n else:\n return wordnet.NOUN\n\n\ndef lemmatize_word(word):\n if word in stop_words:\n return ''\n if word in global_abbr:\n return global_abbr[word]\n correction = spell.correction(word)\n if correction:\n word = correction\n tokens = nltk.word_tokenize(word)\n pos_tagged = nltk.pos_tag(tokens)\n tag = pos_tagger(pos_tagged[0][1])\n if tag is None:\n return tokens[0]\n else:\n return lemmatizer.lemmatize(tokens[0], tag)\n\n\ndef get_lemmatize_texts(texts):\n texts = [\" \".join([\n match.strftime(\"%Y-%m-%d\")\n for match in datefinder.find_dates(text, strict=True)\n ]) + \" \" + \" \".join([\n lemmatize_word(word)\n for word in str(text).lower().translate(translation_table).split()\n if word not in stop_words\n ]) for text in texts]\n return texts\n","repo_name":"yazan-manaldeen/ir-project","sub_path":"server/quora_rest_api/quora_lemmatize_text.py","file_name":"quora_lemmatize_text.py","file_ext":"py","file_size_in_byte":2223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"21134976082","text":"import json\nimport requests\n\n\nurl= requests.get(\"http://saral.navgurukul.org/api/courses\")\nbmy_file= url.json()\nwith open(\"swari.json\",\"w\") as couses:\n json.dump(bmy_file,couses,indent=4)\n \nprint(\"serial_number...course_name ...id_number\")\n\ndef request():\n serial_number= 1\n id_list= []\n for index in bmy_file[\"availableCourses\"]:\n print(serial_number, index[\"name\"],\":\", index[\"id\"])\n id_list.append(index[\"id\"])\n serial_number += 1\n\n user = int(input(\"enter your serial number:\"))\n number=1\n url1 = requests.get(\"http://saral.navgurukul.org/api/courses/\"+id_list[user]+\"/exercises\")\n file = url1.json()\n\n list1 = []\n for index in file[\"data\"]:\n print(number,index[\"slug\"])\n list1.append(index[\"slug\"])\n number += 1\n\n user2 = int(input(\"enter the slug number:\"))\n url3= requests.get(\"http://saral.navgurukul.org/api/courses/\"+str(user)+\"/exercise/getBySlug?slug=\" +list1[user2])\n file1 = url3.json()\n print(file1)\n\n print(\" you want enter \\n up / next/ previous/exit =\")\n\n index=0\n while index < len(list1):\n user3=input(\"enter your next step = \")\n if user3==\"up\":\n url3=requests.get(\"http://saral.navgurukul.org/api/courses/\"+str(user)+\"/exercise/getBySlug?slug=\"+list1[user2-1])\n file1=url3.json()\n print(user2-1,\"content\",file1[\"content\"])\n if user3==\"next\":\n url3=requests.get(\"http://saral.navgurukul.org/api/courses/\"+str(user)+\"/exercise/getBySlug?slug=\"+list1[user2+1])\n file1=url3.json()\n print(user2+1,\"content\",file1[\"content\"])\n if user3==\"previous\":\n url3=requests.get(\"http://saral.navgurukul.org/api/courses/\"+str(user)+\"/exercise/getBySlug?slug=\"+list1[user2])\n file1=url3.json()\n print(user2,\"content\",file1[\"content\"])\n if user3==\"exit\":\n index+=1\n request()\nrequest()\n\n\n\n","repo_name":"swaridebbarma/requests","sub_path":"request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"38332428528","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division\nimport numpy as np\nfrom 
renom.layers.activation.sigmoid import sigmoid\nfrom renom.layers.activation.tanh import tanh\nfrom renom.core import Node, Variable\nfrom renom import precision\nfrom renom.operation import dot, sum\nfrom renom.utility.initializer import GlorotNormal\nfrom .parameterized import Parametrized\nimport renom.cuda as cu\nif cu.has_cuda():\n from renom.cuda.gpuvalue import GPUValue, get_gpu\n\n\ndef gate(x):\n return 1. / (1. + np.exp(-x))\n\n\ndef activation(x):\n return np.tanh(x)\n\n\ndef gate_diff(x):\n return x * (- x + 1.)\n\n\ndef activation_diff(x):\n return (1.0 - x**2)\n\n\nclass lstm(Node):\n def __new__(cls, x, pz, ps, w, wr, b):\n return cls.calc_value(x, pz, ps, w, wr, b)\n\n @classmethod\n def _oper_cpu(cls, x, pz, ps, w, wr, b):\n s = np.zeros((x.shape[0], w.shape[1] // 4), dtype=precision) if ps is None else ps\n z = np.zeros((x.shape[0], w.shape[1] // 4), dtype=precision) if pz is None else pz\n\n u = dot(x, w) + dot(z, wr)\n if b is not None:\n u += b\n m = u.shape[1] // 4\n u, gated = np.split(u.as_ndarray(), [m, ], axis=1)\n u = tanh(u)\n\n gated = sigmoid(gated)\n\n state = gated[:, m:m * 2] * u + gated[:, :m] * s\n z = tanh(state) * gated[:, m * 2:]\n\n ret = cls._create_node(z)\n ret.attrs._x = x\n ret.attrs._w = w\n ret.attrs._wr = wr\n ret.attrs._b = b\n ret.attrs._pz = pz\n ret.attrs._u = u\n ret.attrs._pstate = ps\n ret.attrs._state = state\n ret.attrs._gated = gated\n ret._state = state\n\n if isinstance(pz, Node):\n pz.attrs._pfgate = gated[:, :m]\n\n return ret\n\n @classmethod\n def _oper_gpu(cls, x, pz, ps, w, wr, b):\n if ps is None:\n tmp = GPUValue(shape=(x.shape[0], w.shape[1] // 4))\n s_p = tmp.zeros_like_me()\n z_p = tmp.zeros_like_me()\n else:\n s_p = ps\n z_p = get_gpu(pz)\n\n u = dot(x, w) + dot(z_p, wr)\n if b is not None:\n u += b\n\n z = get_gpu(z_p).empty_like_me()\n state = get_gpu(s_p).empty_like_me()\n\n cu.culstm_forward_activate(get_gpu(u))\n cu.culstm_forward(get_gpu(u), get_gpu(state), get_gpu(s_p), get_gpu(z))\n\n ret = cls._create_node(z)\n\n ret.attrs._x = x\n ret.attrs._w = w\n ret.attrs._wr = wr\n ret.attrs._b = b\n ret.attrs._pz = pz\n ret.attrs._u = u\n ret.attrs._pstate = s_p\n ret.attrs._state = state\n ret._state = state\n\n if isinstance(pz, Node):\n pz.attrs._pfgate = u\n\n return ret\n\n def _backward_cpu(self, context, dy, **kwargs):\n n, m = dy.shape\n\n w = self.attrs._w\n wr = self.attrs._wr\n b = self.attrs._b\n\n u = self.attrs._u\n s = tanh(self.attrs._state)\n\n gated = self.attrs._gated\n gd = gate_diff(gated)\n ps = self.attrs._pstate\n\n drt = context.restore(wr, np.zeros((n, m * 4), dtype=dy.dtype))\n dou = context.restore(w, np.zeros((n, m), dtype=dy.dtype))\n\n pfg = self.attrs.get(\"_pfgate\", np.zeros_like(self))\n\n e = dy\n\n do = e * s * gd[:, 2 * m:]\n dou = e * gated[:, 2 * m:] * activation_diff(s) + pfg * dou\n\n df = dou * gd[:, :m] * ps if ps is not None else np.zeros_like(dou)\n di = dou * gd[:, m:2 * m] * u\n dc = dou * activation_diff(u) * gated[:, m:2 * m]\n\n dr = np.hstack((dc, df, di, do))\n dx = np.dot(dr, w.T)\n\n context.store(wr, dr)\n context.store(w, dou)\n\n if isinstance(self.attrs._x, Node):\n self.attrs._x._update_diff(context, dx)\n\n if isinstance(w, Node):\n w._update_diff(context, np.dot(self.attrs._x.T, dr))\n\n if isinstance(wr, Node):\n wr._update_diff(context, np.dot(self.T, drt))\n\n if isinstance(b, Node):\n b._update_diff(context, np.sum(dr, axis=0, keepdims=True))\n\n if isinstance(self.attrs._pz, Node):\n self.attrs._pz._update_diff(context, np.dot(dr, wr.T))\n\n def 
_backward_gpu(self, context, dy, **kwargs):\n\n w = self.attrs._w\n wr = self.attrs._wr\n b = self.attrs._b\n\n u = self.attrs._u\n s = tanh(self.attrs._state)\n ps = self.attrs._pstate\n\n drt = context.restore(wr, get_gpu(u).zeros_like_me())\n dou = context.restore(w, get_gpu(dy).zeros_like_me())\n pfg = self.attrs.get(\"_pfgate\", get_gpu(u).zeros_like_me())\n\n e = get_gpu(dy)\n\n dr, dou_n = (get_gpu(a).empty_like_me() for a in (drt, dou))\n\n cu.culstm_backward(*map(get_gpu, (u, dr, s, ps, e, pfg, dou, dou_n)))\n\n dx = dot(dr, w.T)\n\n context.store(wr, dr)\n context.store(w, dou_n)\n\n if isinstance(self.attrs._x, Node):\n self.attrs._x._update_diff(context, dx)\n\n if isinstance(w, Node):\n w._update_diff(context, dot(self.attrs._x.T, dr))\n\n if isinstance(wr, Node):\n wr._update_diff(context, dot(self.T, drt))\n\n if isinstance(b, Node):\n b._update_diff(context, sum(dr, axis=0))\n\n if isinstance(self.attrs._pz, Node):\n self.attrs._pz._update_diff(context, dot(dr, wr.T))\n\n\nclass Lstm(Parametrized):\n '''Long short term memory [lstm]_ .\n An Lstm object has 8 weight matrices and 4 bias vectors to learn.\n\n Weights applied to the input of the input gate, forget gate and output gate.\n :math:`W_{ij}, Wgi_{ij}, Wgf_{ij}, Wgo_{ij}`\n\n Weights applied to the recurrent input of the input gate, forget gate and output gate.\n :math:`R_{ij}, Rgi_{ij}, Rgf_{ij}, Rgo_{ij}`\n\n .. math::\n u^t_{i} &= \\sum_{j = 0}^{J-1} W_{ij}x^t_{j} +\n \\sum_{k = 0}^{K-1} R_{ik}y^{t-1}_{k} + b_i \\\\\\\\\n gi^t_{i} &= \\sum_{j = 0}^{J-1} Wgi_{ij}x^t_{j} +\n \\sum_{k = 0}^{K-1} Rgi_{ik}y^{t-1}_{k} + bi_i \\\\\\\\\n gf^t_{i} &= \\sum_{j = 0}^{J-1} Wgf_{ij}x^t_{j} +\n \\sum_{k = 0}^{K-1} Rgf_{ik}y^{t-1}_{k} + bi_f \\\\\\\\\n go^t_{i} &= \\sum_{j = 0}^{J-1} Wgo_{ij}x^t_{j} +\n \\sum_{k = 0}^{K-1} Rgo_{ik}y^{t-1}_{k} + bi_o \\\\\\\\\n s^t_i &= sigmoid(gi^t_{i})tanh(u^t_{i}) + s^{t-1}_isigmoid(gf^t_{i}) \\\\\\\\\n y^t_{i} &= go^t_{i}tanh(s^t_{i})\n\n If the argument ``input_size`` is passed, this layer's weights are initialized\n in the __init__ function.\n Otherwise, the weights are initialized in its first forward calculation.\n\n Args:\n output_size (int): Output unit size.\n input_size (int): Input unit size.\n ignore_bias (bool): If True is given, bias will not be added.\n initializer (Initializer): Initializer object for weight initialization.\n\n Example:\n >>> import numpy as np\n >>> import renom as rm\n >>>\n >>> n, d, t = (2, 3, 4)\n >>> x = rm.Variable(np.random.rand(n, d))\n >>> layer = rm.Lstm(2)\n >>> z = 0\n >>> for i in range(t):\n ... z += rm.sum(layer(x))\n ...\n >>> grad = z.grad() # Backpropagation.\n >>> grad.get(x) # Gradient of x.\n Add([[-0.01853334, -0.0585249 , 0.01290053],\n [-0.0205425 , -0.05837972, 0.00467286]], dtype=float32)\n >>> layer.truncate()\n\n .. 
[lstm] Learning Precise Timing with LSTM Recurrent Networks\n '''\n\n def __init__(self, output_size, input_size=None, ignore_bias=False, initializer=GlorotNormal(), weight_decay=0):\n self._size_o = output_size\n self._ignore_bias = ignore_bias\n self._initializer = initializer\n self._weight_decay = weight_decay\n super(Lstm, self).__init__(input_size)\n\n def weight_initiallize(self, size_i):\n size_i = size_i[0]\n size_o = self._size_o\n bias = np.zeros((1, size_o * 4), dtype=precision)\n bias[:, size_o:size_o * 2] = 1\n self.params = {\n \"w\": Variable(self._initializer((size_i, size_o * 4)), auto_update=True, weight_decay=self._weight_decay),\n \"wr\": Variable(self._initializer((size_o, size_o * 4)), auto_update=True, weight_decay=self._weight_decay)}\n if not self._ignore_bias:\n self.params[\"b\"] = Variable(bias, auto_update=True)\n\n def forward(self, x):\n ret = lstm(x, self.__dict__.get(\"_z\", None),\n self.__dict__.get(\"_state\", None),\n self.params.w,\n self.params.wr,\n self.params.get(\"b\", None))\n self._z = ret\n self._state = ret._state\n return ret\n\n def truncate(self):\n \"\"\"Truncates temporal connection.\"\"\"\n self._z = None\n self._state = None\n\n\nclass ChainedLSTM(Lstm):\n '''\n This chained LSTM model assumes an input of shape (N, T, X) where N is batch size, T is time size and X is the data.\n\n The model automates the process of chaining together several LSTM calls.\n '''\n\n def __init__(self, *args, **kwargs):\n super(ChainedLSTM, self).__init__(*args, **kwargs)\n\n def forward(self, x):\n lstm_model = super(ChainedLSTM, self)\n lstm_model.truncate()\n length = x.shape[1]\n for i in range(length):\n ret = lstm_model.forward(x[:, i])\n return ret\n","repo_name":"sogabe-tohma/Python-code-for-anomaly-detection","sub_path":"Ch4/renom/layers/function/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":9319,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"48"} +{"seq_id":"3560775763","text":"import numpy as np\n\nimport math\nfrom datetime import datetime as date\nfrom CSPBacktrack import CSPBacktrack\nfrom Constraint import Constraint\nfrom Variable import Variable\nfrom Vertex import Vertex\nimport cProfile\n\ndef create_graph_from_square_matrix(size):\n num_vertex = size * size\n vertex_matrix = np.arange(num_vertex).reshape(size, size)\n return [\n Vertex(\n vertex_no,\n [\n neighbour for neighbour in\n np.concatenate((\n vertex_matrix[math.floor(vertex_no / size)],\n vertex_matrix[:, vertex_no % size]))\n if neighbour != vertex_no\n ],\n Variable(np.arange(size))\n ) for vertex_no in range(num_vertex)\n ]\n\n\ndef create_graph_for_queens(size):\n domain = np.arange(size)\n return [Vertex(x, [vertex_no for vertex_no in domain if vertex_no != x], Variable(domain)) for x in domain]\n\n\ndef get_all_diagonal_neighbours(vertex_matrix):\n first_diag = get_all_diagonals_from_matrix(vertex_matrix)\n sec_diag = get_all_diagonals_from_matrix(np.fliplr(vertex_matrix))\n return np.concatenate((first_diag, sec_diag))\n\n\ndef find_neighbour_of_vertex(vertex_no, lists_neighbours):\n return np.array(flatten([neighbours for neighbours in lists_neighbours if vertex_no in neighbours]), dtype=np.int32)\n\n\ndef flatten(list_neigh):\n if len(list_neigh) == 0:\n return []\n else:\n return np.concatenate((list_neigh[0], flatten(list_neigh[1:])))\n\n\ndef filter_vertex_from_neighbours(vertex_no, neighbours):\n return [neigh for neigh in neighbours if neigh != vertex_no]\n\n\ndef 
get_all_diagonals_from_matrix(matrix):\n size = len(matrix)\n return [np.diag(matrix, offset) for offset in range(-(size - 1), size)]\n\n\ndef create_constraints_edges_squares():\n return Constraint(lambda v, neigh_list, graph: v.variable.value not in neigh_list)\n\ndef create_constraint_queens():\n return Constraint(constraint_queens)\n\ndef add_constraints_to_graph(graph, create_constraints_edges_fn):\n for node in graph:\n node.constraints = create_constraints_edges_fn()\n\n\ndef constraint_queens(vertex, neighbours, graph):\n neighbour_vertices = [graph[neigh] for neigh in vertex.neighbours]\n # print(neighbour_vertices)\n return len([neighbour_vertex for neighbour_vertex in neighbour_vertices if\n neighbour_vertex.variable.value is not None and (neighbour_vertex.variable.value == vertex.variable.value or\n int(math.fabs(neighbour_vertex.no - vertex.no)) == int(\n math.fabs(vertex.variable.value - neighbour_vertex.variable.value)))\n ]) == 0\n\npr = cProfile.Profile()\npr.enable()\n\n\ngraph = create_graph_for_queens(10)\n# graph = create_graph_from_square_matrix(4)\n\nfor i in graph:\n print(i)\n\n# add_constraints_to_graph(graph, create_constraints_edges_squares)\n# #\ncsp = CSPBacktrack(graph)\n# #\n\nadd_constraints_to_graph(graph, create_constraint_queens)\nstart_time = date.now()\ncsp.solve(0, start_time)\nprint(csp.counter)\nprint(csp.backtrack_count)\nprint((date.now() - start_time).microseconds)\n\npr.disable()\npr.print_stats()","repo_name":"TMaszko/SILabs","sub_path":"lab2/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"41239161834","text":"import requests\nres = requests.get(\"https://quotes.toscrape.com/\")\nhtml = res.text # html converted into text format\n\nwith open('Authors.txt','w') as f:\n\tfor line in html.split('\\n'): # split the html into a list, one element per source line, e.g. ['<head>'], ['</head>']\n\t\tif '<small class=\"author\"' in line:\n\t\t\tline = line.replace('by ','') # strip the leading 'by '\n\t\t\tline = line.replace('<small class=\"author\" itemprop=\"author\">','').replace('</small>','') # strip the surrounding HTML tags\n\t\t\tauthor = line.strip() # remove white spaces\n\t\t\tprint(author)\n\t\t\tf.write(author)\n\t\t\tf.write('\\n')\n","repo_name":"Maruthi18/Scraping","sub_path":"Author_Extract.py","file_name":"Author_Extract.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"20187619949","text":"import numpy as np\nimport cv2\n\ncap = cv2.VideoCapture(0)\n\nwhile True:\n ret, frame = cap.read()\n width = int(cap.get(3))\n height = int(cap.get(4))\n\n frame = cv2.circle(frame, (300, 300), 60, (0, 0, 255), -1)\n font = cv2.FONT_HERSHEY_COMPLEX\n frame = cv2.putText(frame, 'hi im lakshan', (200, height - 10), font, 1.5, (0, 0, 0), 5, cv2.LINE_AA)\n cv2.imshow('frame', frame)\n\n if cv2.waitKey(1) == ord('q'):\n break\n\ncap.release()\ncv2.destroyAllWindows()","repo_name":"kaweendras/ImageProcessing_OpenCV_Python","sub_path":"12.drawCircle.py","file_name":"12.drawCircle.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"48"} +{"seq_id":"41822496483","text":"import nextcord\nfrom nextcord.ext import commands\nfrom nextcord import Interaction\n\n#local\nimport utilities as util\n\nclass Mod(commands.Cog):\n\n def 
__init__(self, client):\n self.client = client\n\n #config load\n config = util.loadConfig()\n selfServerId = config[\"self_server_id\"]\n\n @nextcord.slash_command(name = \"purge\", description = \"Deletes an x amount of messages.\", guild_ids=[selfServerId])\n async def purge(self, interaction: Interaction, amount: int):\n if interaction.channel.type == nextcord.ChannelType.text:\n async for message in interaction.channel.history(limit=amount):\n try:\n await message.delete()\n except:\n pass\n await interaction.send(content=f'Deleted {amount} message(s)', delete_after=3)\n\n @nextcord.slash_command(name = \"channelpurge\", description = \"Attempt to delete 99 messages in a channel.\", guild_ids=[selfServerId])\n async def chpurge(self, interaction: Interaction):\n channel = interaction.channel\n messages = await channel.history(limit=99).flatten()\n\n if channel.type == nextcord.ChannelType.text:\n await channel.delete_messages(messages)\n \n await interaction.send(content='Deleted 99 messages', delete_after=3)\n\n\ndef setup(client):\n client.add_cog(Mod(client))","repo_name":"JohnReinel98/synth-discord-bot","sub_path":"cogs/Mod.py","file_name":"Mod.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"32603807160","text":"import random\r\n\r\nx, list_, bot = '', [], ''\r\n\r\n\r\ndef igra(x, list_, bot):\r\n x = input('Kamen, škare, papir: ')\r\n list_ = ['kamen', 'papir', 'škare']\r\n bot = random.choice(list_)\r\n\r\n print(bot)\r\n\r\n if x == 'kamen':\r\n if bot == 'škare':\r\n print('You win!')\r\n if bot == 'papir':\r\n print('Bot wins!')\r\n\r\n if x == 'škare':\r\n if bot == 'papir':\r\n print('You win!')\r\n if bot == 'kamen':\r\n print('Bot wins!')\r\n\r\n if x == 'papir':\r\n if bot == 'kamen':\r\n print('You win!')\r\n if bot == 'škare':\r\n print('Bot wins!')\r\n\r\n if x == bot:\r\n print('Tie!')\r\n\r\n r = input(\"Rematch[y, n]? 
\")\r\n if r == 'y':\r\n igra(x, list_, bot)\r\n\r\n\r\nigra(x, list_, bot)\r\n","repo_name":"SAMi-SIKi/-progres-","sub_path":"python samuel/kamen, škare, papir.py","file_name":"kamen, škare, papir.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"48"} +{"seq_id":"3795486332","text":"# STANDARD PACKAGES IN PYTHON 3.x\nimport os\nimport sys\nimport shutil\nimport sqlite3\nfrom time import sleep\nfrom threading import Thread, active_count\nfrom pprint import pprint\n\n# 3RD PACKAGES\nimport requests\nfrom bs4 import BeautifulSoup as bs\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.remote.webdriver import WebDriver\n\n# LOCAL PACKAGES\nimport utils\nimport config\n\n#TODO Quando o looping estiver otimizado,criar uma innerQueue que a medida que \n# os movieSources forem sendo adicionados na innerQueue (ou vetor moviesSource),\n# uma thread interna vai adicionando as informações do filme ja tratadas na queue\n# principalmente\n\nPROGRESS = 0\n\n# def fixup_first_movie(firstMovie):\n# return None\n\n\ndef scroll_page_until_ends(driver, await_time):\n\n previous_html = \"\"\n current_html = driver.page_source\n\n while current_html != previous_html:\n previous_html = current_html\n driver.execute_script(\"window.scrollTo(0,document.body.scrollHeight);\")\n sleep(await_time) # Pode variar de acordo com a velocidade da internet\n current_html = driver.page_source\n break # tests purpose\n\n\ndef start_scrapp(netflixInstance, loginEvent=None, loadedEvent=None, queue=None):\n # loginEvent.wait() \n #TODO otimizar esse looping\n\n movies_sources = []\n\n for i, so in enumerate(['az', 'za']):\n netflixInstance.driver.get(config.MAIN_URL + '/browse/genre/34399?so=' + so)\n \n print('STATUS: Finding all movies on netflix...['+str(i+1)+'/2] ', end='', flush=True)\n scroll_page_until_ends(netflixInstance.driver, .9)\n\n print('OK') \n print('STATUS: Saving innerHTML of all movies...['+str(i+1)+'/2] ', end='', flush=True)\n\n # for each slider-item, save his html code\n movies_sources += list(\n map(lambda p : bs(p.get_attribute('innerHTML'), 'html.parser'),\n netflixInstance.driver.find_elements_by_class_name('slider-item'))\n ) \n print('OK')\n\n # Removing duplicates and sorting by name\n all_sources = utils.remove_duplicates(movies_sources, lambda s: s.find('a')['aria-label'])\n all_sources = sorted(all_sources, key=lambda s: s.find('a')['aria-label'])\n\n print(\"TOTAL OF MOVIES = \" + str(len(all_sources)))\n print('STATUS: Starting to get information... 
')\n\n if not os.path.exists(config.FOLDER_NAME):\n os.mkdir(config.FOLDER_NAME)\n\n # for each slider-item, run a thread to get the movie's information;\n # if the number of active threads is greater than MAX_THREADS,\n # wait until one of them is released\n total_len = len(all_sources)\n for i, s_movie in enumerate(all_sources):\n t = Thread(\n target=retrieve_movie,\n args=(s_movie, total_len), \n name='MOVIE_' + utils.safe_movie_name(s_movie.find('a')['aria-label'])\n )\n t.start()\n\n while active_count() > config.MAX_THREADS:\n sleep(1)\n\n # loadedEvent.set()\n\n\ndef retrieve_movie(s_movie, total_len):\n\n global PROGRESS\n\n tag_a = s_movie.find('a')\n movie_name = tag_a['aria-label']\n movie_link = tag_a['href'].split('?')[0].replace('watch', 'title')\n\n print(\"GETTING -> \" + movie_name)\n movie = scrapp_movie_page(movie_link)\n movie.update({'miniature_link': tag_a.find('img', {'class': 'boxart-image'})['src']})\n\n\n utils.save_movie_miniature(movie_name, movie['miniature_link'])\n\n PROGRESS += 1\n print(u'\\u2713 ' + (\"[{:0\"+str(len(str(total_len)))+\"}/{:d}]-> \").format(PROGRESS, total_len) + movie_name)\n\n\ndef scrapp_movie_page(movie_link):\n\n movie = {}\n driver = utils.generate_webdriver(show=False)\n driver.get(config.MAIN_URL + movie_link)\n\n movie.update({'movie_id': int(driver.current_url.split('/')[-1])})\n movie.update({'title': driver.find_element_by_class_name('text').text})\n movie.update({'year': int(driver.find_element_by_class_name('year').text)})\n movie.update({'duration': utils.parse_date(driver.find_element_by_class_name('duration').text)})\n movie.update({'synopsis': driver.find_element_by_class_name('synopsis').text})\n movie.update({'maturity': 0})\n\n try:\n movie['maturity'] = int(driver.find_element_by_class_name('maturity-number').text)\n except ValueError:\n pass\n\n driver.find_element_by_id('tab-ShowDetails').click()\n details = driver.find_element_by_class_name('simpleSlider') \\\n .find_element_by_class_name('sliderContent')\n\n # cut_indexes saves the initial indexes of director, cast, screenwriter and the last index\n movie_cast = details.find_element_by_tag_name('span').find_elements_by_tag_name('li')\n cut_indexes = [i for i, m in enumerate(movie_cast) if m.get_attribute('class') == 'listLabel'] + [len(movie_cast)]\n cut_map = {0: 'directors', 1: 'cast', 2: 'screenwriters'}\n\n for i in range(len(cut_indexes)-1):\n movie.update({\n cut_map[i]: [\n {\n 'person_name': movie_cast[j].text, \n 'person_id': int(movie_cast[j].find_element_by_tag_name('a') \\\n .get_attribute('href').split('/')[-1])\n }\n for j in range(cut_indexes[i]+1, cut_indexes[i+1])\n ]\n })\n\n genres = details.find_element_by_class_name('detailsTags') \\\n .find_elements_by_tag_name('ul')[0] \\\n .find_elements_by_tag_name('li')\n\n movie.update({\n 'genres': [\n {\n 'genre': genre.text,\n 'genre_id': int(genre.find_element_by_tag_name('a') \\\n .get_attribute('href').split('/')[-1])\n }\n for genre in genres\n ]\n })\n\n driver.close()\n\n return movie","repo_name":"luizok/NetflixScrapper","sub_path":"scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":5910,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"48"} +{"seq_id":"43572255254","text":"from tkinter import *\nimport Figures\nfrom Figure import Figure\nimport Plugin\nfrom JsonSerializer import Serializer\n\n\nclass Paint(Frame):\n\n def __init__(self, parent):\n Frame.__init__(self, 
parent)\n\n self.parent = parent\n\n self.color = \"black\"\n self.brush_size = 2\n self.canv = Canvas(self, bg=\"white\")\n self.figure = \"Line\"\n self.figures = Figure.__subclasses__()\n\n self.canv.grid(row=3, column=0, columnspan=7,\n padx=5, pady=5, sticky=E+W+S+N)\n self.set_canvas()\n\n self.flag = False\n self.polyflag = 0\n self.x = 0\n self.y = 0\n self.coords = []\n\n self.obj = Serializer()\n self.serialization = False\n\n def set_color(self, new_color):\n self.color = new_color\n\n def set_brush_size(self, new_size):\n self.brush_size = new_size\n\n def serialize(self):\n self.serialization = True\n self.obj.serialize()\n l = self.obj.deserialize()\n\n for i in range(len(l)):\n restored = dict(l[i])\n if restored[\"type\"] not in [f.__name__ for f in self.figures]:\n continue\n self.figure = restored[\"type\"]\n self.color = restored[\"color\"]\n self.brush_size = restored[\"width\"]\n self.coords = restored[\"coords\"]\n self.flag = True\n if self.figure == \"Polygon\":\n self.polyflag = 2\n self.draw_figure(self)\n\n self.serialization = False\n\n def draw_figure(self, event):\n if not self.flag:\n self.flag = True\n self.x, self.y = event.x, event.y\n else:\n for i in self.figures:\n if self.figure == i.__name__:\n if not self.serialization:\n if self.polyflag == 2:\n self.polyflag = False\n self.flag = False\n elif self.polyflag == 1:\n self.coords.append(event.x)\n self.coords.append(event.y)\n break\n else:\n if self.figure != \"Line\":\n self.flag = False\n self.coords.append(self.x)\n self.coords.append(self.y)\n self.coords.append(event.x)\n self.coords.append(event.y)\n i.draw(self, event.x, event.y)\n\n if self.polyflag != 1:\n self.obj.serialize_figure(self)\n self.coords = []\n\n def set_figure(self, figure):\n self.figure = figure\n self.flag = False\n\n if figure == \"Polygon\" and self.polyflag == 0:\n self.polyflag = 1\n self.draw_figure(self)\n elif figure == \"Polygon\" and self.polyflag == 1:\n self.polyflag = 2\n self.draw_figure(self)\n\n def set_canvas(self):\n\n self.parent.title(\"Paint\")\n self.pack(fill=BOTH, expand=2)\n\n self.columnconfigure(6, weight=1)\n self.rowconfigure(3, weight=1)\n\n self.canv.bind(\"